diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml new file mode 100644 index 0000000..5a71863 --- /dev/null +++ b/.github/workflows/check.yaml @@ -0,0 +1,74 @@ +name: Check + +on: + pull_request: + types: [opened, synchronize, reopened] + push: + branches: [main] + +jobs: + test: + name: Tests + runs-on: ubuntu-latest + steps: + - name: Install Nargo + uses: noir-lang/noirup@v0.1.3 + with: + toolchain: v1.0.0-beta.2 + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + - name: Run tests + run: | + nargo compile --workspace + cargo test --all + + format: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: rustfmt + - name: Check formatting + run: cargo fmt --all -- --check + - name: Install cargo-binstall + uses: taiki-e/install-action@cargo-binstall + - name: Install tools + run: cargo binstall --no-confirm taplo-cli + - name: Check TOML formatting + run: taplo fmt --check + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + components: clippy + - name: Run clippy + run: cargo clippy --all + + deps: + name: Dependencies + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly + - name: Install cargo-binstall + uses: taiki-e/install-action@cargo-binstall + - name: Install tools + run: cargo binstall --no-confirm cargo-udeps + - name: Check unused dependencies + run: cargo udeps \ No newline at end of file diff --git a/.rustfmt.toml b/.rustfmt.toml index de018f8..452cbec 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,11 +1,46 @@ -imports_granularity = "Crate" -group_imports = "StdExternalCrate" +# Rustfmt configuration +# Opinionated 
whitespace and tabs. The most important of these are imports and width settings. +# Others may want to borrow or change these to their own liking. +# https://rust-lang.github.io/rustfmt -format_code_in_doc_comments = true +# version-related +unstable_features=true +use_try_shorthand=true # replace any `try!` (2015 Rust) with `?` -use_field_init_shorthand = true +# misc formatting +condense_wildcard_suffixes =true # replace: (a,b,_,_)=(1, 2, 3, 4); -> (a,b,..)=(1, 2, 3, 4); +format_code_in_doc_comments =true # format code blocks in doc comments +format_macro_matchers =true # $a: ident -> $a:ident +format_strings =true # break and insert newlines for long string literals +match_block_trailing_comma =true # include comma in match blocks after '}' +normalize_comments =true # convert /*..*/ to //.. where possible +reorder_impl_items =true # move `type` and `const` declarations to top of impl block +struct_field_align_threshold=20 # align struct arguments' types vertically +use_field_init_shorthand =true # struct initialization short {x: x} -> {x} -wrap_comments = true -normalize_comments = true -comment_width = 80 -edition = "2021" +# reduce whitespace +blank_lines_upper_bound=1 # default: 1. Sometimes useful to change to 0 to condense a file. +brace_style ="PreferSameLine" # prefer starting `{` without inserting extra \n +fn_single_line =true # if it's a short 1-liner, let it be a short 1-liner +match_arm_blocks =false # remove unnecessary {} in match arms +newline_style ="Unix" # not auto, we won the culture war. 
\n over \r\n +overflow_delimited_expr=true # prefer ]); to ]\n); +where_single_line =true # put where on a single line if possible + +# imports preferences +group_imports ="StdExternalCrate" # create import groupings for std, external libs, and internal deps +imports_granularity="Crate" # aggressively group imports + +# width settings: everything to 100 +comment_width =100 # default: 80 +inline_attribute_width=60 # inlines #[cfg(test)]\nmod test -> #[cfg(test)] mod test +max_width =100 # default: 100 +use_small_heuristics ="Max" # don't ever newline short of `max_width`. +wrap_comments =true # wrap comments at `comment_width` +# format_strings = true # wrap strings at `max_length` + +# tabs and spaces +hard_tabs =false # (def: false) use spaces over tabs +tab_spaces=2 # 2 > 4, it's just math. + +ignore=["tls"] diff --git a/Cargo.lock b/Cargo.lock index 37a15d1..3a0e56e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,68 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 + +[[package]] +name = "acir" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir_field", + "base64", + "bincode", + "brillig", + "flate2", + "serde", + "serde-big-array", + "strum", + "strum_macros", + "thiserror", +] + +[[package]] +name = "acir_field" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "ark-bn254", + "ark-ff", + "cfg-if", + "hex", + "num-bigint 0.4.6", + "serde", +] + +[[package]] +name = "acvm" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "acvm_blackbox_solver", + "brillig_vm", + "fxhash", + "indexmap 1.9.3", + "serde", + "thiserror", + "tracing", +] + +[[package]] +name = "acvm_blackbox_solver" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "blake2", + "blake3", + "k256", + "keccak", + "libaes", + "num-bigint 0.4.6", + "p256", + "sha2", + "thiserror", +] [[package]] name = "addchain" @@ -13,21 +75,24 @@ dependencies = [ "num-traits", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + 
"once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -37,6 +102,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "anes" version = "0.1.6" @@ -55,6 +126,130 @@ version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "educe", + "fnv", + "hashbrown 0.15.2", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "arrayvec", + "digest", + "educe", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff", + "ark-serialize", + "ark-std", + "educe", + "fnv", + "hashbrown 0.15.2", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "arrayvec", + "digest", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -74,19 +269,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] -name = "backtrace" -version = "0.3.74" +name = "base16ct" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets", -] +checksum = 
"349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bellpepper" @@ -95,7 +293,7 @@ source = "git+https://github.com/argumentcomputer/bellpepper?branch=dev#d0225bf6 dependencies = [ "bellpepper-core", "byteorder", - "ff", + "ff 0.13.0", "itertools 0.12.1", ] @@ -107,7 +305,7 @@ checksum = "1d8abb418570756396d722841b19edfec21d4e89e1cf8990610663040ecb1aea" dependencies = [ "blake2s_simd", "byteorder", - "ff", + "ff 0.13.0", "serde", "thiserror", ] @@ -136,12 +334,6 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.6.0" @@ -160,6 +352,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + [[package]] name = "blake2b_simd" version = "1.0.2" @@ -182,6 +383,19 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "blake3" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "675f87afced0413c9bb02843499dbbd3882a237645883f71a2b59644a6d2f753" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name 
= "block-buffer" version = "0.10.4" @@ -211,14 +425,35 @@ checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ "blst", "byte-slice-cast", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "pairing", - "rand_core", + "rand_core 0.6.4", "serde", "subtle", ] +[[package]] +name = "brillig" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir_field", + "serde", +] + +[[package]] +name = "brillig_vm" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acir", + "acvm_blackbox_solver", + "num-bigint 0.4.6", + "num-traits", + "thiserror", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -324,11 +559,11 @@ dependencies = [ "criterion", "digest", "expect-test", - "ff", + "ff 0.13.0", "flate2", "generic-array 1.1.0", - "getrandom", - "group", + "getrandom 0.2.15", + "group 0.13.0", "grumpkin-msm", "halo2curves", "handlebars", @@ -340,11 +575,10 @@ dependencies = [ "num-traits", "once_cell", "pairing", - "pprof", "proptest", - "rand", + "rand 0.8.5", "rand_chacha", - "rand_core", + "rand_core 0.6.4", "rayon", "rayon-scan", "ref-cast", @@ -360,23 +594,40 @@ dependencies = [ "tracing-subscriber", "tracing-test", "tracing-texray", - "vergen", ] [[package]] -name = "constant_time_eq" -version = "0.3.1" +name = "client-side-prover-frontend" +version = "0.1.0" +dependencies = [ + "acvm", + "ark-bn254", + "bellpepper-core", + "bincode", + "client-side-prover", + "client-side-prover-frontend", + "halo2curves", + "noirc_abi", + "serde", + "serde_json", + "tempdir", + "tempfile", + "thiserror", + "tracing", + "tracing-test", +] + +[[package]] +name = "const-oid" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" 
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "cpp_demangle" -version = "0.4.4" +name = "constant_time_eq" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" -dependencies = [ - "cfg-if", -] +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "cpufeatures" @@ -464,31 +715,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] -name = "crypto-common" -version = "0.1.6" +name = "crypto-bigint" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array 0.14.7", - "typenum", + "rand_core 0.6.4", + "subtle", + "zeroize", ] [[package]] -name = "debugid" -version = "0.8.0" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "uuid", + "generic-array 0.14.7", + "typenum", ] [[package]] -name = "deranged" -version = "0.3.11" +name = "der" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "powerfmt", + "const-oid", + "zeroize", ] [[package]] @@ -499,6 +754,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", + "subtle", ] [[package]] @@ 
-507,20 +763,90 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59f8e79d1fbf76bdfbde321e902714bf6c49df88a7dda6fc682fc2979226962d" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der", + "elliptic-curve", + "rfc6979", + "signature", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint", + "der", + "digest", + "ff 0.12.1", + "generic-array 0.14.7", + "group 0.12.1", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -539,6 +865,16 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.0" @@ -548,7 +884,7 @@ dependencies = [ "bitvec", "byteorder", "ff_derive", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -568,18 +904,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "findshlibs" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" -dependencies = [ - "cc", - "lazy_static", - "libc", - "winapi", -] - [[package]] name = "flate2" version = "1.0.34" @@ -596,12 +920,27 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] 
+name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -630,15 +969,21 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.31.1" +name = "getrandom" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", +] [[package]] name = "glob" @@ -646,15 +991,26 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", - "rand", - "rand_core", + "ff 0.13.0", + "rand 0.8.5", + "rand_core 0.6.4", "rand_xorshift", "subtle", ] @@ -666,10 +1022,10 @@ source = "git+https://github.com/argumentcomputer/grumpkin-msm?branch=dev#414da3 dependencies = [ "blst", "cc", - "getrandom", + "getrandom 0.2.15", "halo2curves", "pasta_curves", - "rand", + "rand 0.8.5", "rand_chacha", "rayon", "semolina", @@ -694,8 +1050,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"db81d01d0bbfec9f624d7590fc6929ee2537a64ec1e080d8f8c9e2d2da291405" dependencies = [ "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "hex", "lazy_static", "num-bigint 0.4.6", @@ -703,8 +1059,8 @@ dependencies = [ "pairing", "pasta_curves", "paste", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "serde", "serde_arrays", @@ -726,6 +1082,27 @@ dependencies = [ "thiserror", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "hermit-abi" version = "0.3.9" @@ -747,6 +1124,15 @@ dependencies = [ "serde", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + [[package]] name = "home" version = "0.5.9" @@ -756,6 +1142,26 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + 
[[package]] name = "is-terminal" version = "0.4.13" @@ -767,6 +1173,11 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "iter-extended" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" + [[package]] name = "itertools" version = "0.10.5" @@ -809,6 +1220,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "k256" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "sha2", +] + [[package]] name = "keccak" version = "0.1.5" @@ -827,11 +1250,17 @@ dependencies = [ "spin", ] +[[package]] +name = "libaes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82903360c009b816f5ab72a9b68158c27c301ee2c3f20655b55c5e589e7d3bb7" + [[package]] name = "libc" -version = "0.2.159" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libm" @@ -876,15 +1305,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "memmap2" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" -dependencies = [ - "libc", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -904,7 +1324,7 @@ dependencies = [ "blake2s_simd", "blstrs", "byteorder", - "ff", + "ff 0.13.0", "generic-array 0.14.7", "pasta_curves", "serde", @@ -912,14 +1332,28 @@ dependencies = [ ] [[package]] -name = "nix" -version = "0.26.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +name = "noirc_abi" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", + "acvm", + "iter-extended", + "noirc_printable_type", + "num-bigint 0.4.6", + "num-traits", + "serde", + "serde_json", + "thiserror", + "toml", +] + +[[package]] +name = "noirc_printable_type" +version = "1.0.0-beta.2" +source = "git+https://github.com/noir-lang/noir?rev=v1.0.0-beta.2#1a2a08cbcb68646ff1aaef383cfc1798933c1355" +dependencies = [ + "acvm", + "serde", ] [[package]] @@ -951,16 +1385,10 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", - "rand", + "rand 0.8.5", "serde", ] -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - [[package]] name = "num-integer" version = "0.1.46" @@ -990,24 +1418,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - -[[package]] -name = "object" -version = "0.36.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.20.2" @@ -1026,13 +1436,24 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.11.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2", +] + [[package]] name = "pairing" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group", + "group 0.13.0", ] [[package]] @@ -1065,11 +1486,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" dependencies = [ "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "hex", "lazy_static", - "rand", + "rand 0.8.5", "serde", "static_assertions", "subtle", @@ -1112,7 +1533,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1132,6 +1553,16 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der", + "spki", +] + [[package]] name = "plotters" version = "0.3.7" @@ -1160,32 +1591,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - -[[package]] -name = "pprof" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" -dependencies = [ - "backtrace", - "cfg-if", - "findshlibs", - "libc", - "log", - "nix", - "once_cell", - "parking_lot", - "smallvec", - "symbolic-demangle", - 
"tempfile", - "thiserror", -] - [[package]] name = "ppv-lite86" version = "0.2.20" @@ -1197,9 +1602,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -1212,10 +1617,10 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags", "lazy_static", "num-traits", - "rand", + "rand 0.8.5", "rand_chacha", "rand_xorshift", "regex-syntax 0.8.5", @@ -1245,6 +1650,19 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + [[package]] name = "rand" version = "0.8.5" @@ -1253,7 +1671,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -1263,16 +1681,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -1281,7 +1714,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -1313,13 +1746,22 @@ dependencies = [ "rayon", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags", ] [[package]] @@ -1339,7 +1781,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1387,22 +1829,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] -name = "rustc-demangle" -version = "0.1.24" +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rfc6979" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint", + "hmac", + "zeroize", +] [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.6.0", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1444,6 +1900,20 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "semolina" version = "0.1.4" @@ -1463,6 +1933,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde_arrays" version = "0.1.0" @@ -1480,7 +1959,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1495,6 +1974,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] 
+ [[package]] name = "sha2" version = "0.10.8" @@ -1531,6 +2019,16 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + [[package]] name = "smallvec" version = "1.13.2" @@ -1543,6 +2041,16 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "sppark" version = "0.1.8" @@ -1553,12 +2061,6 @@ dependencies = [ "which", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -1566,33 +2068,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "subtle" -version = "2.6.1" +name = "strum" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" [[package]] -name = "symbolic-common" -version = "12.12.0" +name = "strum_macros" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "debugid", - "memmap2", - "stable_deref_trait", - "uuid", + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", ] [[package]] -name = "symbolic-demangle" -version = "12.12.0" +name = "subtle" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" -dependencies = [ - "cpp_demangle", - "rustc-demangle", - "symbolic-common", -] +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -1607,9 +2105,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -1622,14 +2120,25 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" -version = "3.13.0" +version = "3.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" dependencies = [ "cfg-if", "fastrand", + "getrandom 0.3.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -1662,7 +2171,7 @@ checksum = 
"08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1685,46 +2194,47 @@ dependencies = [ ] [[package]] -name = "time" -version = "0.3.36" +name = "tinytemplate" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ - "deranged", - "itoa", - "libc", - "num-conv", - "num_threads", - "powerfmt", "serde", - "time-core", - "time-macros", + "serde_json", ] [[package]] -name = "time-core" -version = "0.1.2" +name = "toml" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] [[package]] -name = "time-macros" -version = "0.2.18" +name = "toml_datetime" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ - "num-conv", - "time-core", + "serde", ] [[package]] -name = "tinytemplate" -version = "1.2.1" +name = "toml_edit" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ + "indexmap 2.7.1", "serde", - "serde_json", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] @@ -1746,7 +2256,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1806,7 +2316,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -1857,30 +2367,12 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" -[[package]] -name = "uuid" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" - [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vergen" -version = "8.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" -dependencies = [ - "anyhow", - "cfg-if", - "rustversion", - "time", -] - [[package]] name = "version_check" version = "0.9.5" @@ -1912,6 +2404,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.93" @@ -1934,7 +2435,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -1956,7 +2457,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + 
"syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2102,6 +2603,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + [[package]] name = "wyz" version = "0.5.1" @@ -2129,7 +2648,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] [[package]] @@ -2149,5 +2668,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.98", ] diff --git a/Cargo.toml b/Cargo.toml index b4c3098..b016d36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,86 +1,74 @@ -[package] -name = "client-side-prover" -version = "0.1.0" -authors = ["Pluto Engineering"] -edition = "2021" -description = "Client side proving" -readme = "README.md" -repository = "https://github.com/pluto/client-side-prover" -license-file = "LICENSE" -keywords = ["zkSNARKs", "cryptography", "proofs"] -# rust-version = "1.79" +[workspace] +members =["prover", "frontend"] +resolver="2" -[dependencies] -bellpepper-core = { version = "0.4.0", default-features = false } -bellpepper = { git = "https://github.com/argumentcomputer/bellpepper", branch = "dev", default-features = false } -ff = { version = "0.13.0", features = ["derive"] } -digest = "0.10" -halo2curves = { version = "0.6.0", features = ["bits", "derive_serde"] } -sha3 = "0.10" -rayon = 
"1.7" -rand_core = { version = "0.6", default-features = false } -rand_chacha = "0.3" -subtle = "2.5" -neptune = { git = "https://github.com/argumentcomputer/neptune", branch = "dev", default-features = false } -generic-array = "1.0.0" -num-bigint = { version = "0.4", features = ["serde", "rand"] } -num-traits = "0.2" -num-integer = "0.1" -serde = { version = "1.0", features = ["derive", "rc"] } -bincode = "1.3" -bitvec = "1.0" -byteorder = "1.4.3" -thiserror = "1.0" -group = "0.13.0" -pairing = "0.23.0" -tracing = "0.1.37" -cfg-if = "1.0.0" -once_cell = "1.18.0" -itertools = "0.13.0" # zip_eq -rand = "0.8.5" -ref-cast = "1.0.20" # allocation-less conversion in multilinear polys # lightens impl macros for pasta -static_assertions = "1.1.0" -rayon-scan = "0.1.0" +[workspace.dependencies] +bellpepper-core ={ version="0.4.0", default-features=false } +bellpepper ={ git="https://github.com/argumentcomputer/bellpepper", branch="dev", default-features=false } +ff ={ version="0.13.0", features=["derive"] } +digest ="0.10" +halo2curves ={ version="0.6.0", features=["bits", "derive_serde"] } +sha3 ="0.10" +rayon ="1.7" +rand_core ={ version="0.6", default-features=false } +rand_chacha ="0.3" +subtle ="2.5" +neptune ={ git="https://github.com/argumentcomputer/neptune", branch="dev", default-features=false } +generic-array ="1.0.0" +num-bigint ={ version="0.4", features=["serde", "rand"] } +num-traits ="0.2" +num-integer ="0.1" +serde ={ version="1.0", features=["derive", "rc"] } +bincode ="1.3" +bitvec ="1.0" +byteorder ="1.4.3" +thiserror ="1.0" +group ="0.13.0" +pairing ="0.23.0" +tracing ="0.1.37" +cfg-if ="1.0.0" +once_cell ="1.18.0" +itertools ="0.13.0" # zip_eq +rand ="0.8.5" +ref-cast ="1.0.20" # allocation-less conversion in multilinear polys # lightens impl macros for pasta +static_assertions="1.1.0" +rayon-scan ="0.1.0" +hex ="0.4.3" -[target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] # grumpkin-msm has been patched to support MSMs 
for the pasta curve cycle # see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 -grumpkin-msm = { git = "https://github.com/argumentcomputer/grumpkin-msm", branch = "dev" } +grumpkin-msm={ git="https://github.com/argumentcomputer/grumpkin-msm", branch="dev" } -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.0", default-features = false, features = ["js"] } +# wasm32 dependencies +getrandom={ version="0.2.0", default-features=false, features=["js"] } -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -proptest = "1.2.0" -pprof = { version = "0.13", optional = true } # in benches under feature "flamegraph" +# property testing +proptest="1.2.0" -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } +# benchmarks +criterion={ version="0.5", features=["html_reports"] } -[dev-dependencies] -flate2 = "1.0" -hex = "0.4.3" -sha2 = "0.10.7" -tracing-test = "0.2.4" -expect-test = "1.4.1" -anyhow = "1.0.72" -tap = "1.0.1" -tracing-texray = "0.2.0" -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -handlebars = "5.1.0" -serde_json = "1.0.1" - -[build-dependencies] -vergen = { version = "8", features = ["build", "git", "gitcl"] } - -[features] -default = ["grumpkin-msm/portable"] -# asm = ["halo2curves/asm"] -# Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. 
-# portable = ["grumpkin-msm/portable"] -# cuda = ["grumpkin-msm/cuda"] +# dev dependencies +flate2 ="1.0" +sha2 ="0.10.7" +tracing-test ={ version="0.2.4", features=["no-env-filter"] } +expect-test ="1.4.1" +anyhow ="1.0.72" +tap ="1.0.1" +tracing-texray ="0.2.0" +tracing-subscriber={ version="0.3.17", features=["env-filter"] } +handlebars ="5.1.0" +serde_json ="1.0.1" [profile.release] -lto = true -codegen-units = 1 -panic = "abort" +lto =true +codegen-units=1 +panic ="abort" + +[profile.dev] +opt-level =1 +debug =true +incremental =true +codegen-units=256 +lto =false +panic ="unwind" diff --git a/Nargo.toml b/Nargo.toml new file mode 100644 index 0000000..10a7548 --- /dev/null +++ b/Nargo.toml @@ -0,0 +1,11 @@ +[workspace] +members=[ + "nivc", + "examples/add_external", + "examples/square_zeroth", + "examples/swap_memory", + "examples/poseidon", + "examples/basic", + "examples/collatz_even", + "examples/collatz_odd", +] diff --git a/README.md b/README.md index 6042152..fbfbe92 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,20 @@ -# Nova: High-speed recursive arguments from folding schemes +# Client Side Prover > [!NOTE] -> This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova) and also forked from [https://github.com/argumentcomputer/arecibo](https://github.com/argumentcomputer/arecibo) currently, but will likely be so different in the future that those are just motivation. +> This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova) and also forked from [https://github.com/argumentcomputer/arecibo](https://github.com/argumentcomputer/arecibo). 
-## Notes +## Project Structure +The repository contains several key components: +- `client-side-prover-frontend`: Frontend adapters for both Noir and Circom +- `client-side-prover`: Backend implementation of the client side prover -### deny -I removed deny.toml, but having this could be good to use actually, \ No newline at end of file +## Features +- Supernova NIVC folding scheme implementation +- Support for both Noir and Circom circuit frameworks +- Client-side proving capabilities through WebAssembly +- Recursive proof generation and verification + +## Usage +This repository and its crates are **not** production ready. Do not use them in production. No audits have been done and none are planned. + +With that said, work has been done to make the implementation here work with an offline setup phase. Therefore, this can be used run proofs on an edge device which can later be verified by a remote server. \ No newline at end of file diff --git a/examples/add_external/Nargo.toml b/examples/add_external/Nargo.toml new file mode 100644 index 0000000..4fde61f --- /dev/null +++ b/examples/add_external/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="add_external" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/add_external/src/main.nr b/examples/add_external/src/main.nr new file mode 100644 index 0000000..0452191 --- /dev/null +++ b/examples/add_external/src/main.nr @@ -0,0 +1,16 @@ +use nivc::FoldingVariables; + +/// Add two external values to two registers that are folded across circuits. 
+pub fn main( + folding_variables: pub FoldingVariables<2>, + external: [Field; 2], + next_pc: Field, +) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [ + external[0] + folding_variables.registers[0], + external[1] + folding_variables.registers[1], + ], + program_counter: next_pc, + } +} diff --git a/examples/basic/Nargo.toml b/examples/basic/Nargo.toml new file mode 100644 index 0000000..a09a1c4 --- /dev/null +++ b/examples/basic/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="basic" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/basic/src/main.nr b/examples/basic/src/main.nr new file mode 100644 index 0000000..02d122a --- /dev/null +++ b/examples/basic/src/main.nr @@ -0,0 +1,15 @@ +use nivc::FoldingVariables; + +/// Checks general arithmetic operations. +pub fn main( + folding_variables: pub FoldingVariables<1>, + external_mul: Field, + external_add: Field, +) -> pub FoldingVariables<1> { + FoldingVariables { + registers: [ + external_mul * folding_variables.registers[0] + external_add + 420, + ], + program_counter: folding_variables.program_counter, + } +} diff --git a/examples/collatz_even/Nargo.toml b/examples/collatz_even/Nargo.toml new file mode 100644 index 0000000..45b7a6c --- /dev/null +++ b/examples/collatz_even/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="collatz_even" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/collatz_even/src/main.nr b/examples/collatz_even/src/main.nr new file mode 100644 index 0000000..27c35c3 --- /dev/null +++ b/examples/collatz_even/src/main.nr @@ -0,0 +1,25 @@ +use nivc::FoldingVariables; + +/// When the value is even, divide it by 2. +/// If the value is 1, return -1 to indicate the end of the sequence. 
+/// Otherwise, return 0 or 1 to indicate the next program counter, i.e., collatz_even or collatz_odd. +pub fn main(folding_variables: pub FoldingVariables<1>) -> pub FoldingVariables<1> { + // Get the value from the first register as a u64 for easier manipulation. + let value = folding_variables.registers[0] as u64; + + // Run the even-value transformation. { f(n) = n / 2 } + let next_value = value / 2; + + // If the next value is 1, we've reached the end of the sequence (the fixed cycle predicted by the Collatz conjecture). + // We return -1 to indicate that the sequence has ended. + let next_pc = if next_value % 2 == 0 { + 0 + } else if next_value == 1 { + -1 + } else { + 1 + }; + + // Return the updated folding variables with the new value and program counter. + FoldingVariables { registers: [next_value as Field], program_counter: next_pc } +} diff --git a/examples/collatz_odd/Nargo.toml b/examples/collatz_odd/Nargo.toml new file mode 100644 index 0000000..8761dd1 --- /dev/null +++ b/examples/collatz_odd/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="collatz_odd" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/collatz_odd/src/main.nr b/examples/collatz_odd/src/main.nr new file mode 100644 index 0000000..44e5eee --- /dev/null +++ b/examples/collatz_odd/src/main.nr @@ -0,0 +1,14 @@ +use nivc::FoldingVariables; + +/// When the value is odd, multiply it by 3 and add 1. +/// If the result is even, return 0 to indicate the next program counter, i.e., collatz_even. +/// Otherwise, return 1 to indicate the next program counter, i.e., collatz_odd. +/// Note, { f(n) = 3n + 1 } can never be 1, so we don't need to check for that. 
+pub fn main(folding_variables: pub FoldingVariables<1>) -> pub FoldingVariables<1> { + let value = folding_variables.registers[0] as u64; + + let next_value = 3 * value + 1; + let next_pc = if next_value % 2 == 0 { 0 } else { 1 }; + + FoldingVariables { registers: [next_value as Field], program_counter: next_pc } +} diff --git a/examples/poseidon/Nargo.toml b/examples/poseidon/Nargo.toml new file mode 100644 index 0000000..472e553 --- /dev/null +++ b/examples/poseidon/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="poseidon" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/poseidon/src/main.nr b/examples/poseidon/src/main.nr new file mode 100644 index 0000000..11914c6 --- /dev/null +++ b/examples/poseidon/src/main.nr @@ -0,0 +1,9 @@ +use nivc::FoldingVariables; +use std::hash::poseidon::bn254::hash_2; + +pub fn main(folding_variables: pub FoldingVariables<2>) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [hash_2(folding_variables.registers), 0], + program_counter: folding_variables.program_counter, + } +} diff --git a/examples/square_zeroth/Nargo.toml b/examples/square_zeroth/Nargo.toml new file mode 100644 index 0000000..f203d4a --- /dev/null +++ b/examples/square_zeroth/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="square_zeroth" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/square_zeroth/src/main.nr b/examples/square_zeroth/src/main.nr new file mode 100644 index 0000000..e5e030e --- /dev/null +++ b/examples/square_zeroth/src/main.nr @@ -0,0 +1,13 @@ +use nivc::FoldingVariables; + +/// Square only the first register. 
+pub fn main(folding_variables: pub FoldingVariables<2>, next_pc: Field) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [ + folding_variables.registers[0] * folding_variables.registers[0], + folding_variables.registers[1], + ], + program_counter: next_pc, + } +} + diff --git a/examples/swap_memory/Nargo.toml b/examples/swap_memory/Nargo.toml new file mode 100644 index 0000000..27b99ed --- /dev/null +++ b/examples/swap_memory/Nargo.toml @@ -0,0 +1,9 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="swap_memory" +type ="bin" +version ="0.1.0" + +[dependencies] +nivc={ path="../../nivc" } diff --git a/examples/swap_memory/src/main.nr b/examples/swap_memory/src/main.nr new file mode 100644 index 0000000..dfaea29 --- /dev/null +++ b/examples/swap_memory/src/main.nr @@ -0,0 +1,9 @@ +use nivc::FoldingVariables; + +/// Swap the two registers. +pub fn main(folding_variables: pub FoldingVariables<2>, next_pc: Field) -> pub FoldingVariables<2> { + FoldingVariables { + registers: [folding_variables.registers[1], folding_variables.registers[0]], + program_counter: next_pc, + } +} diff --git a/frontend/Cargo.toml b/frontend/Cargo.toml new file mode 100644 index 0000000..3db7bfb --- /dev/null +++ b/frontend/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name ="client-side-prover-frontend" +version="0.1.0" +edition="2021" + +[dependencies] +client-side-prover={ path="../prover" } +serde ={ workspace=true } +serde_json ={ workspace=true } +thiserror ={ workspace=true } +tracing ={ workspace=true } +bellpepper-core ={ workspace=true } +halo2curves ={ workspace=true } + +# noir +acvm ={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +noirc_abi={ git="https://github.com/noir-lang/noir", rev="v1.0.0-beta.2" } +ark-bn254="0.5" + +[dev-dependencies] +tracing-test ={ workspace=true } +tempdir ="0.3.7" +client-side-prover-frontend={ path=".", features=["demo"] } +tempfile ="3.17" +bincode ={ workspace=true } + +[features] +demo=[] diff 
--git a/frontend/src/error.rs b/frontend/src/error.rs new file mode 100644 index 0000000..6520aec --- /dev/null +++ b/frontend/src/error.rs @@ -0,0 +1,35 @@ +//! # Error Handling +//! +//! This module defines the error types used throughout the crate. +//! `FrontendError` is the primary error type that encapsulates various +//! lower-level errors that might occur during proof generation, verification, +//! and other operations. + +use thiserror::Error; + +/// Represents errors that can occur in the frontend operations of the NIVC system. +/// +/// This enum encapsulates various error types from dependent libraries as well as +/// custom error conditions specific to this crate. +#[derive(Debug, Error)] +pub enum FrontendError { + /// The error is a `bellpepper_core::SynthesisError` + #[error(transparent)] + Synthesis(#[from] bellpepper_core::SynthesisError), + + /// The error is a `std::io::Error` + #[error(transparent)] + Io(#[from] std::io::Error), + + /// The error is a `client_side_prover::errors::NovaError` + #[error(transparent)] + Nova(#[from] client_side_prover::errors::NovaError), + + /// The error is a `client_side_prover::supernova::error::SuperNovaError` + #[error(transparent)] + SuperNova(#[from] client_side_prover::supernova::error::SuperNovaError), + + /// The error is a [`client_side_prover::fast_serde::SerdeByteError`] + #[error(transparent)] + FastSerde(#[from] client_side_prover::fast_serde::SerdeByteError), +} diff --git a/frontend/src/lib.rs b/frontend/src/lib.rs new file mode 100644 index 0000000..9db7b17 --- /dev/null +++ b/frontend/src/lib.rs @@ -0,0 +1,165 @@ +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + +//! # NIVC Folding for Noir Circuits +//! +//! This crate provides an frontend implementation to use Non-uniform Incrementally Verifiable +//! Computation (NIVC) folding for Noir circuits. NIVC allows for incremental verification of +//! 
computations across different circuit types, enabling complex proof systems that can switch +//! between different circuit implementations during execution. +//! +//! ## Key Components +//! +//! - **Noir Programs**: Representation and handling of Noir language programs +//! - **Switchboard**: Manages the flow between different circuit implementations +//! - **Setup**: Handles parameter generation and initialization for the proof system +//! - **Proof Generation**: Creation and verification of folding proofs +//! +//! ## Cryptographic Backends +//! +//! The crate uses several cryptographic backends: +//! - Primary curve: bn254 (also known as BN256) +//! - Secondary curve: Grumpkin +//! - Proof systems: SuperNova, Spartan R1CS SNARKs +//! +//! ## Memory Models +//! +//! The crate supports two memory models: +//! - **ROM (Read-Only Memory)**: All computation steps are known in advance +//! - **RAM (Random Access Memory)**: Computation steps are determined dynamically +//! +//! ## Example Usage +//! +//! The crate provides demo implementations accessible via the `demo` module when +//! built with the `demo` feature. + +use client_side_prover::{ + provider::GrumpkinEngine, + spartan::batched::BatchedRelaxedR1CSSNARK, + supernova::TrivialCircuit, + traits::{Engine, Group}, +}; +use halo2curves::ff::Field; +use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; + +use crate::error::FrontendError; + +pub mod error; +pub mod noir; +pub mod program; +pub mod setup; + +/// Represents the scalar field for the primary curve (bn254) +/// +/// This type is used for representing field elements in the scalar field +/// of the primary elliptic curve used in the proof system. +pub type Scalar = ::Scalar; + +/// Represents the params needed to create `PublicParams` alongside the +/// circuits' R1CSs. +/// +/// These auxiliary parameters contain the cryptographic context needed for +/// setting up the proof system. 
+pub type AuxParams = client_side_prover::supernova::AuxParams; +/// The `ProverKey` needed to create a `CompressedSNARK`. +/// +/// This key is used by the prover to generate cryptographic proofs. +pub type ProverKey = client_side_prover::supernova::snark::ProverKey; +/// The `VerifierKey` needed to create a `CompressedSNARK`. +/// +/// This key is used by the verifier to validate cryptographic proofs. +pub type VerifierKey = client_side_prover::supernova::snark::VerifierKey; + +/// Represents the `CompressedSNARK` which is a succinct proof of a `RecursiveSNARK`. +pub type CompressedSNARK = client_side_prover::supernova::snark::CompressedSNARK; + +/// Represents the first elliptic curve engine used in the proof system. +/// +/// The primary engine uses BN256 with KZG polynomial commitments. +type E1 = client_side_prover::provider::Bn256EngineKZG; +/// Represents the second elliptic curve engine used in the proof system. +/// +/// The secondary engine uses the Grumpkin curve, which is cycle-friendly with BN256. +type E2 = GrumpkinEngine; +/// Represents the group associated with the first elliptic curve engine. +/// +/// This group is used for cryptographic operations in the primary curve. +type G1 = ::GE; +/// Represents the evaluation engine for the first elliptic curve. +/// +/// This evaluation engine handles polynomial evaluations for the primary curve. +type EE1 = client_side_prover::provider::hyperkzg::EvaluationEngine; +/// Represents the evaluation engine for the second elliptic curve. +/// +/// This evaluation engine handles polynomial evaluations for the secondary curve. +type EE2 = client_side_prover::provider::ipa_pc::EvaluationEngine; +/// Represents the SNARK for the first elliptic curve. +/// +/// This SNARK implementation is used for generating proofs on the primary curve. +type S1 = BatchedRelaxedR1CSSNARK; +/// Represents the SNARK for the second elliptic curve. 
+/// +/// This SNARK implementation is used for generating proofs on the secondary curve. +type S2 = BatchedRelaxedR1CSSNARK; + +#[cfg(any(test, feature = "demo"))] +/// Demo module providing example Noir programs for testing and demonstration +/// +/// This module is only available when the crate is built with the `demo` feature +/// or in test mode. It is also used to test the crate's functionality. +pub mod demo { + use crate::noir::NoirProgram; + + /// Creates a basic Noir program example + /// + /// Loads a compiled Noir program that performs simple operations that comprise a single ACIR + /// gate. + pub fn basic() -> NoirProgram { + let bytecode = std::fs::read("../target/basic.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Loads a compiled Noir program that demonstrates adding external private values to the running + /// state. + pub fn add_external() -> NoirProgram { + let bytecode = + std::fs::read("../target/add_external.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Creates a Noir program that squares the zeroth element of its input + pub fn square_zeroth() -> NoirProgram { + let bytecode = + std::fs::read("../target/square_zeroth.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Creates a Noir program that demonstrates memory swapping between the running state and the + /// folding memory. + pub fn swap_memory() -> NoirProgram { + let bytecode = + std::fs::read("../target/swap_memory.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Creates a Noir program implementing the Poseidon hash function on the running state. 
+ pub fn poseidon() -> NoirProgram { + let bytecode = + std::fs::read("../target/poseidon.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Creates a Noir program that is the even case of the function in the Collatz conjecture. + pub fn collatz_even() -> NoirProgram { + let bytecode = + std::fs::read("../target/collatz_even.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } + + /// Creates a Noir program that is the odd case of the function in the Collatz conjecture. + pub fn collatz_odd() -> NoirProgram { + let bytecode = + std::fs::read("../target/collatz_odd.json").expect("Failed to read Noir program file"); + NoirProgram::new(&bytecode) + } +} diff --git a/frontend/src/noir.rs b/frontend/src/noir.rs new file mode 100644 index 0000000..78a31c2 --- /dev/null +++ b/frontend/src/noir.rs @@ -0,0 +1,516 @@ +//! # Noir Program Integration +//! +//! This module provides the integration between Noir programs and the NIVC system. +//! It handles the translation of Noir's ACIR (Abstract Circuit Intermediate Representation) +//! into constraints that can be used in the folding proof system. This allows Noir programs +//! to be used as circuit components in Non-uniform Incrementally Verifiable Computation. +//! +//! ## Key Components +//! +//! - `NoirProgram`: Represents a compiled Noir program with its bytecode and ABI +//! - `StepCircuit` implementation: Allows Noir programs to be used in the `SuperNova` NIVC system +//! 
- Field conversion functions: Convert between ACIR field representation and proof system fields + +use std::collections::{BTreeMap, HashMap}; + +use acvm::{ + acir::{ + acir_field::GenericFieldElement, + circuit::{brillig::BrilligBytecode, Circuit, Opcode, Program}, + native_types::{Witness, WitnessMap}, + }, + blackbox_solver::StubbedBlackBoxSolver, + pwg::ACVM, + AcirField, +}; +use ark_bn254::Fr; +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError}; +use client_side_prover::supernova::StepCircuit; +use halo2curves::ff::PrimeField; +use noirc_abi::{input_parser::InputValue, Abi, AbiType, InputMap}; +use tracing::{error, trace}; + +use super::*; + +/// Represents a compiled Noir program ready for execution in the NIVC system +/// +/// A `NoirProgram` contains the compiled bytecode of a Noir program along with its ABI +/// (Application Binary Interface) which describes the program's inputs and outputs. +/// It can be used as a circuit component in the `SuperNova` NIVC system. 
+#[derive(Clone, Serialize, Deserialize, Debug)] +pub struct NoirProgram { + /// The program's ABI describing its inputs and outputs + pub abi: Abi, + + /// The program's bytecode in ACIR format, serialized as base64 + #[serde( + serialize_with = "Program::serialize_program_base64", + deserialize_with = "Program::deserialize_program_base64" + )] + pub bytecode: Program>, + + /// Optional witness inputs for the program (is used internally by the [`program::run`] function) + #[serde(skip)] + pub witness: Option, + + /// The index of this program in the switchboard's circuit list + #[serde(skip)] + pub index: usize, +} + +impl NoirProgram { + /// Creates a new `NoirProgram` from JSON bytecode + /// + /// # Arguments + /// + /// * `bin` - The JSON bytecode of a compiled Noir program + /// + /// # Returns + /// + /// A new `NoirProgram` instance + pub fn new(bin: &[u8]) -> Self { serde_json::from_slice(bin).unwrap() } + + /// Gets the main circuit from the program + /// + /// # Returns + /// + /// A reference to the main circuit function + pub fn circuit(&self) -> &Circuit> { &self.bytecode.functions[0] } + + /// Gets the unconstrained functions from the program + /// + /// Unconstrained functions are functions that are executed during witness generation + /// but do not contribute to the circuit's constraints. These are handled by the + /// [`StubbedBlackBoxSolver`]. 
+ /// + /// # Returns + /// + /// A reference to the list of unconstrained functions + pub fn unconstrained_functions(&self) -> &Vec>> { + &self.bytecode.unconstrained_functions + } + + /// Sets the witness inputs for the program + /// + /// # Arguments + /// + /// * `witness` - The input map containing witness values + pub fn set_inputs(&mut self, witness: InputMap) { self.witness = Some(witness); } +} + +impl StepCircuit for NoirProgram { + /// Returns the number of registers in the folding state + /// + /// This is determined by examining the ABI to find the "registers" array + /// in the `FoldingVariables` struct. + fn arity(&self) -> usize { + let input_type = self + .abi + .parameters + .iter() + .find(|param| { + if let AbiType::Struct { path, .. } = ¶m.typ { + path == "nivc::FoldingVariables" + } else { + false + } + }) + .map(|param| ¶m.typ); + + let return_type = self.abi.return_type.as_ref().map(|ret| &ret.abi_type); + + let get_register_length = |typ: &AbiType| -> usize { + if let AbiType::Struct { fields, .. } = typ { + if let Some((_, AbiType::Array { length, .. })) = + fields.iter().find(|(name, _)| name == "registers") + { + *length as usize + } else { + panic!("FoldingVariables missing registers array or invalid type") + } + } else { + panic!("Expected struct type for FoldingVariables") + } + }; + + match (input_type, return_type) { + (Some(input), Some(output)) => { + if let (AbiType::Struct { path: in_path, .. }, AbiType::Struct { path: out_path, .. 
}) = + (input, output) + { + if in_path == "nivc::FoldingVariables" && out_path == "nivc::FoldingVariables" { + let in_len = get_register_length(input); + let out_len = get_register_length(output); + + assert!( + in_len == out_len, + "Input and output must have same number of registers: {in_len} vs {out_len}", + ); + + return in_len; + } + } + panic!("Both input and output must be nivc::FoldingVariables structs") + }, + _ => panic!("Missing input or output FoldingVariables type"), + } + } + + /// Returns the index of this circuit in the switchboard + fn circuit_index(&self) -> usize { self.index } + + /// Synthesizes the Noir program into a constraint system + /// + /// This is the core method that translates the Noir program's ACIR representation + /// into constraints that can be used in the folding proof system. It processes + /// each gate in the ACIR circuit and creates corresponding constraints in the + /// target constraint system. + /// + /// # Arguments + /// + /// * `cs` - The constraint system to add constraints to + /// * `pc` - The program counter (next circuit to execute) + /// * `z` - The current folding state (register values) + /// + /// # Returns + /// + /// A tuple of the next program counter and updated register values + #[allow(clippy::too_many_lines)] + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + trace!("Synthesizing NoirProgram with {} inputs", z.len()); + + // Create variable tracker and initialize ACVM + let mut allocated_vars: HashMap> = HashMap::new(); + + let acvm_witness_map = self.witness.as_ref().map(|inputs| { + let mut acvm = ACVM::new( + &StubbedBlackBoxSolver(false), + &self.circuit().opcodes, + WitnessMap::new(), + self.unconstrained_functions(), + &[], + ); + + // TODO: Can we remove this clone since it may be a lot of data? 
+ let mut inputs_with_folding_variables = inputs.clone(); + let folding_variables = InputValue::Struct(BTreeMap::from([ + ( + "registers".to_string(), + InputValue::Vec( + z.iter() + .filter_map(|var| { + var.get_value().map(|v| InputValue::Field(convert_to_acir_field(v))) + }) + .collect(), + ), + ), + ( + // TODO: This is a bit hacky with unwraps + "program_counter".to_string(), + InputValue::Field(convert_to_acir_field(pc.unwrap().get_value().unwrap())), + ), + ])); + inputs_with_folding_variables.insert("folding_variables".to_string(), folding_variables); + + // Encode inputs through ABI + if let Ok(encoded_map) = self.abi.encode(&inputs_with_folding_variables, None) { + for (witness, value) in encoded_map { + acvm.overwrite_witness(witness, value); + } + } + + // Solve and get resulting witness map + debug!("Executing ACVM solve..."); + acvm.solve(); + acvm.finalize() + }); + + // Allocate variables from public inputs (z) + for (i, witness) in self.circuit().public_parameters.0.iter().enumerate() { + if i < z.len() { + allocated_vars.insert(*witness, z[i].clone()); + } + } + + // Helper for getting/creating variables + let get_var = |witness: &Witness, + vars: &mut HashMap>, + cs: &mut CS| { + if let Some(var) = vars.get(witness) { + Ok::<_, SynthesisError>(var.get_variable()) + } else { + let value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(witness)) + .map(|&v| convert_to_halo2_field(v)); + + let var = AllocatedNum::alloc(cs.namespace(|| format!("w{}", witness.as_usize())), || { + Ok(value.unwrap_or_else(Scalar::zero)) + })?; + + vars.insert(*witness, var.clone()); + Ok(var.get_variable()) + } + }; + + // Process gates using R1CS approach + for (idx, opcode) in self.circuit().opcodes.iter().enumerate() { + if let Opcode::AssertZero(gate) = opcode { + // Create a single linear combination that will be constrained to zero + let mut zero_lc = LinearCombination::zero(); + + // Handle mul terms by creating intermediate variables for each product + 
for mul_term in &gate.mul_terms { + let left_variable = get_var(&mul_term.1, &mut allocated_vars, cs)?; + let right_variable = get_var(&mul_term.2, &mut allocated_vars, cs)?; + + // Get the values if available + let left_value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.1)) + .map(|&v| convert_to_halo2_field(v)); + + let right_value = acvm_witness_map + .as_ref() + .and_then(|map| map.get(&mul_term.2)) + .map(|&v| convert_to_halo2_field(v)); + + // Create a new variable for the product + let product = AllocatedNum::alloc( + cs.namespace(|| format!("prod_g{idx}_t{}", mul_term.1.as_usize())), + || { + let l = left_value.unwrap_or_else(Scalar::zero); + let r = right_value.unwrap_or_else(Scalar::zero); + Ok(l * r) + }, + )?; + + // Enforce that this is indeed the product + cs.enforce( + || format!("prod_constraint_g{idx}_t{}", mul_term.1.as_usize()), + |lc| lc + left_variable, + |lc| lc + right_variable, + |lc| lc + product.get_variable(), + ); + + // Add this product to our zero linear combination with the coefficient + zero_lc = zero_lc + (convert_to_halo2_field(mul_term.0), product.get_variable()); + } + + // Handle linear terms (these go into the zero linear combination) + for add_term in &gate.linear_combinations { + let var = get_var(&add_term.1, &mut allocated_vars, cs)?; + zero_lc = zero_lc + (convert_to_halo2_field(add_term.0), var); + } + + // Handle constant term + if !gate.q_c.is_zero() { + zero_lc = zero_lc + (convert_to_halo2_field(gate.q_c), CS::one()); + } + + // Enforce that the entire expression equals zero + cs.enforce( + || format!("constraint_g{idx}"), + |_| LinearCombination::zero() + CS::one(), + |_| zero_lc.clone(), + |_| LinearCombination::zero(), + ); + } + } + + // Prepare return values + let mut return_values = vec![]; + for ret in &self.circuit().return_values.0 { + // Ensure return witness has an allocated variable + if !allocated_vars.contains_key(ret) { + let value = acvm_witness_map + .as_ref() + .and_then(|map| 
map.get(ret)) + .map(|&v| convert_to_halo2_field(v)); + + let var = AllocatedNum::alloc(cs.namespace(|| format!("ret{}", ret.as_usize())), || { + Ok(value.unwrap_or_else(Scalar::zero)) + })?; + + allocated_vars.insert(*ret, var); + } + return_values.push(allocated_vars[ret].clone()); + } + + // Extract return structure (registers and next_pc) + if let Some(noirc_abi::AbiReturnType { abi_type: AbiType::Struct { fields, .. }, .. }) = + &self.abi.return_type + { + let registers_field = fields + .iter() + .find(|(name, _)| name == "registers") + .unwrap_or_else(|| panic!("Missing 'registers' field")); + + let registers_length = match ®isters_field.1 { + AbiType::Array { length, .. } => *length as usize, + _ => panic!("Expected registers to be an array type"), + }; + + if return_values.len() > registers_length { + let registers = return_values[0..registers_length].to_vec(); + let next_pc = Some(return_values[registers_length].clone()); + + trace!("Extracted {} registers and program counter", registers.len()); + return Ok((next_pc, registers)); + } + error!( + "Not enough return values. 
Expected at least {}, got {}", + registers_length + 1, + return_values.len() + ); + return Err(SynthesisError::Unsatisfiable); + } + + Err(SynthesisError::Unsatisfiable) + } +} + +/// Converts a field element from ACIR representation to Halo2 representation +/// +/// # Arguments +/// +/// * `f` - The field element in ACIR representation +/// +/// # Returns +/// +/// The field element in Halo2 representation +fn convert_to_halo2_field(f: GenericFieldElement) -> Scalar { + let bytes = f.to_be_bytes(); + let mut arr = [0u8; 32]; + arr.copy_from_slice(&bytes[..32]); + arr.reverse(); + Scalar::from_repr(arr).unwrap() +} + +/// Converts a field element from Halo2 representation to ACIR representation +/// +/// # Arguments +/// +/// * `f` - The field element in Halo2 representation +/// +/// # Returns +/// +/// The field element in ACIR representation +fn convert_to_acir_field(f: Scalar) -> GenericFieldElement { + let mut bytes = f.to_bytes(); + bytes.reverse(); + GenericFieldElement::from_be_bytes_reduce(&bytes) +} + +#[cfg(test)] +mod tests { + use client_side_prover::bellpepper::shape_cs::ShapeCS; + + use super::*; + use crate::demo::{basic, poseidon, square_zeroth}; + + fn add_external() -> NoirProgram { + let json_path = "../target/add_external.json"; + let json_data = std::fs::read(json_path).expect("Failed to read add_external.json"); + + serde_json::from_slice(&json_data).expect("Failed to deserialize add_external.json") + } + + #[test] + fn test_conversions() { + let f = Scalar::from(5); + let acir_f = convert_to_acir_field(f); + assert_eq!(acir_f, GenericFieldElement::from_repr(Fr::from(5))); + + let f = GenericFieldElement::from_repr(Fr::from(3)); + let halo2_f = convert_to_halo2_field(f); + assert_eq!(halo2_f, Scalar::from(3)); + } + + #[test] + fn test_deserialize_abi() { + let program = add_external(); + + // Verify parameters + assert_eq!(program.abi.parameters.len(), 3); + assert_eq!(program.abi.parameters[0].name, "folding_variables"); + 
assert_eq!(program.abi.parameters[1].name, "external"); + assert_eq!(program.abi.parameters[2].name, "next_pc"); + + // Verify return type + if let AbiType::Struct { fields, path } = &program.abi.return_type.as_ref().unwrap().abi_type { + assert_eq!(fields.len(), 2); + assert_eq!(path, "nivc::FoldingVariables"); + assert_eq!(fields[0].0, "registers"); + assert_eq!(fields[1].0, "program_counter"); + } else { + panic!("Expected tuple return type, got {:?}", program.abi.return_type); + } + } + + // TODO: Worth checking here that each gate has mul, add, and constant terms. + #[test] + fn test_constraint_system_basic() { + let program = basic(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 3); + } + + #[test] + fn test_constraint_system_add_external() { + let program = add_external(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 3); + } + + #[test] + fn test_constraint_system_square_zeroth() { + let program = square_zeroth(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 4); + } + + #[test] + fn 
test_constraint_system_poseidon() { + let program = poseidon(); + + let mut cs = ShapeCS::::new(); + let pc = Some(AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(0))).unwrap()); + let z = vec![ + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(2))).unwrap(), + AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(1))).unwrap(), + ]; + + let _ = program.synthesize(&mut cs, pc.as_ref(), z.as_ref()).unwrap(); + assert_eq!(cs.num_constraints(), 560); + } +} diff --git a/frontend/src/program.rs b/frontend/src/program.rs new file mode 100644 index 0000000..8e13171 --- /dev/null +++ b/frontend/src/program.rs @@ -0,0 +1,422 @@ +//! # Program Execution +//! +//! This module provides the core execution functionality for NIVC (Non-uniform Incrementally +//! Verifiable Computation) with Noir circuits. It defines the memory models, switchboard logic for +//! circuit coordination, and functions for running programs and compressing proofs. +//! +//! ## Memory Models +//! +//! Two memory models are supported: +//! - **ROM (Read-Only Memory)**: Programs with all inputs known in advance +//! - **RAM (Random-Access Memory)**: Programs that compute inputs dynamically during execution +//! +//! ## Switchboard +//! +//! The [`Switchboard`] struct manages a collection of Noir circuits and controls the execution flow +//! between them. It maintains: +//! - A list of circuits +//! - The current program counter (circuit index) +//! - Input data appropriate for the memory model +//! +//! ## Execution Functions +//! +//! - [`run`]: Executes a program with the appropriate memory model +//! 
- [`compress`]: Compresses a recursive SNARK into a more compact form for verification + +use client_side_prover::supernova::{NonUniformCircuit, RecursiveSNARK}; +use halo2curves::{ff::PrimeField, grumpkin}; +use noirc_abi::InputMap; +use tracing::trace; + +use super::*; +use crate::{ + noir::NoirProgram, + setup::{Ready, Setup}, +}; + +/// Trait for memory models used in the NIVC system +/// +/// This trait is sealed, meaning it can only be implemented by the types in this crate +/// (specifically, `ROM` and `RAM`). +pub trait Memory: private::Sealed { + /// The data type associated with this memory model + type Data; +} + +/// Private module containing implementation details for sealing the Memory trait +mod private { + use super::{Configuration, RAM, ROM}; + /// Seals the [`Memory`] trait + pub trait Sealed {} + impl Sealed for ROM {} + impl Sealed for RAM {} + impl Sealed for Configuration {} +} + +/// Read-Only Memory model +/// +/// In ROM mode, all inputs for the computation are known in advance and provided +/// as a sequence of witness values. +#[derive(Debug, Clone)] +pub struct ROM {} +impl Memory for ROM { + /// ROM uses a vector of `InputMaps` as its data + type Data = Vec; +} + +/// Random-Access Memory model +/// +/// In RAM mode, inputs are computed dynamically during execution. Each circuit +/// can influence the execution path by setting the program counter for the next step. +#[derive(Debug, Clone)] +pub struct RAM {} +impl Memory for RAM { + /// RAM doesn't require any additional input data + type Data = (); +} + +/// A memory model that doesn't require any additional input data. +/// +/// This is just a placeholder to allow the setup to be easily created then serialized and +/// deserialized. 
+#[derive(Debug, Clone)] +pub struct Configuration {} +impl Memory for Configuration { + type Data = (); +} + +/// Manages a collection of circuits and controls execution flow +/// +/// The switchboard holds all the circuits that can be executed in a NIVC computation, +/// and maintains the program counter (current circuit index). It is parameterized by +/// a memory model that determines how inputs are handled. +#[derive(Debug, Clone)] +pub struct Switchboard { + /// The collection of Noir circuits that can be executed + pub(crate) circuits: Vec, + + /// Public input values (initial registers for the computation) + pub(crate) public_input: Vec, + + /// The initial circuit index to start execution from + pub(crate) initial_circuit_index: usize, + + /// Input data specific to the memory model + pub(crate) switchboard_inputs: M::Data, +} + +impl Switchboard { + /// Creates a new switchboard with Blank memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits that can be executed + /// + /// # Returns + pub fn new(mut circuits: Vec) -> Self { + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input: vec![], initial_circuit_index: 0, switchboard_inputs: () } + } + + pub fn into_rom( + self, + initial_circuit_index: usize, + switchboard_inputs: Vec, + public_input: Vec, + ) -> Switchboard { + Switchboard { circuits: self.circuits, public_input, initial_circuit_index, switchboard_inputs } + } + + pub fn into_ram( + self, + initial_circuit_index: usize, + public_input: Vec, + ) -> Switchboard { + Switchboard { + circuits: self.circuits, + public_input, + initial_circuit_index, + switchboard_inputs: self.switchboard_inputs, + } + } +} + +impl Switchboard { + /// Creates a new switchboard with Read-Only Memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits 
that can be executed + /// * `switchboard_inputs` - Sequence of inputs for each execution step + /// * `public_input` - Initial register values + /// * `initial_circuit_index` - The starting circuit index + /// + /// # Returns + /// + /// A new `Switchboard` instance configured for ROM execution + pub fn new( + mut circuits: Vec, + switchboard_inputs: Vec, + public_input: Vec, + initial_circuit_index: usize, + ) -> Self { + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input, initial_circuit_index, switchboard_inputs } + } +} + +impl Switchboard { + /// Creates a new switchboard with Random-Access Memory model + /// + /// # Arguments + /// + /// * `circuits` - Collection of Noir circuits that can be executed + /// * `public_input` - Initial register values + /// * `initial_circuit_index` - The starting circuit index + /// + /// # Returns + /// + /// A new [`Switchboard`] instance configured for RAM execution + pub fn new( + mut circuits: Vec, + public_input: Vec, + initial_circuit_index: usize, + ) -> Self { + // Set the index of each circuit given the order they are passed in since this is skipped in + // serde + circuits.iter_mut().enumerate().for_each(|(i, c)| c.index = i); + Self { circuits, public_input, initial_circuit_index, switchboard_inputs: () } + } +} + +impl NonUniformCircuit for Switchboard { + type C1 = NoirProgram; + type C2 = TrivialCircuit; + + /// Returns the number of circuits in the switchboard + fn num_circuits(&self) -> usize { self.circuits.len() } + + /// Returns the primary circuit at the given index + fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { + self.circuits[circuit_index].clone() + } + + /// Returns the secondary circuit (always trivial for NIVC with Noir) + fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::default() } + + /// Returns the initial circuit index to 
start execution from + fn initial_circuit_index(&self) -> usize { self.initial_circuit_index } +} + +/// Executes a program with the appropriate memory model +/// +/// This function dispatches to either [`run_rom`] or [`run_ram`] based on the memory model. +/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails +pub fn run(setup: &Setup>) -> Result, FrontendError> { + if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches ROM + let setup = unsafe { + &*std::ptr::from_ref::>>(setup) + .cast::>>() + }; + run_rom(setup) + } else if std::any::type_name::() == std::any::type_name::() { + // Safety: We've verified the type matches RAM + let setup = unsafe { + &*std::ptr::from_ref::>>(setup) + .cast::>>() + }; + run_ram(setup) + } else { + unreachable!("The trait `Memory` is sealed, so you cannot reach this point") + } +} + +/// Executes a program using the ROM memory model +/// +/// In ROM mode, all inputs are known in advance and provided as a sequence. +/// The program executes each step with the corresponding input. 
+/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails +pub fn run_rom(setup: &Setup>) -> Result, FrontendError> { + info!("Starting SuperNova program with ROM memory model..."); + + let z0_primary = &setup.switchboard.public_input; + let z0_secondary = &[grumpkin::Fr::ZERO]; + let time = std::time::Instant::now(); + + let mut recursive_snark: Option> = None; + + // ROM-specific: iterate through predefined sequence of inputs + for (idx, witness) in setup.switchboard.switchboard_inputs.iter().enumerate() { + info!("Step {} of {} witnesses", idx + 1, setup.switchboard.switchboard_inputs.len()); + + // TODO: We should not clone the witness here + recursive_snark = + prove_single_step(setup, recursive_snark, Some(witness.clone()), z0_primary, z0_secondary)?; + } + + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + Ok(recursive_snark.unwrap()) +} + +/// Executes a program using the RAM memory model +/// +/// In RAM mode, inputs are computed dynamically during execution. Each circuit +/// can influence the execution path by setting the program counter for the next step. 
+/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// +/// # Returns +/// +/// A [`RecursiveSNARK`] representing the execution trace +/// +/// # Errors +/// +/// Returns a [`FrontendError`] if execution fails +pub fn run_ram(setup: &Setup>) -> Result, FrontendError> { + info!("Starting SuperNova program with RAM memory model..."); + + let z0_primary = &setup.switchboard.public_input; + let z0_secondary = &[grumpkin::Fr::ZERO]; + let time = std::time::Instant::now(); + + let mut recursive_snark: Option> = None; + let termination_pc = Scalar::ZERO - Scalar::ONE; + + // RAM-specific: loop until termination condition is met + loop { + // Check termination condition if we have a SNARK + if let Some(snark) = &recursive_snark { + let current_pc = snark.program_counter(); + if current_pc == termination_pc { + break; + } + } + + recursive_snark = prove_single_step( + setup, + recursive_snark, + None, // RAM doesn't use predefined witness values + z0_primary, + z0_secondary, + )?; + } + + trace!("Recursive loop of `program::run()` elapsed: {:?}", time.elapsed()); + Ok(recursive_snark.unwrap()) +} + +/// Helper function to prove a single step of execution +/// +/// This handles the common logic between ROM and RAM execution modes. 
+fn prove_single_step( + setup: &Setup>, + recursive_snark: Option>, + witness: Option, + z0_primary: &[Scalar], + z0_secondary: &[grumpkin::Fr], +) -> Result>, FrontendError> { + let program_counter = recursive_snark.as_ref().map_or_else( + || setup.switchboard.initial_circuit_index(), + |snark| { + u32::from_le_bytes(snark.program_counter().to_repr().as_ref()[0..4].try_into().unwrap()) + as usize + }, + ); + + debug!("Program counter = {:?}", program_counter); + + let mut circuit_primary = setup.switchboard.primary_circuit(program_counter); + + if let Some(w) = witness { + circuit_primary.witness = Some(w); + } else { + circuit_primary.witness = Some(InputMap::new()); + } + + let circuit_secondary = setup.switchboard.secondary_circuit(); + + let mut result = recursive_snark; + if result.is_none() { + result = Some(RecursiveSNARK::new( + &setup.params, + &setup.switchboard, + &circuit_primary, + &circuit_secondary, + z0_primary, + z0_secondary, + )?); + } + + // Prove the next step + info!("Proving single step..."); + let snark = result.as_mut().unwrap(); + snark.prove_step(&setup.params, &circuit_primary, &circuit_secondary)?; + info!("Done proving single step..."); + + Ok(result) +} + +/// Compresses a recursive SNARK into a compact proof for efficient verification +/// +/// # Arguments +/// +/// * `setup` - The setup parameters for the program +/// * `recursive_snark` - The recursive SNARK to compress +/// +/// # Returns +/// +/// A `CompressedProof` that can be serialized and later verified +/// +/// # Errors +/// +/// Returns a `FrontendError` if compression fails +pub fn compress( + setup: &Setup>, + recursive_snark: &RecursiveSNARK, +) -> Result { + let pk = CompressedSNARK::initialize_pk( + &setup.params, + setup.vk_digest_primary, + setup.vk_digest_secondary, + )?; + trace!( + "initialized pk pk_primary.digest={:?}, pk_secondary.digest={:?}", + pk.pk_primary.vk_digest, + pk.pk_secondary.vk_digest + ); + + debug!("`CompressedSNARK::prove STARTING 
PROVING!"); + let proof = CompressedSNARK::prove(&setup.params, &pk, recursive_snark)?; + debug!("`CompressedSNARK::prove completed!"); + + Ok(proof) +} diff --git a/frontend/src/setup.rs b/frontend/src/setup.rs new file mode 100644 index 0000000..0ff752d --- /dev/null +++ b/frontend/src/setup.rs @@ -0,0 +1,319 @@ +//! # Setup and Parameter Management +//! +//! This module handles the setup and parameter management for the NIVC system. +//! It provides functionality for: +//! +//! - Creating and managing cryptographic parameters +//! - Storing and loading setup data +//! - Converting between different setup states +//! +//! ## Setup States +//! +//! The setup can be in one of two states: +//! - **Offline**: Contains only auxiliary parameters without a switchboard (can be serialized for +//! storage) +//! - **Ready**: Complete setup with a switchboard that's ready for program execution +//! +//! ## Storage +//! +//! Setup parameters can be serialized and stored to disk, then later deserialized and +//! combined with a switchboard to create a ready setup. 
+ +use std::io::Cursor; + +use client_side_prover::{ + fast_serde::{self, FastSerde, SerdeByteError, SerdeByteTypes}, + supernova::{get_circuit_shapes, snark::CompressedSNARK, PublicParams}, + traits::{snark::default_ck_hint, Dual, Engine}, +}; +use tracing::debug; + +use super::*; +use crate::program::{Memory, Switchboard}; + +/// Trait that defines the status of a setup +/// +/// This sealed trait can only be implemented by the predefined status types: +/// - `Ready`: A setup that is ready for execution with a specific memory model +/// - `Offline`: A setup that only contains cryptographic parameters without a switchboard +pub trait Status: private::Sealed { + /// The switchboard type associated with this status + type Switchboard; + + /// The public parameters type associated with this status + type PublicParams; +} + +/// Private module for sealing the Status trait +mod private { + use super::{Offline, Ready}; + + /// Sealed trait implementation to restrict Status implementations + pub trait Sealed {} + impl Sealed for Ready {} + impl Sealed for Offline {} +} + +/// Represents a setup that is ready for execution with a specific memory model +/// +/// A `Ready` setup contains both the cryptographic parameters and a switchboard, +/// making it ready to execute programs. +#[derive(Debug, Clone)] +pub struct Ready { + /// Marker for the memory model type + _marker: std::marker::PhantomData, +} + +impl Status for Ready { + /// A ready setup uses a switchboard with the specified memory model + type PublicParams = PublicParams; + /// A ready setup has a specific switchboard associated with it + type Switchboard = Switchboard; +} + +/// Represents a setup that only contains cryptographic parameters without a switchboard +/// +/// An `Offline` setup can be serialized and stored, making it useful for saving +/// computationally expensive cryptographic parameters. 
+#[derive(Debug, Clone)] +pub struct Offline; + +impl Status for Offline { + /// An offline setup only contains auxiliary parameters + type PublicParams = AuxParams; + /// An offline setup doesn't have a switchboard + type Switchboard = (); +} + +/// Setup parameters for NIVC computation +/// +/// This structure holds the cryptographic parameters, verification key digests, +/// and optionally a switchboard depending on its status. +#[derive(Clone, Debug)] +pub struct Setup { + /// Cryptographic parameters (type depends on the status) + pub params: S::PublicParams, + + /// Primary verification key digest + pub vk_digest_primary: ::Scalar, + + /// Secondary verification key digest + pub vk_digest_secondary: as Engine>::Scalar, + + /// Switchboard (if the setup is [`Ready`]) or unit (if [`Offline`]) + pub switchboard: S::Switchboard, +} + +#[cfg(test)] +impl PartialEq for Setup { + fn eq(&self, other: &Self) -> bool { + self.vk_digest_primary == other.vk_digest_primary + && self.vk_digest_secondary == other.vk_digest_secondary + } +} + +// TODO: Possibly have a `get_vk` method that returns the verification key for the given setup + +impl Setup> { + /// Creates a new ready setup with the given switchboard + /// + /// This initializes the cryptographic parameters based on the circuits in the switchboard + /// and generates the verification key digests. 
+ /// + /// # Arguments + /// + /// * `switchboard` - The switchboard containing the circuits to be executed + /// + /// # Returns + /// + /// A new ready setup that can be used to execute programs + pub fn new(switchboard: Switchboard) -> Result { + let public_params = PublicParams::setup(&switchboard, &*default_ck_hint(), &*default_ck_hint()); + let (pk, _vk) = CompressedSNARK::::setup(&public_params)?; + + Ok(Self { + params: public_params, + vk_digest_primary: pk.pk_primary.vk_digest, + vk_digest_secondary: pk.pk_secondary.vk_digest, + switchboard, + }) + } + + /// Converts a ready setup to an offline setup + /// + /// This extracts the auxiliary parameters from the public parameters and + /// creates an offline setup without the switchboard, which can be serialized. + /// + /// # Returns + /// + /// An offline setup containing only the auxiliary parameters + fn into_offline(self) -> Setup { + Setup { + params: self.params.into_parts().1, + vk_digest_primary: self.vk_digest_primary, + vk_digest_secondary: self.vk_digest_secondary, + switchboard: (), + } + } + + /// Serializes the setup and stores it to a file + /// + /// This converts the setup to an offline setup, serializes it, and writes + /// the resulting bytes to the specified file path. + /// + /// # Arguments + /// + /// * `path` - The file path where the setup should be stored + /// + /// # Returns + /// + /// The serialized bytes on success, or a `FrontendError` on failure + pub fn store_file(self, path: &std::path::PathBuf) -> Result, FrontendError> { + let bytes = self.into_offline().to_bytes(); + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + + debug!("using path={:?}", path); + std::io::Write::write_all(&mut std::fs::File::create(path)?, &bytes)?; + + Ok(bytes) + } + + /// Returns the verifier key for the setup + /// + /// This method generates the verifier key for the setup using the public parameters. 
+ /// + /// # Returns + /// + /// The verifier key for the setup + pub fn verifier_key(&self) -> Result { + let (_, vk) = CompressedSNARK::setup(&self.params)?; + Ok(vk) + } +} + +impl Setup { + /// Converts an offline setup to a ready setup + /// + /// This combines the auxiliary parameters with a switchboard to create + /// a ready setup that can be used to execute programs. + /// + /// # Arguments + /// + /// * `switchboard` - The switchboard to be used for execution + /// + /// # Returns + /// + /// A ready setup containing the parameters and switchboard + pub fn into_ready(self, switchboard: Switchboard) -> Setup> { + Setup { + params: PublicParams::from_parts(get_circuit_shapes(&switchboard), self.params), + vk_digest_primary: self.vk_digest_primary, + vk_digest_secondary: self.vk_digest_secondary, + switchboard, + } + } + + /// Deserializes a setup from a file + /// + /// # Arguments + /// + /// * `path` - The file path where the setup should be stored + /// + /// # Returns + /// + /// The deserialized setup, or a [`FrontendError`] on failure + pub fn load_file(path: &std::path::PathBuf) -> Result { + let bytes = std::fs::read(path)?; + Ok(Self::from_bytes(&bytes)?) + } +} + +// TODO: We should consider using `rkyv` for serialization and deserialization +impl FastSerde for Setup { + /// Deserializes a setup from bytes + /// + /// # Arguments + /// + /// * `bytes` - The serialized setup data + /// + /// # Returns + /// + /// The deserialized offline setup, or a `SerdeByteError` on failure + fn from_bytes(bytes: &[u8]) -> Result { + let mut cursor = Cursor::new(bytes); + Self::validate_header(&mut cursor, SerdeByteTypes::ProverParams, 3)?; + + let params = + Self::read_section_bytes(&mut cursor, 1).map(|bytes| AuxParams::from_bytes(&bytes))??; + + let vk_digest_primary = Self::read_section_bytes(&mut cursor, 2) + .and_then(|bytes| bytes.try_into().map_err(|_| SerdeByteError::G1DecodeError)) + .map(|bytes| ::Scalar::from_bytes(&bytes))? 
+ .into_option() + .ok_or(SerdeByteError::G1DecodeError)?; + + let vk_digest_secondary = Self::read_section_bytes(&mut cursor, 3) + .and_then(|bytes| bytes.try_into().map_err(|_| SerdeByteError::G2DecodeError)) + .map(|bytes| as Engine>::Scalar::from_bytes(&bytes))? + .into_option() + .ok_or(SerdeByteError::G1DecodeError)?; + + Ok(Self { params, vk_digest_primary, vk_digest_secondary, switchboard: () }) + } + + /// Serializes a setup to bytes + /// + /// # Returns + /// + /// The serialized setup data + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(SerdeByteTypes::ProverParams as u8); + out.push(3); // num_sections + + Self::write_section_bytes(&mut out, 1, &self.params.to_bytes()); + Self::write_section_bytes(&mut out, 2, &self.vk_digest_primary.to_bytes()); + Self::write_section_bytes(&mut out, 3, &self.vk_digest_secondary.to_bytes()); + + out + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{demo::square_zeroth, program::Configuration}; + + #[test] + fn test_setup_and_params() { + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()])).unwrap(); + assert_eq!(setup.params.num_constraints_and_variables(0), (10009, 10001)); + } + + #[test] + fn test_setup_serialize() { + let setup = Setup::new(Switchboard::::new(vec![square_zeroth()])).unwrap(); + let offline_setup = setup.into_offline(); + let serialized = offline_setup.to_bytes(); + let deserialized = Setup::::from_bytes(&serialized).unwrap(); + assert_eq!(offline_setup, deserialized); + } + + #[test] + fn test_setup_store_file() { + let switchboard = Switchboard::::new(vec![square_zeroth()]); + let setup = Setup::new(switchboard.clone()).unwrap(); + let vk_digest_primary = setup.vk_digest_primary; + let vk_digest_secondary = setup.vk_digest_secondary; + let path = tempfile::tempdir().unwrap().into_path(); + let _bytes = setup.store_file(&path.join("setup.bytes")).unwrap(); + let stored_bytes = 
std::fs::read(path.join("setup.bytes")).unwrap(); + let deserialized = Setup::::from_bytes(&stored_bytes).unwrap(); + let ready_setup = deserialized.into_ready(switchboard); + assert_eq!(vk_digest_primary, ready_setup.vk_digest_primary); + assert_eq!(vk_digest_secondary, ready_setup.vk_digest_secondary); + } +} diff --git a/frontend/tests/end_to_end/mod.rs b/frontend/tests/end_to_end/mod.rs new file mode 100644 index 0000000..553bb28 --- /dev/null +++ b/frontend/tests/end_to_end/mod.rs @@ -0,0 +1,98 @@ +use std::fs; + +use acvm::acir::acir_field::GenericFieldElement; +use client_side_prover_frontend::{ + demo, + program::{self, Configuration, Switchboard}, + setup::Setup, + CompressedSNARK, Scalar, +}; +use noirc_abi::{input_parser::InputValue, InputMap}; +use tempfile::tempdir; + +use super::*; + +/// Note that this test goes through a flow that mimics the offline setup component, online proving +/// component, and a separate verification component. +#[test] +#[traced_test] +fn test_end_to_end_workflow() { + // ----------------------------------------------------------------------------------------------------------------- // + // Offline Setup Phase + // ----------------------------------------------------------------------------------------------------------------- // + // Step 1: Create demo programs for our test + let swap_memory_program = demo::swap_memory(); + let square_program = demo::square_zeroth(); + println!("1. Read programs"); + + // Step 2: Create switchboard with ROM memory model, no inputs are necessary since this is just + // creating the setup + let switchboard = + Switchboard::::new(vec![swap_memory_program.clone(), square_program.clone()]); + println!("2. Created switchboard"); + + // Step 3: Initialize the setup + let setup = Setup::new(switchboard.clone()).unwrap(); + println!("3. 
Initialized setup"); + + // Step 4: Save the setup to a file + let temp_dir = tempdir().unwrap(); + let file_path = temp_dir.path().join("test_setup.bytes"); + setup.store_file(&file_path).unwrap(); + println!("4. Saved setup to file"); + // ----------------------------------------------------------------------------------------------------------------- // + + // ----------------------------------------------------------------------------------------------------------------- // + // Online Proving Phase + // ----------------------------------------------------------------------------------------------------------------- // + // Step 5: Read the setup from the file + let psetup = Setup::load_file(&file_path).unwrap(); + println!("5. Read setup from file"); + + // Step 6: Ready the setup for proving with the switchboard + let input1 = + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64)))]); + let input2 = InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(-1_i128)), + )]); + // Briefly test the switchboard into_rom method + let pswitchboard = + switchboard.into_rom(0, vec![input1, input2], vec![Scalar::from(3), Scalar::from(5)]); + let psetup = psetup.into_ready(pswitchboard); + println!("6. Ready the setup for proving with the switchboard"); + + // Step 7: Run a proof + let recursive_snark = program::run(&psetup).unwrap(); + println!("7. Run a proof"); + + // Step 8: Compress the proof + let compressed_proof = program::compress(&psetup, &recursive_snark).unwrap(); + println!("8. Compressed the proof"); + + // Step 9: Serialize and store the proof in a file + let serialized_proof = bincode::serialize(&compressed_proof).unwrap(); + let proof_file_path = temp_dir.path().join("test_proof.bytes"); + fs::write(&proof_file_path, &serialized_proof).unwrap(); + println!("9. 
Saved the serialized proof to a file"); + // ----------------------------------------------------------------------------------------------------------------- // + + // ----------------------------------------------------------------------------------------------------------------- // + // Separate Verification Phase + // ----------------------------------------------------------------------------------------------------------------- // + // Step 10: Read and deserialize the proof + let proof_bytes_from_file = fs::read(&proof_file_path).unwrap(); + let deserialized_proof: CompressedSNARK = bincode::deserialize(&proof_bytes_from_file).unwrap(); + println!("10. Read and deserialized the proof"); + + // Step 11: Verify the proof digests match by loading the setup from file as if we were a verifier + let vsetup = Setup::load_file(&file_path).unwrap(); + let vswitchboard = Switchboard::::new(vec![swap_memory_program, square_program]); + let vsetup = vsetup.into_ready(vswitchboard); + let vk = vsetup.verifier_key().unwrap(); + deserialized_proof + .verify(&vsetup.params, &vk, recursive_snark.z0_primary(), recursive_snark.z0_secondary()) + .unwrap(); + println!("11. 
Verified the proof"); + // ----------------------------------------------------------------------------------------------------------------- // +} diff --git a/frontend/tests/ivc/mod.rs b/frontend/tests/ivc/mod.rs new file mode 100644 index 0000000..ce828a3 --- /dev/null +++ b/frontend/tests/ivc/mod.rs @@ -0,0 +1,263 @@ +use acvm::acir::acir_field::GenericFieldElement; +use client_side_prover::supernova::snark::CompressedSNARK; +use client_side_prover_frontend::{ + program::{compress, run, Switchboard, RAM, ROM}, + setup::Setup, + Scalar, +}; +use halo2curves::{ff::Field, grumpkin}; +use noirc_abi::{input_parser::InputValue, InputMap}; + +use super::*; + +#[test] +#[traced_test] +fn test_ivc() { + let programs = vec![square_zeroth()]; + // TODO: This is a hack to get the correct number of folds when there are no external inputs. + let switchboard_inputs = vec![ + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64)))]), + ]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + dbg!(&snark.zi_primary()); + assert_eq!(snark.zi_primary()[0], Scalar::from(256)); + assert_eq!(snark.zi_primary()[1], Scalar::from(1)); +} + +#[test] +#[traced_test] +fn test_ivc_private_inputs() { + let programs = vec![add_external()]; + let switchboard_inputs = vec![ + InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(3_u64)), + InputValue::Field(GenericFieldElement::from(3_u64)), + ]), + ), + ]), + InputMap::from([ + ("next_pc".to_string(), 
InputValue::Field(GenericFieldElement::from(0_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(420_u64)), + InputValue::Field(GenericFieldElement::from(69_u64)), + ]), + ), + ]), + ]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(1), Scalar::from(2)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + assert_eq!(zi[0], Scalar::from(424)); + assert_eq!(zi[1], Scalar::from(74)); +} + +#[test] +#[traced_test] +fn test_nivc() { + let programs = vec![add_external(), square_zeroth(), swap_memory()]; + let switchboard_inputs = vec![ + InputMap::from([ + ("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(1_u64))), + ( + "external".to_string(), + InputValue::Vec(vec![ + InputValue::Field(GenericFieldElement::from(5_u64)), + InputValue::Field(GenericFieldElement::from(7_u64)), + ]), + ), + ]), + InputMap::from([("next_pc".to_string(), InputValue::Field(GenericFieldElement::from(2_u64)))]), + InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(-1_i128)), + )]), + ]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(1), Scalar::from(2)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let zi = snark.zi_primary(); + dbg!(zi); + // First fold: + // step_out[0] == 1 + 5 == 6 + // step_out[1] == 2 + 7 == 9 + // Second fold: + // step_out[0] == 6 ** 2 == 36 + // step_out[1] == 9 + // Third fold: + // step_out[0] == 9 + // step_out[1] == 36 + assert_eq!(zi[0], Scalar::from(9)); + assert_eq!(zi[1], Scalar::from(36)); +} + +#[test] +#[traced_test] +fn test_ivc_verify() { + let programs = vec![square_zeroth()]; + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + 
let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + assert_eq!(z1_primary, vec![Scalar::from(4), Scalar::from(1)]); + assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); +} + +// TODO: Lots of clones here now. +#[test] +#[traced_test] +fn test_ivc_compression() { + let programs = vec![square_zeroth()]; + let switchboard_inputs = vec![InputMap::from([( + "next_pc".to_string(), + InputValue::Field(GenericFieldElement::from(0_u64)), + )])]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); +} + +#[test] +#[traced_test] +fn test_ivc_verify_basic() { + let programs = vec![basic()]; + let switchboard_inputs = vec![InputMap::from([ + ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), + ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), + ])]; + let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + assert_eq!(z1_primary, 
vec![Scalar::from(436)]); + assert_eq!(z1_secondary, vec![grumpkin::Fr::ZERO]); +} + +#[test] +#[traced_test] +fn test_ivc_compression_basic() { + let programs = vec![basic()]; + let switchboard_inputs = vec![InputMap::from([ + ("external_mul".to_string(), InputValue::Field(GenericFieldElement::from(3_u64))), + ("external_add".to_string(), InputValue::Field(GenericFieldElement::from(10_u64))), + ])]; + let switchboard = Switchboard::::new(programs, switchboard_inputs, vec![Scalar::from(2)], 0); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); +} + +#[test] +#[traced_test] +fn test_ivc_verify_poseidon() { + let programs = vec![poseidon()]; + let switchboard_inputs = vec![InputMap::new()]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); +} + +#[test] +#[traced_test] +fn test_ivc_compression_poseidon() { + let programs = vec![poseidon()]; + let switchboard_inputs = vec![InputMap::new()]; + let switchboard = Switchboard::::new( + programs, + switchboard_inputs, + vec![Scalar::from(2), Scalar::from(1)], + 0, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let compressed_proof = compress(&setup, &snark).unwrap(); + + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); +} + +#[test] 
+#[traced_test] +fn test_collatz() { + let programs = vec![collatz_even(), collatz_odd()]; + let collatz_start = 19; + let initial_circuit_index = collatz_start % 2; + let switchboard = Switchboard::::new( + programs, + vec![Scalar::from(collatz_start)], + initial_circuit_index as usize, + ); + let setup = Setup::new(switchboard).unwrap(); + let snark = run(&setup).unwrap(); + let (z1_primary, z1_secondary) = + snark.verify(&setup.params, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); + dbg!(&z1_primary); + dbg!(&snark.program_counter()); + assert_eq!(&z1_primary, snark.zi_primary()); + assert_eq!(&z1_secondary, snark.zi_secondary()); + + let compressed_proof = compress(&setup, &snark).unwrap(); + let (_, vk) = CompressedSNARK::setup(&setup.params).unwrap(); + compressed_proof.verify(&setup.params, &vk, &snark.z0_primary(), &snark.z0_secondary()).unwrap(); +} diff --git a/frontend/tests/lib.rs b/frontend/tests/lib.rs new file mode 100644 index 0000000..e15f91a --- /dev/null +++ b/frontend/tests/lib.rs @@ -0,0 +1,5 @@ +use client_side_prover_frontend::demo::*; +use tracing_test::traced_test; + +mod end_to_end; +mod ivc; diff --git a/nivc/Nargo.toml b/nivc/Nargo.toml new file mode 100644 index 0000000..8cff867 --- /dev/null +++ b/nivc/Nargo.toml @@ -0,0 +1,6 @@ +[package] +authors =["Colin Roberts"] +compiler_version=">=0.36.0" +name ="nivc" +type ="lib" +version ="0.1.0" diff --git a/nivc/src/lib.nr b/nivc/src/lib.nr new file mode 100644 index 0000000..49ac633 --- /dev/null +++ b/nivc/src/lib.nr @@ -0,0 +1,8 @@ +pub struct FoldingVariables { + pub registers: [Field; N], + pub program_counter: Field, +} + +// TODO: It would be nice to be able to force the `main` function to have a specific signature. In particular, we want: +// pub fn main(registers: pub [Field; N], input: T) -> pub FoldingOutput { .. } +// Perhaps this can be done with a macro almost like `#[nivc_main]`? 
diff --git a/prover/Cargo.toml b/prover/Cargo.toml new file mode 100644 index 0000000..3fc21c7 --- /dev/null +++ b/prover/Cargo.toml @@ -0,0 +1,79 @@ +[package] +name ="client-side-prover" +version ="0.1.0" +authors =["Pluto Engineering"] +edition ="2021" +description ="Client side proving" +readme ="README.md" +repository ="https://github.com/pluto/client-side-prover" +license-file="LICENSE" +keywords =["zkSNARKs", "cryptography", "proofs"] + +[dependencies] +bellpepper-core ={ workspace=true } +bellpepper ={ workspace=true } +ff ={ workspace=true } +digest ={ workspace=true } +halo2curves ={ workspace=true } +sha3 ={ workspace=true } +rayon ={ workspace=true } +rand_core ={ workspace=true } +rand_chacha ={ workspace=true } +subtle ={ workspace=true } +neptune ={ workspace=true } +generic-array ={ workspace=true } +num-bigint ={ workspace=true } +num-traits ={ workspace=true } +num-integer ={ workspace=true } +serde ={ workspace=true } +bincode ={ workspace=true } +bitvec ={ workspace=true } +byteorder ={ workspace=true } +thiserror ={ workspace=true } +group ={ workspace=true } +pairing ={ workspace=true } +tracing ={ workspace=true } +cfg-if ={ workspace=true } +once_cell ={ workspace=true } +itertools ={ workspace=true } +rand ={ workspace=true } +ref-cast ={ workspace=true } +static_assertions={ workspace=true } +rayon-scan ={ workspace=true } + +[target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] +# grumpkin-msm has been patched to support MSMs for the pasta curve cycle +# see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 +grumpkin-msm={ workspace=true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom={ workspace=true } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +proptest={ workspace=true } + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] +criterion={ version="0.5", features=["html_reports"] } + +[dev-dependencies] +flate2 ={ workspace=true } +hex ={ workspace=true } 
+sha2 ={ workspace=true } +tracing-test ={ workspace=true } +expect-test ={ workspace=true } +anyhow ={ workspace=true } +tap ={ workspace=true } +tracing-texray ={ workspace=true } +tracing-subscriber={ workspace=true } +handlebars ={ workspace=true } +serde_json ={ workspace=true } + +# [build-dependencies] +# vergen = { workspace = true } + +[features] +default=["grumpkin-msm/portable"] +# asm = ["halo2curves/asm"] +# Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. +# portable = ["grumpkin-msm/portable"] +# cuda = ["grumpkin-msm/cuda"] diff --git a/prover/src/bellpepper/mod.rs b/prover/src/bellpepper/mod.rs new file mode 100644 index 0000000..4c19d83 --- /dev/null +++ b/prover/src/bellpepper/mod.rs @@ -0,0 +1,62 @@ +//! Support for generating R1CS from [Bellpepper]. +//! +//! [Bellpepper]: https://github.com/argumentcomputer/bellpepper + +pub mod r1cs; +pub mod shape_cs; +pub mod solver; +pub mod test_shape_cs; + +#[cfg(test)] +mod tests { + use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; + use ff::PrimeField; + + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + provider::Bn256EngineKZG, + traits::{snark::default_ck_hint, Engine}, + }; + + fn synthesize_alloc_bit>(cs: &mut CS) { + // get two bits as input and check that they are indeed bits + let a = AllocatedNum::alloc_infallible(cs.namespace(|| "a"), || Fr::ONE); + let _ = a.inputize(cs.namespace(|| "a is input")); + cs.enforce( + || "check a is 0 or 1", + |lc| lc + CS::one() - a.get_variable(), + |lc| lc + a.get_variable(), + |lc| lc, + ); + let b = AllocatedNum::alloc_infallible(cs.namespace(|| "b"), || Fr::ONE); + let _ = b.inputize(cs.namespace(|| "b is input")); + cs.enforce( + || "check b is 0 or 1", + |lc| lc + CS::one() - b.get_variable(), + |lc| lc + b.get_variable(), + |lc| lc, + ); + } + + fn test_alloc_bit_with() { + // First create the shape + let mut cs: ShapeCS = 
ShapeCS::new(); + synthesize_alloc_bit(&mut cs); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Now get the assignment + let mut cs = SatisfyingAssignment::::new(); + synthesize_alloc_bit(&mut cs); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + // Make sure that this is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + #[test] + fn test_alloc_bit() { test_alloc_bit_with::(); } +} diff --git a/prover/src/bellpepper/r1cs.rs b/prover/src/bellpepper/r1cs.rs new file mode 100644 index 0000000..0cbb9e8 --- /dev/null +++ b/prover/src/bellpepper/r1cs.rs @@ -0,0 +1,147 @@ +//! Support for generating R1CS using bellpepper. + +#![allow(non_snake_case)] + +use bellpepper_core::{Index, LinearCombination}; +use ff::PrimeField; + +use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; +use crate::{ + errors::NovaError, + r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, + traits::Engine, + CommitmentKey, +}; + +/// `NovaWitness` provide a method for acquiring an `R1CSInstance` and +/// `R1CSWitness` from implementers. +pub trait NovaWitness { + /// Return an instance and witness, given a shape and ck. + fn r1cs_instance_and_witness( + self, + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; +} + +/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` +/// from implementers. +pub trait NovaShape { + /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. + /// A `CommitmentKeyHint` should be provided to help guide the construction + /// of the `CommitmentKey`. This parameter is documented in + /// `r1cs::R1CS::commitment_key`. + fn r1cs_shape_and_key(&self, ck_hint: &CommitmentKeyHint) -> (R1CSShape, CommitmentKey) { + let S = self.r1cs_shape(); + let ck = commitment_key(&S, ck_hint); + + (S, ck) + } + /// Return an appropriate `R1CSShape`. 
+ fn r1cs_shape(&self) -> R1CSShape; +} + +impl NovaWitness for SatisfyingAssignment { + fn r1cs_instance_and_witness( + self, + shape: &R1CSShape, + ck: &CommitmentKey, + ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { + let (input_assignment, aux_assignment) = self.to_assignments(); + let W = R1CSWitness::::new(shape, aux_assignment)?; + let X = input_assignment[1..].to_owned(); + + let comm_W = W.commit(ck); + + let instance = R1CSInstance::::new(shape, comm_W, X)?; + + Ok((instance, W)) + } +} + +macro_rules! impl_nova_shape { + ($name:ident) => { + impl NovaShape for $name + where E::Scalar: PrimeField + { + fn r1cs_shape(&self) -> R1CSShape { + let mut A = SparseMatrix::::empty(); + let mut B = SparseMatrix::::empty(); + let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); + + let mut num_cons_added = 0; + let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); + let num_inputs = self.num_inputs(); + let num_constraints = self.num_constraints(); + let num_vars = self.num_aux(); + + for constraint in self.constraints.iter() { + add_constraint(&mut X, num_vars, &constraint.0, &constraint.1, &constraint.2); + } + assert_eq!(num_cons_added, num_constraints); + + A.cols = num_vars + num_inputs; + B.cols = num_vars + num_inputs; + C.cols = num_vars + num_inputs; + + // Don't count One as an input for shape's purposes. 
+ let res = R1CSShape::new(num_constraints, num_vars, num_inputs - 1, A, B, C); + res.unwrap() + } + } + }; +} + +impl_nova_shape!(ShapeCS); +impl_nova_shape!(TestShapeCS); + +fn add_constraint( + X: &mut (&mut SparseMatrix, &mut SparseMatrix, &mut SparseMatrix, &mut usize), + num_vars: usize, + a_lc: &LinearCombination, + b_lc: &LinearCombination, + c_lc: &LinearCombination, +) { + let (A, B, C, nn) = X; + let n = **nn; + assert_eq!(n, A.num_rows(), "A: invalid shape"); + assert_eq!(n, B.num_rows(), "B: invalid shape"); + assert_eq!(n, C.num_rows(), "C: invalid shape"); + + let add_constraint_component = |index: Index, coeff: &S, M: &mut SparseMatrix| { + // we add constraints to the matrix only if the associated coefficient is + // non-zero + if *coeff != S::ZERO { + match index { + Index::Input(idx) => { + // Inputs come last, with input 0, representing 'one', + // at position num_vars within the witness vector. + let idx = idx + num_vars; + M.data.push(*coeff); + M.indices.push(idx); + }, + Index::Aux(idx) => { + M.data.push(*coeff); + M.indices.push(idx); + }, + } + } + }; + + for (index, coeff) in a_lc.iter() { + add_constraint_component(index.0, coeff, A); + } + A.indptr.push(A.indices.len()); + + for (index, coeff) in b_lc.iter() { + add_constraint_component(index.0, coeff, B) + } + B.indptr.push(B.indices.len()); + + for (index, coeff) in c_lc.iter() { + add_constraint_component(index.0, coeff, C) + } + C.indptr.push(C.indices.len()); + + **nn += 1; +} diff --git a/prover/src/bellpepper/shape_cs.rs b/prover/src/bellpepper/shape_cs.rs new file mode 100644 index 0000000..9752680 --- /dev/null +++ b/prover/src/bellpepper/shape_cs.rs @@ -0,0 +1,82 @@ +//! Support for generating R1CS shape using bellpepper. + +use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; +use ff::PrimeField; + +use crate::traits::Engine; + +/// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. 
+pub struct ShapeCS +where E::Scalar: PrimeField { + /// All constraints added to the `ShapeCS`. + pub constraints: + Vec<(LinearCombination, LinearCombination, LinearCombination)>, + inputs: usize, + aux: usize, +} + +impl ShapeCS { + /// Create a new, default `ShapeCS`, + pub fn new() -> Self { Self::default() } + + /// Returns the number of constraints defined for this `ShapeCS`. + pub fn num_constraints(&self) -> usize { self.constraints.len() } + + /// Returns the number of inputs defined for this `ShapeCS`. + pub fn num_inputs(&self) -> usize { self.inputs } + + /// Returns the number of aux inputs defined for this `ShapeCS`. + pub fn num_aux(&self) -> usize { self.aux } +} + +impl Default for ShapeCS { + fn default() -> Self { Self { constraints: vec![], inputs: 1, aux: 0 } } +} + +impl ConstraintSystem for ShapeCS { + type Root = Self; + + fn alloc(&mut self, _annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + self.aux += 1; + + Ok(Variable::new_unchecked(Index::Aux(self.aux - 1))) + } + + fn alloc_input(&mut self, _annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + self.inputs += 1; + + Ok(Variable::new_unchecked(Index::Input(self.inputs - 1))) + } + + fn enforce(&mut self, _annotation: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.constraints.push((a, b, c)); + } + + fn push_namespace(&mut self, _name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, { + } + + fn pop_namespace(&mut self) {} + + fn get_root(&mut self) -> &mut Self::Root { self } +} diff --git a/src/bellpepper/solver.rs b/prover/src/bellpepper/solver.rs similarity index 100% rename from 
src/bellpepper/solver.rs rename to prover/src/bellpepper/solver.rs diff --git a/prover/src/bellpepper/test_shape_cs.rs b/prover/src/bellpepper/test_shape_cs.rs new file mode 100644 index 0000000..b420e8c --- /dev/null +++ b/prover/src/bellpepper/test_shape_cs.rs @@ -0,0 +1,298 @@ +//! Support for generating R1CS shape using bellpepper. +//! `TestShapeCS` implements a superset of `ShapeCS`, adding non-trivial +//! namespace support for use in testing. + +use core::fmt::Write; +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap}, +}; + +use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; +use ff::{Field, PrimeField}; + +use crate::traits::Engine; + +#[derive(Clone, Copy)] +struct OrderedVariable(Variable); + +#[allow(dead_code)] +#[derive(Debug)] +enum NamedObject { + Constraint(usize), + Var(Variable), + Namespace, +} + +impl Eq for OrderedVariable {} +impl PartialEq for OrderedVariable { + fn eq(&self, other: &Self) -> bool { + match (self.0.get_unchecked(), other.0.get_unchecked()) { + (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => a == b, + _ => false, + } + } +} +impl PartialOrd for OrderedVariable { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} +impl Ord for OrderedVariable { + fn cmp(&self, other: &Self) -> Ordering { + match (self.0.get_unchecked(), other.0.get_unchecked()) { + (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => + a.cmp(b), + (Index::Input(_), Index::Aux(_)) => Ordering::Less, + (Index::Aux(_), Index::Input(_)) => Ordering::Greater, + } + } +} + +/// `TestShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a +/// circuit. +pub struct TestShapeCS { + named_objects: HashMap, + current_namespace: Vec, + /// All constraints added to the `TestShapeCS`. 
+ #[allow(clippy::type_complexity)] + pub constraints: Vec<( + LinearCombination, + LinearCombination, + LinearCombination, + String, + )>, + inputs: Vec, + aux: Vec, +} + +fn proc_lc( + terms: &LinearCombination, +) -> BTreeMap { + let mut map = BTreeMap::new(); + for (var, &coeff) in terms.iter() { + map.entry(OrderedVariable(var)).or_insert_with(|| Scalar::ZERO).add_assign(&coeff); + } + + // Remove terms that have a zero coefficient to normalize + let mut to_remove = vec![]; + for (var, coeff) in &map { + if coeff.is_zero().into() { + to_remove.push(*var); + } + } + + for var in to_remove { + map.remove(&var); + } + + map +} + +impl TestShapeCS +where E::Scalar: PrimeField +{ + #[allow(unused)] + /// Create a new, default `TestShapeCS`, + pub fn new() -> Self { Self::default() } + + /// Returns the number of constraints defined for this `TestShapeCS`. + pub fn num_constraints(&self) -> usize { self.constraints.len() } + + /// Returns the number of inputs defined for this `TestShapeCS`. + pub fn num_inputs(&self) -> usize { self.inputs.len() } + + /// Returns the number of aux inputs defined for this `TestShapeCS`. + pub fn num_aux(&self) -> usize { self.aux.len() } + + /// Print all public inputs, aux inputs, and constraint names. + #[allow(dead_code)] + pub fn pretty_print_list(&self) -> Vec { + let mut result = Vec::new(); + + for input in &self.inputs { + result.push(format!("INPUT {input}")); + } + for aux in &self.aux { + result.push(format!("AUX {aux}")); + } + + for (_a, _b, _c, name) in &self.constraints { + result.push(name.to_string()); + } + + result + } + + /// Print all iputs and a detailed representation of each constraint. 
+ #[allow(dead_code)] + pub fn pretty_print(&self) -> String { + let mut s = String::new(); + + for input in &self.inputs { + writeln!(s, "INPUT {}", &input).unwrap() + } + + let negone = -::ONE; + + let powers_of_two = (0..E::Scalar::NUM_BITS) + .map(|i| E::Scalar::from(2u64).pow_vartime([u64::from(i)])) + .collect::>(); + + let pp = |s: &mut String, lc: &LinearCombination| { + s.push('('); + let mut is_first = true; + for (var, coeff) in proc_lc::(lc) { + if coeff == negone { + s.push_str(" - ") + } else if !is_first { + s.push_str(" + ") + } + is_first = false; + + if coeff != ::ONE && coeff != negone { + for (i, x) in powers_of_two.iter().enumerate() { + if x == &coeff { + write!(s, "2^{i} . ").unwrap(); + break; + } + } + + write!(s, "{coeff:?} . ").unwrap() + } + + match var.0.get_unchecked() { + Index::Input(i) => { + write!(s, "`I{}`", &self.inputs[i]).unwrap(); + }, + Index::Aux(i) => { + write!(s, "`A{}`", &self.aux[i]).unwrap(); + }, + } + } + if is_first { + // Nothing was visited, print 0. + s.push('0'); + } + s.push(')'); + }; + + for (a, b, c, name) in &self.constraints { + s.push('\n'); + + write!(s, "{name}: ").unwrap(); + pp(&mut s, a); + write!(s, " * ").unwrap(); + pp(&mut s, b); + s.push_str(" = "); + pp(&mut s, c); + } + + s.push('\n'); + + s + } + + /// Associate `NamedObject` with `path`. + /// `path` must not already have an associated object. 
+ fn set_named_obj(&mut self, path: String, to: NamedObject) { + assert!( + !self.named_objects.contains_key(&path), + "tried to create object at existing path: {path}" + ); + + self.named_objects.insert(path, to); + } +} + +impl Default for TestShapeCS { + fn default() -> Self { + let mut map = HashMap::new(); + map.insert("ONE".into(), NamedObject::Var(Self::one())); + Self { + named_objects: map, + current_namespace: vec![], + constraints: vec![], + inputs: vec![String::from("ONE")], + aux: vec![], + } + } +} + +impl ConstraintSystem for TestShapeCS +where E::Scalar: PrimeField +{ + type Root = Self; + + fn alloc(&mut self, annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + let path = compute_path(&self.current_namespace, &annotation().into()); + self.aux.push(path); + + Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1))) + } + + fn alloc_input(&mut self, annotation: A, _f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, { + let path = compute_path(&self.current_namespace, &annotation().into()); + self.inputs.push(path); + + Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1))) + } + + fn enforce(&mut self, annotation: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, { + let path = compute_path(&self.current_namespace, &annotation().into()); + let index = self.constraints.len(); + self.set_named_obj(path.clone(), NamedObject::Constraint(index)); + + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.constraints.push((a, b, c, path)); + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, { + let name = name_fn().into(); + let path = compute_path(&self.current_namespace, &name); + 
self.set_named_obj(path, NamedObject::Namespace); + self.current_namespace.push(name); + } + + fn pop_namespace(&mut self) { + assert!(self.current_namespace.pop().is_some()); + } + + fn get_root(&mut self) -> &mut Self::Root { self } +} + +fn compute_path(ns: &[String], this: &str) -> String { + assert!(!this.contains('/'), "'/' is not allowed in names"); + + let mut name = String::new(); + + let mut needs_separation = false; + for ns in ns.iter().chain(Some(this.to_string()).iter()) { + if needs_separation { + name += "/"; + } + + name += ns; + needs_separation = true; + } + + name +} diff --git a/prover/src/circuit.rs b/prover/src/circuit.rs new file mode 100644 index 0000000..298aab0 --- /dev/null +++ b/prover/src/circuit.rs @@ -0,0 +1,496 @@ +//! There are two augmented circuits: the primary and the secondary. +//! Each of them is over a curve in a 2-cycle of elliptic curves. +//! We have two running instances. Each circuit takes as input 2 hashes: one for +//! each of the running instances. Each of these hashes is H(params = H(shape, +//! ck), i, z0, zi, U). Each circuit folds the last invocation of the other into +//! 
the running instance + +use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::Field; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{NIO_NOVA_FOLD, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, AllocatedPoint, + AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + supernova::StepCircuit, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + Commitment, +}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct NovaAugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, + is_primary_circuit: bool, // A boolean indicating if this is the primary circuit +} + +impl NovaAugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { + Self { limb_width, n_limbs, is_primary_circuit } + } +} + +// NOTES: All these options here seem to point towards using a typestate pattern +// or something. 
+ +#[derive(Debug, Serialize)] +#[serde(bound = "")] +pub struct NovaAugmentedCircuitInputs { + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, +} + +impl NovaAugmentedCircuitInputs { + /// Create new inputs/witness for the verification circuit + pub fn new( + params: E::Scalar, + i: E::Base, + z0: Vec, + zi: Option>, + U: Option>, + u: Option>, + T: Option>, + ) -> Self { + Self { params, i, z0, zi, U, u, T } + } +} + +/// The augmented circuit F' in Nova that includes a step circuit F +/// and the circuit for the verifier in Nova's non-interactive folding scheme +pub struct NovaAugmentedCircuit<'a, E: Engine, SC: StepCircuit> { + params: &'a NovaAugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, // The function that is applied for each step +} + +impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { + /// Create a new verification circuit for the input relaxed r1cs instances + pub const fn new( + params: &'a NovaAugmentedCircuitParams, + inputs: Option>, + step_circuit: &'a SC, + ro_consts: ROConstantsCircuit, + ) -> Self { + Self { params, inputs, step_circuit, ro_consts } + } + + /// Allocate all witnesses and return + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + ) -> Result< + ( + AllocatedNum, + AllocatedNum, + Vec>, + Vec>, + AllocatedRelaxedR1CSInstance, + AllocatedR1CSInstance, + AllocatedPoint, + ), + SynthesisError, + > { + // Allocate the params + let params = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.params), + )?; + + // Allocate i + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + // Allocate z0 + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + // Allocate the running instance + let U: AllocatedRelaxedR1CSInstance = AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| "Allocate U"), + self.inputs.as_ref().and_then(|inputs| inputs.U.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + // Allocate the instance to be folded in + let u = AllocatedR1CSInstance::alloc( + cs.namespace(|| "allocate instance u to fold"), + self.inputs.as_ref().and_then(|inputs| inputs.u.as_ref()), + )?; + + // Allocate T + let T = AllocatedPoint::alloc( + cs.namespace(|| "allocate T"), + self.inputs.as_ref().and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), + )?; + T.check_on_curve(cs.namespace(|| "check T on curve"))?; + + Ok((params, i, z_0, z_i, U, u, T)) + } + + /// Synthesizes base case and returns the new relaxed `R1CSInstance` + fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + u: AllocatedR1CSInstance, + ) -> Result, SynthesisError> { + let U_default: AllocatedRelaxedR1CSInstance = + if self.params.is_primary_circuit { + // The primary circuit just returns the default R1CS instance + AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate U_default"), + self.params.limb_width, + self.params.n_limbs, + )? + } else { + // The secondary circuit returns the incoming R1CS instance + AllocatedRelaxedR1CSInstance::from_r1cs_instance( + cs.namespace(|| "Allocate U_default"), + u, + self.params.limb_width, + self.params.n_limbs, + )? 
+ }; + Ok(U_default) + } + + /// Synthesizes non base case and returns the new relaxed `R1CSInstance` + /// And a boolean indicating if all checks pass + fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + U: &AllocatedRelaxedR1CSInstance, + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + arity: usize, + ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> { + // Check that u.x[0] = Hash(params, U, i, z0, zi) + let mut ro = E::ROCircuit::new(self.ro_consts.clone(), NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); + ro.absorb(params); + ro.absorb(i); + for e in z_0 { + ro.absorb(e); + } + for e in z_i { + ro.absorb(e); + } + U.absorb_in_ro(cs.namespace(|| "absorb U"), &mut ro)?; + + let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; + let check_pass = alloc_num_equals( + cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), + &u.X[0], + &hash, + )?; + + // Run NIFS Verifier + let U_fold = U.fold_with_r1cs( + cs.namespace(|| "compute fold of U and u"), + params, + u, + T, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + Ok((U_fold, check_pass)) + } +} + +impl> NovaAugmentedCircuit<'_, E, SC> { + /// synthesize circuit giving constraint system + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result>, SynthesisError> { + let arity = self.step_circuit.arity(); + + // Allocate all witnesses + let (params, i, z_0, z_i, U, u, T) = + self.alloc_witness(cs.namespace(|| "allocate the circuit witness"), arity)?; + + // Compute variable indicating if this is the base case + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; + + // Synthesize the circuit for the base case and get the new running instance 
+ let Unew_base = self.synthesize_base_case(cs.namespace(|| "base case"), u.clone())?; + + // Synthesize the circuit for the non-base case and get the new running + // instance along with a boolean indicating if all checks have passed + let (Unew_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + ¶ms, + &i, + &z_0, + &z_i, + &U, + &u, + &T, + arity, + )?; + + // Either check_non_base_pass=true or we are in the base case + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // Compute the U_new + let Unew = Unew_base.conditionally_select( + cs.namespace(|| "compute U_new"), + &Unew_non_base, + &Boolean::from(is_base_case.clone()), + )?; + + // Compute i + 1 + let i_new = + AllocatedNum::alloc(cs.namespace(|| "i + 1"), || Ok(*i.get_value().get()? + E::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc, + |lc| lc, + |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + // TODO: Note, I changed this here because I removed the other `StepCircuit` + // trait. 
+ let (_pc, z_next) = self.step_circuit.synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // Compute the new hash H(params, Unew, i+1, z0, z_{i+1}) + let mut ro = E::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); + ro.absorb(¶ms); + ro.absorb(&i_new); + for e in &z_0 { + ro.absorb(e); + } + for e in &z_next { + ro.absorb(e); + } + Unew.absorb_in_ro(cs.namespace(|| "absorb U_new"), &mut ro)?; + let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; + + // Outputs the computed hash and u.X[1] that corresponds to the hash of the + // other circuit + u.X[1].inputize(cs.namespace(|| "Output unmodified hash of the other circuit"))?; + hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; + + Ok(z_next) + } +} + +// #[cfg(test)] +// mod tests { +// use expect_test::{expect, Expect}; + +// use super::*; +// use crate::{ +// bellpepper::{ +// r1cs::{NovaShape, NovaWitness}, +// solver::SatisfyingAssignment, +// test_shape_cs::TestShapeCS, +// }, +// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, +// gadgets::scalar_as_base, +// provider::{ +// poseidon::PoseidonConstantsCircuit, Bn256EngineKZG, +// GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, +// VestaEngine, }, +// traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, +// }; + +// // In the following we use 1 to refer to the primary, and 2 to refer to +// the // secondary circuit +// fn test_recursive_circuit_with( +// primary_params: &NovaAugmentedCircuitParams, +// secondary_params: &NovaAugmentedCircuitParams, +// ro_consts1: ROConstantsCircuit>, +// ro_consts2: ROConstantsCircuit, +// expected_num_constraints_primary: &Expect, +// expected_num_constraints_secondary: &Expect, +// ) where +// E1: CurveCycleEquipped, +// { +// let tc1 = 
TrivialCircuit::default(); +// // Initialize the shape and ck for the primary +// let circuit1: NovaAugmentedCircuit< +// '_, +// Dual, +// TrivialCircuit< as Engine>::Base>, +// > = NovaAugmentedCircuit::new(primary_params, None, &tc1, +// > ro_consts1.clone()); +// let mut cs: TestShapeCS = TestShapeCS::new(); +// let _ = circuit1.synthesize(&mut cs); +// let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + +// expected_num_constraints_primary.assert_eq(&cs.num_constraints(). +// to_string()); + +// let tc2 = TrivialCircuit::default(); +// // Initialize the shape and ck for the secondary +// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, +// None, &tc2, ro_consts2.clone()); let mut cs: TestShapeCS> = +// TestShapeCS::new(); let _ = circuit2.synthesize(&mut cs); +// let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + +// expected_num_constraints_secondary.assert_eq(&cs.num_constraints(). +// to_string()); + +// // Execute the base case for the primary +// let zero1 = < as Engine>::Base as Field>::ZERO; +// let mut cs1 = SatisfyingAssignment::::new(); +// let inputs1: NovaAugmentedCircuitInputs> = +// NovaAugmentedCircuitInputs::new( scalar_as_base::(zero1), // +// pass zero for testing zero1, +// vec![zero1], +// None, +// None, +// None, +// None, +// ); +// let circuit1: NovaAugmentedCircuit< +// '_, +// Dual, +// TrivialCircuit< as Engine>::Base>, +// > = NovaAugmentedCircuit::new(primary_params, Some(inputs1), &tc1, +// > ro_consts1); +// let _ = circuit1.synthesize(&mut cs1); +// let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, +// &ck1).unwrap(); // Make sure that this is satisfiable +// shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + +// // Execute the base case for the secondary +// let zero2 = <::Base as Field>::ZERO; +// let mut cs2 = SatisfyingAssignment::>::new(); +// let inputs2: NovaAugmentedCircuitInputs = +// 
NovaAugmentedCircuitInputs::new( +// scalar_as_base::>(zero2), // pass zero for testing +// zero2, vec![zero2], +// None, +// None, +// Some(inst1), +// None, +// ); +// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, +// Some(inputs2), &tc2, ro_consts2); let _ = circuit2.synthesize(&mut +// cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, +// &ck2).unwrap(); // Make sure that it is satisfiable +// shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); +// } + +// #[test] +// fn test_recursive_circuit_pasta() { +// // this test checks against values that must be replicated in +// benchmarks if // changed here +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["9817"], +// &expect!["10349"], +// ); +// } + +// #[test] +// fn test_recursive_circuit_bn256_grumpkin() { +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["9985"], +// &expect!["10538"], +// ); +// } + +// #[test] +// fn test_recursive_circuit_secpq() { +// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, +// BN_N_LIMBS, true); let params2 = +// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); +// let ro_consts1: ROConstantsCircuit = +// 
PoseidonConstantsCircuit::default(); let ro_consts2: +// ROConstantsCircuit = PoseidonConstantsCircuit::default(); + +// test_recursive_circuit_with::( +// ¶ms1, +// ¶ms2, +// ro_consts1, +// ro_consts2, +// &expect!["10264"], +// &expect!["10961"], +// ); +// } +// } diff --git a/src/constants.rs b/prover/src/constants.rs similarity index 100% rename from src/constants.rs rename to prover/src/constants.rs diff --git a/prover/src/cyclefold/circuit.rs b/prover/src/cyclefold/circuit.rs new file mode 100644 index 0000000..b3f126c --- /dev/null +++ b/prover/src/cyclefold/circuit.rs @@ -0,0 +1,257 @@ +//! This module defines Cyclefold circuit + +use bellpepper::gadgets::boolean_utils::conditionally_select; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + ConstraintSystem, SynthesisError, +}; +use ff::Field; +use neptune::{circuit2::poseidon_hash_allocated, poseidon::PoseidonConstants}; + +use crate::{ + constants::NUM_CHALLENGE_BITS, + gadgets::{alloc_zero, le_bits_to_num, AllocatedPoint}, + traits::{commitment::CommitmentTrait, Engine}, + Commitment, +}; + +/// A structure containing the CycleFold circuit inputs and implementing the +/// synthesize function +pub struct CycleFoldCircuit { + commit_1: Option>, + commit_2: Option>, + scalar: Option<[bool; NUM_CHALLENGE_BITS]>, + poseidon_constants: PoseidonConstants, +} + +impl Default for CycleFoldCircuit { + fn default() -> Self { + let poseidon_constants = PoseidonConstants::new(); + Self { commit_1: None, commit_2: None, scalar: None, poseidon_constants } + } +} +impl CycleFoldCircuit { + /// Create a new CycleFold circuit with the given inputs + pub fn new( + commit_1: Option>, + commit_2: Option>, + scalar: Option<[bool; NUM_CHALLENGE_BITS]>, + ) -> Self { + let poseidon_constants = PoseidonConstants::new(); + Self { commit_1, commit_2, scalar, poseidon_constants } + } + + #[allow(clippy::type_complexity)] + fn alloc_witness::Base>>( + &self, + mut cs: CS, + ) -> Result< + ( + AllocatedPoint, // 
commit_1 + AllocatedPoint, // commit_2 + Vec, // scalar + ), + SynthesisError, + > { + let commit_1 = AllocatedPoint::alloc( + cs.namespace(|| "allocate C_1"), + self.commit_1.map(|C_1| C_1.to_coordinates()), + )?; + commit_1.check_on_curve(cs.namespace(|| "commit_1 on curve"))?; + + let commit_2 = AllocatedPoint::alloc( + cs.namespace(|| "allocate C_2"), + self.commit_2.map(|C_2| C_2.to_coordinates()), + )?; + commit_2.check_on_curve(cs.namespace(|| "commit_2 on curve"))?; + + let scalar: Vec = self + .scalar + .unwrap_or([false; NUM_CHALLENGE_BITS]) + .into_iter() + .enumerate() + .map(|(idx, bit)| { + AllocatedBit::alloc(cs.namespace(|| format!("scalar bit {idx}")), Some(bit)) + }) + .collect::, _>>()?; + + Ok((commit_1, commit_2, scalar)) + } + + /// Synthesize the CycleFold circuit + pub fn synthesize::Base>>( + &self, + mut cs: CS, + ) -> Result<(), SynthesisError> { + let (C_1, C_2, r) = self.alloc_witness(cs.namespace(|| "allocate circuit witness"))?; + + // Calculate C_final + let r_C_2 = C_2.scalar_mul(cs.namespace(|| "r * C_2"), &r)?; + + let C_final = C_1.add(cs.namespace(|| "C_1 + r * C_2"), &r_C_2)?; + + self.inputize_point(&C_1, cs.namespace(|| "inputize C_1"))?; + self.inputize_point(&C_2, cs.namespace(|| "inputize C_2"))?; + self.inputize_point(&C_final, cs.namespace(|| "inputize C_final"))?; + + let scalar = le_bits_to_num(cs.namespace(|| "get scalar"), &r)?; + + scalar.inputize(cs.namespace(|| "scalar"))?; + + Ok(()) + } + + // Represent the point in the public IO as its 2-ary Poseidon hash + fn inputize_point( + &self, + point: &AllocatedPoint, + mut cs: CS, + ) -> Result<(), SynthesisError> + where + E: Engine, + CS: ConstraintSystem, + { + let (x, y, is_infinity) = point.get_coordinates(); + let preimage = vec![x.clone(), y.clone()]; + let val = + poseidon_hash_allocated(cs.namespace(|| "hash point"), preimage, &self.poseidon_constants)?; + + let zero = alloc_zero(cs.namespace(|| "zero")); + + let is_infinity_bit = AllocatedBit::alloc( + 
cs.namespace(|| "is_infinity"), + Some(is_infinity.get_value().unwrap_or(E::Base::ONE) == E::Base::ONE), + )?; + + cs.enforce( + || "infinity_bit matches", + |lc| lc, + |lc| lc, + |lc| lc + is_infinity_bit.get_variable() - is_infinity.get_variable(), + ); + + // Output 0 when it is the point at infinity + let output = conditionally_select( + cs.namespace(|| "select output"), + &zero, + &val, + &Boolean::from(is_infinity_bit), + )?; + + output.inputize(cs.namespace(|| "inputize hash"))?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + use ff::{Field, PrimeField, PrimeFieldBits}; + use neptune::Poseidon; + use rand_core::OsRng; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::NIO_CYCLE_FOLD, + gadgets::scalar_as_base, + provider::Bn256EngineKZG, + traits::{commitment::CommitmentEngineTrait, snark::default_ck_hint, CurveCycleEquipped, Dual}, + }; + + fn test_cyclefold_circuit_size_with(expected_constraints: &Expect, expected_vars: &Expect) + where E1: CurveCycleEquipped { + // Instantiate the circuit with trivial inputs + let circuit: CycleFoldCircuit> = CycleFoldCircuit::default(); + + // Synthesize the R1CS shape + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); + + // Extract the number of constraints and variables + let num_constraints = cs.num_constraints(); + let num_variables = cs.num_aux(); + let num_io = cs.num_inputs(); + + // Check the number of constraints and variables match the expected values + expected_constraints.assert_eq(&num_constraints.to_string()); + expected_vars.assert_eq(&num_variables.to_string()); + assert_eq!(num_io, NIO_CYCLE_FOLD + 1); // includes 1 + } + + #[test] + fn test_cyclefold_circuit_size() { + test_cyclefold_circuit_size_with::(&expect!("2090"), &expect!("2081")); + } + + fn test_cyclefold_circuit_sat_with() { + let rng = OsRng; + 
+ let ck = < as Engine>::CE as CommitmentEngineTrait>>::setup(b"test", 5); + + // Generate random vectors to commit to + let v1 = + (0..5).map(|_| < as Engine>::Scalar as Field>::random(rng)).collect::>(); + let v2 = + (0..5).map(|_| < as Engine>::Scalar as Field>::random(rng)).collect::>(); + + // Calculate the random commitments + let C_1 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v1); + let C_2 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v2); + + // Generate a random scalar + let val: u128 = rand::random(); + let r = < as Engine>::Scalar as PrimeField>::from_u128(val); + let r_bits = r.to_le_bits().into_iter().take(128).collect::>().try_into().unwrap(); + + let circuit: CycleFoldCircuit> = + CycleFoldCircuit::new(Some(C_1), Some(C_2), Some(r_bits)); + + // Calculate the result out of circuit + let native_result = C_1 + C_2 * r; + + // Generate the R1CS shape and commitment key + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Synthesize the R1CS circuit on the random inputs + let mut cs = SatisfyingAssignment::::new(); + circuit.synthesize(cs.namespace(|| "synthesizing witness")).unwrap(); + + let (instance, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let X = &instance.X; + + // Helper functio to calculate the hash + let compute_hash = |P: Commitment>| -> E::Scalar { + let (x, y, is_infinity) = P.to_coordinates(); + if is_infinity { + return E::Scalar::ZERO; + } + + let mut hasher = Poseidon::new_with_preimage(&[x, y], &circuit.poseidon_constants); + + hasher.hash() + }; + + // Check the circuit calculates the right thing + let hash_1 = compute_hash(C_1); + assert_eq!(hash_1, X[0]); + let hash_2 = compute_hash(C_2); + assert_eq!(hash_2, X[1]); + let hash_res = compute_hash(native_result); + assert_eq!(hash_res, X[2]); + assert_eq!(r, scalar_as_base::(X[3])); + + // Check the R1CS 
equation is satisfied + shape.is_sat(&ck, &instance, &witness).unwrap(); + } + + #[test] + fn test_cyclefold_circuit_sat() { test_cyclefold_circuit_sat_with::(); } +} diff --git a/prover/src/cyclefold/gadgets.rs b/prover/src/cyclefold/gadgets.rs new file mode 100644 index 0000000..9e4a494 --- /dev/null +++ b/prover/src/cyclefold/gadgets.rs @@ -0,0 +1,633 @@ +//! This module defines many of the gadgets needed in the primary folding +//! circuit + +use bellpepper::gadgets::Assignment; +use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools; + +use super::util::FoldingData; +use crate::{ + constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS}, + gadgets::{ + alloc_bignat_constant, f_to_nat, le_bits_to_num, AllocatedPoint, AllocatedRelaxedR1CSInstance, + BigNat, Num, + }, + r1cs::R1CSInstance, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, +}; + +// An allocated version of the R1CS instance obtained from a single cyclefold +// invocation +pub struct AllocatedCycleFoldInstance { + W: AllocatedPoint, + X: [BigNat; NIO_CYCLE_FOLD], +} + +impl AllocatedCycleFoldInstance { + pub fn alloc>( + mut cs: CS, + inst: Option<&R1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let W = AllocatedPoint::alloc( + cs.namespace(|| "allocate W"), + inst.map(|u| u.comm_W.to_coordinates()), + )?; + W.check_on_curve(cs.namespace(|| "check W on curve"))?; + + if let Some(inst) = inst { + if inst.X.len() != NIO_CYCLE_FOLD { + return Err(SynthesisError::IncompatibleLengthVector(String::from( + "R1CS instance has wrong arity", + ))); + } + } + + let X: [BigNat; NIO_CYCLE_FOLD] = (0..NIO_CYCLE_FOLD) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocating IO {idx}")), + || Ok(f_to_nat(inst.map_or(&E::Scalar::ZERO, |inst| &inst.X[idx]))), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) + })?; + + Ok(Self { W, X }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + self.X.iter().enumerate().try_for_each(|(io_idx, x)| -> Result<(), SynthesisError> { + x.as_limbs().iter().enumerate().try_for_each( + |(limb_idx, limb)| -> Result<(), SynthesisError> { + ro.absorb(&limb.as_allocated_num( + cs.namespace(|| format!("convert limb {limb_idx} of X[{io_idx}] to num")), + )?); + Ok(()) + }, + ) + })?; + + Ok(()) + } +} + +/// An circuit allocated version of the `FoldingData` coming from a CycleFold +/// invocation. +pub struct AllocatedCycleFoldData { + pub U: AllocatedRelaxedR1CSInstance, + pub u: AllocatedCycleFoldInstance, + pub T: AllocatedPoint, +} + +impl AllocatedCycleFoldData { + pub fn alloc>( + mut cs: CS, + inst: Option<&FoldingData>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let U = AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| "U"), + inst.map(|x| &x.U), + limb_width, + n_limbs, + )?; + + let u = AllocatedCycleFoldInstance::alloc( + cs.namespace(|| "u"), + inst.map(|x| &x.u), + limb_width, + n_limbs, + )?; + + let T = AllocatedPoint::alloc(cs.namespace(|| "T"), inst.map(|x| x.T.to_coordinates()))?; + T.check_on_curve(cs.namespace(|| "T on curve"))?; + + Ok(Self { U, u, T }) + } + + /// The NIFS verifier which folds the CycleFold instance into a running + /// relaxed R1CS instance. 
+ pub fn apply_fold( + &self, + mut cs: CS, + params: &AllocatedNum, + ro_consts: ROConstantsCircuit, + limb_width: usize, + n_limbs: usize, + ) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + // Compute r: + let mut ro = E::ROCircuit::new( + ro_consts, + 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * BN_N_LIMBS) + 3, /* digest + (U) + (u) + T */ + ); + ro.absorb(params); + + self.U.absorb_in_ro(cs.namespace(|| "absorb cyclefold running instance"), &mut ro)?; + // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, + // i, z0, zi) + self.u.absorb_in_ro(cs.namespace(|| "absorb cyclefold instance"), &mut ro)?; + + ro.absorb(&self.T.x); + ro.absorb(&self.T.y); + ro.absorb(&self.T.is_infinity); + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + // W_fold = self.W + r * u.W + let rW = self.u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; + let W_fold = self.U.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; + + // E_fold = self.E + r * T + let rT = self.T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; + let E_fold = self.U.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; + + // u_fold = u_r + r + let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { + Ok(*self.U.u.get_value().get()? + r.get_value().get()?) 
+ })?; + cs.enforce( + || "Check u_fold", + |lc| lc, + |lc| lc, + |lc| lc + u_fold.get_variable() - self.U.u.get_variable() - r.get_variable(), + ); + + // Fold the IO: + // Analyze r into limbs + let r_bn = + BigNat::from_num(cs.namespace(|| "allocate r_bn"), &Num::from(r), limb_width, n_limbs)?; + + // Allocate the order of the non-native field as a constant + let m_bn = alloc_bignat_constant( + cs.namespace(|| "alloc m"), + &E::GE::group_params().2, + limb_width, + n_limbs, + )?; + + let mut X_fold = vec![]; + + // Calculate the + for (idx, (X, x)) in self.U.X.iter().zip_eq(self.u.X.iter()).enumerate() { + let (_, r) = x.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; + let r_new = X.add(&r)?; + let X_i_fold = r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; + X_fold.push(X_i_fold); + } + + let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) + })?; + + Ok(AllocatedRelaxedR1CSInstance { W: W_fold, E: E_fold, u: u_fold, X: X_fold }) + } +} + +pub mod emulated { + use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; + use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, + }; + use ff::Field; + + use super::FoldingData; + use crate::{ + constants::{NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, + gadgets::{ + alloc_bignat_constant, alloc_zero, conditionally_select_allocated_bit, + conditionally_select_bignat, f_to_nat, le_bits_to_num, BigNat, + }, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, + RelaxedR1CSInstance, + }; + + /// An allocated version of a curve point from the non-native curve + #[derive(Clone)] + pub struct AllocatedEmulPoint + where G: Group { + pub x: BigNat, + pub y: BigNat, + pub is_infinity: AllocatedBit, + } + + impl AllocatedEmulPoint + where G: Group + { + pub fn alloc( + mut cs: 
CS, + coords: Option<(G::Scalar, G::Scalar, bool)>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let x = BigNat::alloc_from_nat( + cs.namespace(|| "x"), + || Ok(f_to_nat(&coords.map_or(::ZERO, |val| val.0))), + limb_width, + n_limbs, + )?; + + let y = BigNat::alloc_from_nat( + cs.namespace(|| "y"), + || Ok(f_to_nat(&coords.map_or(::ZERO, |val| val.1))), + limb_width, + n_limbs, + )?; + + let is_infinity = AllocatedBit::alloc( + cs.namespace(|| "alloc is_infinity"), + coords.map_or(Some(true), |(_, _, is_infinity)| Some(is_infinity)), + )?; + + Ok(Self { x, y, is_infinity }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + let x_bn = self + .x + .as_limbs() + .iter() + .enumerate() + .map(|(i, limb)| { + limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of x to num"))) + }) + .collect::>, _>>()?; + + for limb in x_bn { + ro.absorb(&limb) + } + + let y_bn = self + .y + .as_limbs() + .iter() + .enumerate() + .map(|(i, limb)| { + limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of y to num"))) + }) + .collect::>, _>>()?; + + for limb in y_bn { + ro.absorb(&limb) + } + + let is_infinity_num: AllocatedNum = + AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { + self.is_infinity.get_value().map_or(Err(SynthesisError::AssignmentMissing), |bit| { + if bit { + Ok(G::Base::ONE) + } else { + Ok(G::Base::ZERO) + } + }) + })?; + + cs.enforce( + || "constrain num equals bit", + |lc| lc, + |lc| lc, + |lc| lc + is_infinity_num.get_variable() - self.is_infinity.get_variable(), + ); + + ro.absorb(&is_infinity_num); + + Ok(()) + } + + fn conditionally_select>( + &self, + mut cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select_bignat( + cs.namespace(|| "x = cond ? 
self.x : other.x"), + &self.x, + &other.x, + condition, + )?; + + let y = conditionally_select_bignat( + cs.namespace(|| "y = cond ? self.y : other.y"), + &self.y, + &other.y, + condition, + )?; + + let is_infinity = conditionally_select_allocated_bit( + cs.namespace(|| "is_infinity = cond ? self.is_infinity : other.is_infinity"), + &self.is_infinity, + &other.is_infinity, + condition, + )?; + + Ok(Self { x, y, is_infinity }) + } + + pub fn default>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let x = alloc_bignat_constant( + cs.namespace(|| "allocate x_default = 0"), + &f_to_nat(&G::Base::ZERO), + limb_width, + n_limbs, + )?; + let y = alloc_bignat_constant( + cs.namespace(|| "allocate y_default = 0"), + &f_to_nat(&G::Base::ZERO), + limb_width, + n_limbs, + )?; + + let is_infinity = AllocatedBit::alloc(cs.namespace(|| "allocate is_infinity"), Some(true))?; + cs.enforce( + || "is_infinity = 1", + |lc| lc, + |lc| lc, + |lc| lc + CS::one() - is_infinity.get_variable(), + ); + + Ok(Self { x, y, is_infinity }) + } + } + + /// A non-native circuit version of a `RelaxedR1CSInstance`. 
This is used + /// for the in-circuit representation of the primary running instance + pub struct AllocatedEmulRelaxedR1CSInstance { + pub comm_W: AllocatedEmulPoint, + pub comm_E: AllocatedEmulPoint, + u: AllocatedNum, + x0: AllocatedNum, + x1: AllocatedNum, + } + + impl AllocatedEmulRelaxedR1CSInstance + where E: Engine + { + pub fn alloc>( + mut cs: CS, + inst: Option<&RelaxedR1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let comm_W = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate comm_W"), + inst.map(|x| x.comm_W.to_coordinates()), + limb_width, + n_limbs, + )?; + + let comm_E = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate comm_E"), + inst.map(|x| x.comm_E.to_coordinates()), + limb_width, + n_limbs, + )?; + + let u = AllocatedNum::alloc(cs.namespace(|| "allocate u"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u)) + })?; + + let x0 = AllocatedNum::alloc(cs.namespace(|| "allocate x0"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[0])) + })?; + + let x1 = AllocatedNum::alloc(cs.namespace(|| "allocate x1"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[1])) + })?; + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + + /// Performs a folding of a primary R1CS instance (`u_W`, `u_x0`, + /// `u_x1`) into a running `AllocatedEmulRelaxedR1CSInstance` + /// As the curve operations are performed in the CycleFold circuit and + /// provided to the primary circuit as non-deterministic advice, + /// this folding simply sets those values as the new witness and + /// error vector commitments. 
+ #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn fold_with_r1cs::Base>>( + &self, + mut cs: CS, + pp_digest: &AllocatedNum, + W_new: AllocatedEmulPoint, + E_new: AllocatedEmulPoint, + u_W: &AllocatedEmulPoint, + u_x0: &AllocatedNum, + u_x1: &AllocatedNum, + comm_T: &AllocatedEmulPoint, + ro_consts: ROConstantsCircuit, + ) -> Result { + let mut ro = E::ROCircuit::new( + ro_consts, + 1 + NUM_FE_IN_EMULATED_POINT + 2 + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + u.x + + * comm_T */ + ); + ro.absorb(pp_digest); + + // Absorb u + // Absorb the witness + u_W.absorb_in_ro(cs.namespace(|| "absorb u_W"), &mut ro)?; + // Absorb public IO + ro.absorb(u_x0); + ro.absorb(u_x1); + + // Absorb comm_T + comm_T.absorb_in_ro(cs.namespace(|| "absorb comm_T"), &mut ro)?; + + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + let u_fold = self.u.add(cs.namespace(|| "u_fold = u + r"), &r)?; + let x0_fold = AllocatedNum::alloc(cs.namespace(|| "x0"), || { + Ok(*self.x0.get_value().get()? + *r.get_value().get()? * *u_x0.get_value().get()?) + })?; + cs.enforce( + || "x0_fold = x0 + r * u_x0", + |lc| lc + r.get_variable(), + |lc| lc + u_x0.get_variable(), + |lc| lc + x0_fold.get_variable() - self.x0.get_variable(), + ); + + let x1_fold = AllocatedNum::alloc(cs.namespace(|| "x1"), || { + Ok(*self.x1.get_value().get()? + *r.get_value().get()? * *u_x1.get_value().get()?) 
+ })?; + cs.enforce( + || "x1_fold = x1 + r * u_x1", + |lc| lc + r.get_variable(), + |lc| lc + u_x1.get_variable(), + |lc| lc + x1_fold.get_variable() - self.x1.get_variable(), + ); + + Ok(Self { comm_W: W_new, comm_E: E_new, u: u_fold, x0: x0_fold, x1: x1_fold }) + } + + pub fn absorb_in_ro( + &self, + mut cs: CS, + ro: &mut impl ROCircuitTrait, + ) -> Result<(), SynthesisError> + where + CS: ConstraintSystem<::Base>, + { + self.comm_W.absorb_in_ro(cs.namespace(|| "absorb comm_W"), ro)?; + self.comm_E.absorb_in_ro(cs.namespace(|| "absorb comm_E"), ro)?; + + ro.absorb(&self.u); + ro.absorb(&self.x0); + ro.absorb(&self.x1); + + Ok(()) + } + + pub fn conditionally_select::Base>>( + &self, + mut cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + let comm_W = self.comm_W.conditionally_select( + cs.namespace(|| "comm_W = cond ? self.comm_W : other.comm_W"), + &other.comm_W, + condition, + )?; + + let comm_E = self.comm_E.conditionally_select( + cs.namespace(|| "comm_E = cond? self.comm_E : other.comm_E"), + &other.comm_E, + condition, + )?; + + let u = conditionally_select( + cs.namespace(|| "u = cond ? self.u : other.u"), + &self.u, + &other.u, + condition, + )?; + + let x0 = conditionally_select( + cs.namespace(|| "x0 = cond ? self.x0 : other.x0"), + &self.x0, + &other.x0, + condition, + )?; + + let x1 = conditionally_select( + cs.namespace(|| "x1 = cond ? self.x1 : other.x1"), + &self.x1, + &other.x1, + condition, + )?; + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + + pub fn default::Base>>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let comm_W = + AllocatedEmulPoint::default(cs.namespace(|| "default comm_W"), limb_width, n_limbs)?; + let comm_E = comm_W.clone(); + + let u = alloc_zero(cs.namespace(|| "u = 0")); + + let x0 = u.clone(); + let x1 = u.clone(); + + Ok(Self { comm_W, comm_E, u, x0, x1 }) + } + } + + /// The in-circuit representation of the primary folding data. 
+ pub struct AllocatedFoldingData { + pub U: AllocatedEmulRelaxedR1CSInstance, + pub u_W: AllocatedEmulPoint, + pub u_x0: AllocatedNum, + pub u_x1: AllocatedNum, + pub T: AllocatedEmulPoint, + } + + impl AllocatedFoldingData { + pub fn alloc>( + mut cs: CS, + inst: Option<&FoldingData>, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem<::Base>, + { + let U = AllocatedEmulRelaxedR1CSInstance::alloc( + cs.namespace(|| "allocate U"), + inst.map(|x| &x.U), + limb_width, + n_limbs, + )?; + + let u_W = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate u_W"), + inst.map(|x| x.u.comm_W.to_coordinates()), + limb_width, + n_limbs, + )?; + + let u_x0 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x0"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[0])) + })?; + + let u_x1 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x1"), || { + inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[1])) + })?; + + let T = AllocatedEmulPoint::alloc( + cs.namespace(|| "allocate T"), + inst.map(|x| x.T.to_coordinates()), + limb_width, + n_limbs, + )?; + + Ok(Self { U, u_W, u_x0, u_x1, T }) + } + } +} diff --git a/src/cyclefold/mod.rs b/prover/src/cyclefold/mod.rs similarity index 100% rename from src/cyclefold/mod.rs rename to prover/src/cyclefold/mod.rs diff --git a/prover/src/cyclefold/nifs.rs b/prover/src/cyclefold/nifs.rs new file mode 100644 index 0000000..fc199bc --- /dev/null +++ b/prover/src/cyclefold/nifs.rs @@ -0,0 +1,143 @@ +//! 
This module defines the needed wrong-field NIFS prover + +use std::marker::PhantomData; + +use super::util::{absorb_cyclefold_r1cs, absorb_primary_commitment, absorb_primary_r1cs}; +use crate::{ + constants::{NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, + errors::NovaError, + gadgets::scalar_as_base, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, + CommitmentKey, CompressedCommitment, +}; + +/// A SNARK that holds the proof of a step of an incremental computation of the +/// primary circuit in the CycleFold folding scheme. +/// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is +/// that this +#[derive(Debug)] +pub struct PrimaryNIFS +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + pub(crate) comm_T: CompressedCommitment, + _p: PhantomData, +} + +impl PrimaryNIFS +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + /// Takes a relaxed R1CS instance-witness pair (U1, W1) and an R1CS + /// instance-witness pair (U2, W2) and folds them into a new relaxed + /// R1CS instance-witness pair (U, W) and a commitment to the cross term + /// T. It also provides the challenge r used to fold the instances. 
+ #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn prove( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E1::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness), E1::Scalar), NovaError> { + let arity = U1.X.len(); + + if arity != U2.X.len() { + return Err(NovaError::InvalidInputLength); + } + + let mut ro = E2::RO::new( + ro_consts.clone(), + 1 + NUM_FE_IN_EMULATED_POINT + arity + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + * + u.X + T */ + ); + + ro.absorb(*pp_digest); + + absorb_primary_r1cs::(U2, &mut ro); + + let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; + + absorb_primary_commitment::(&comm_T, &mut ro); + + let r = scalar_as_base::(ro.squeeze(NUM_CHALLENGE_BITS)); + + let U = U1.fold(U2, &comm_T, &r); + + let W = W1.fold(W2, &T, &r)?; + + Ok((Self { comm_T: comm_T.compress(), _p: PhantomData }, (U, W), r)) + } +} + +/// A SNARK that holds the proof of a step of an incremental computation of the +/// CycleFold circuit The difference of this folding scheme from the Nova NIFS +/// in `src/nifs.rs` is that this folding prover and verifier must fold in the +/// `RelaxedR1CSInstance` accumulator because the optimization in the +#[derive(Debug)] +pub struct CycleFoldNIFS { + pub(crate) comm_T: CompressedCommitment, +} + +impl CycleFoldNIFS { + /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS + /// instance/witness (U1, W1) returning the new folded accumulator and a + /// commitment to the cross-term. 
+ #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn prove( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { + // Check `U1` and `U2` have the same arity + if U2.X.len() != NIO_CYCLE_FOLD || U1.X.len() != NIO_CYCLE_FOLD { + return Err(NovaError::InvalidInputLength); + } + + // initialize a new RO + let mut ro = E::RO::new( + ro_consts.clone(), + 46, /* 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * + * BN_N_LIMBS) + 3, // digest + (U) + (u) + T */ + ); + + // append the digest of pp to the transcript + ro.absorb(scalar_as_base::(*pp_digest)); + + // append U1 to the transcript. + // NOTE: this must be here because the IO for `U2` does not have the data of the + // hash of `U1` + U1.absorb_in_ro(&mut ro); + + // append U2 to transcript + absorb_cyclefold_r1cs(U2, &mut ro); + + // compute a commitment to the cross-term + let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; + + // append `comm_T` to the transcript and obtain a challenge + comm_T.absorb_in_ro(&mut ro); + + // compute a challenge from the RO + let r = ro.squeeze(NUM_CHALLENGE_BITS); + + // fold the instance using `r` and `comm_T` + let U = U1.fold(U2, &comm_T, &r); + + // fold the witness using `r` and `T` + let W = W1.fold(W2, &T, &r)?; + + // return the folded instance and witness + Ok((Self { comm_T: comm_T.compress() }, (U, W))) + } +} diff --git a/prover/src/cyclefold/nova_circuit.rs b/prover/src/cyclefold/nova_circuit.rs new file mode 100644 index 0000000..cf5e6c5 --- /dev/null +++ b/prover/src/cyclefold/nova_circuit.rs @@ -0,0 +1,513 @@ +//! 
This module defines the Nova augmented circuit used for Cyclefold + +use bellpepper::gadgets::{ + boolean::Boolean, boolean_utils::conditionally_select_slice, num::AllocatedNum, Assignment, +}; +use bellpepper_core::{boolean::AllocatedBit, ConstraintSystem, SynthesisError}; +use ff::Field; +use serde::{Deserialize, Serialize}; + +use super::{ + gadgets::{emulated, AllocatedCycleFoldData}, + util::FoldingData, +}; +use crate::{ + constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_FE_IN_EMULATED_POINT, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, + AllocatedRelaxedR1CSInstance, + }, + supernova::StepCircuit, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + Commitment, +}; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct AugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, +} + +impl AugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize) -> Self { Self { limb_width, n_limbs } } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct AugmentedCircuitInputs +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + pp_digest: E1::Scalar, + i: E1::Base, + z0: Vec, + + zi: Option>, + data_p: Option>, + + data_c_1: Option>, + data_c_2: Option>, + + E_new: Option>, + W_new: Option>, +} + +impl AugmentedCircuitInputs +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn new( + pp_digest: E1::Scalar, + i: E1::Base, + z0: Vec, + zi: Option>, + data_p: Option>, + data_c_1: Option>, + data_c_2: Option>, + E_new: Option>, + W_new: Option>, + ) -> Self { + Self { pp_digest, i, z0, zi, data_p, data_c_1, data_c_2, E_new, W_new } + } +} +pub struct AugmentedCircuit<'a, E1, E2, SC> +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + SC: StepCircuit, { + params: &'a AugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, 
+ step_circuit: &'a SC, +} + +impl<'a, E1, E2, SC> AugmentedCircuit<'a, E1, E2, SC> +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + SC: StepCircuit, +{ + pub const fn new( + params: &'a AugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, + ) -> Self { + Self { params, ro_consts, inputs, step_circuit } + } + + #[allow(clippy::type_complexity)] + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + ) -> Result< + ( + AllocatedNum, // pp_digest + AllocatedNum, // i + Vec>, // z0 + Vec>, // zi + emulated::AllocatedFoldingData, // data_p + AllocatedCycleFoldData, // data_c_1 + AllocatedCycleFoldData, // data_c_2 + emulated::AllocatedEmulPoint, // E_new + emulated::AllocatedEmulPoint, // W_new + ), + SynthesisError, + > { + let pp_digest = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.pp_digest), + )?; + + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E1::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + let data_p = emulated::AllocatedFoldingData::alloc( + cs.namespace(|| "data_p"), + self.inputs.as_ref().and_then(|inputs| inputs.data_p.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let data_c_1 = AllocatedCycleFoldData::alloc( + cs.namespace(|| "data_c_1"), + self.inputs.as_ref().and_then(|inputs| inputs.data_c_1.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let data_c_2 = AllocatedCycleFoldData::alloc( + cs.namespace(|| "data_c_2"), + self.inputs.as_ref().and_then(|inputs| inputs.data_c_2.as_ref()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let E_new = emulated::AllocatedEmulPoint::alloc( + cs.namespace(|| "E_new"), + self + .inputs + .as_ref() + .and_then(|inputs| inputs.E_new.as_ref()) + .map(|E_new| E_new.to_coordinates()), + self.params.limb_width, + self.params.n_limbs, + )?; + + let W_new = emulated::AllocatedEmulPoint::alloc( + cs.namespace(|| "W_new"), + self + .inputs + .as_ref() + .and_then(|inputs| inputs.W_new.as_ref()) + .map(|W_new| W_new.to_coordinates()), + self.params.limb_width, + self.params.n_limbs, + )?; + + Ok((pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new)) + } + + pub fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + ) -> Result< + ( + AllocatedRelaxedR1CSInstance, + emulated::AllocatedEmulRelaxedR1CSInstance, + ), + SynthesisError, + > { + let U_c_default = AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate U_c_default"), + self.params.limb_width, + self.params.n_limbs, + )?; + + let U_p_default = emulated::AllocatedEmulRelaxedR1CSInstance::default( + cs.namespace(|| "Allocated U_p_default"), + self.params.limb_width, + 
self.params.n_limbs, + )?; + + // In the first folding step return the default relaxed instances for both the + // CycleFold and primary running accumulators + Ok((U_c_default, U_p_default)) + } + + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + pp_digest: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + data_p: &emulated::AllocatedFoldingData, + data_c_1: &AllocatedCycleFoldData, + data_c_2: &AllocatedCycleFoldData, + E_new: emulated::AllocatedEmulPoint, + W_new: emulated::AllocatedEmulPoint, + arity: usize, + ) -> Result< + ( + AllocatedRelaxedR1CSInstance, + emulated::AllocatedEmulRelaxedR1CSInstance, + AllocatedBit, + ), + SynthesisError, + > { + // Follows the outline written down here https://hackmd.io/@argumentcomputer/HybHrnNFT + + // Calculate the hash of the non-deterministic advice for the primary circuit + let mut ro_p = + E1::ROCircuit::new(self.ro_consts.clone(), 2 + 2 * arity + 2 * NUM_FE_IN_EMULATED_POINT + 3); + + ro_p.absorb(pp_digest); + ro_p.absorb(i); + for e in z_0 { + ro_p.absorb(e) + } + for e in z_i { + ro_p.absorb(e) + } + data_p.U.absorb_in_ro(cs.namespace(|| "absorb U_p"), &mut ro_p)?; + + let hash_bits_p = ro_p.squeeze(cs.namespace(|| "primary hash bits"), NUM_HASH_BITS)?; + let hash_p = le_bits_to_num(cs.namespace(|| "primary hash"), &hash_bits_p)?; + + // check the hash matches the public IO from the last primary instance + let check_primary = alloc_num_equals( + cs.namespace(|| "u.X[0] = H(params, i, z0, zi, U_p)"), + &data_p.u_x0, + &hash_p, + )?; + + // Calculate the hash of the non-dterministic advice for the secondary circuit + let mut ro_c = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X + ); + + ro_c.absorb(pp_digest); + ro_c.absorb(i); + data_c_1.U.absorb_in_ro(cs.namespace(|| "absorb U_c"), &mut ro_c)?; + let hash_c_bits = 
ro_c.squeeze(cs.namespace(|| "cyclefold hash bits"), NUM_HASH_BITS)?; + let hash_c = le_bits_to_num(cs.namespace(|| "cyclefold hash"), &hash_c_bits)?; + + // check the hash matches the public IO from the last primary instance + let check_cyclefold = + alloc_num_equals(cs.namespace(|| "u.X[1] = H(params, U_c)"), &data_p.u_x1, &hash_c)?; + + let check_io = + AllocatedBit::and(cs.namespace(|| "both IOs match"), &check_primary, &check_cyclefold)?; + + // Run NIVC.V on U_c, u_c_1, T_c_1 + let U_int = data_c_1.apply_fold( + cs.namespace(|| "fold u_c_1 into U_c"), + pp_digest, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // Calculate h_int = H(pp, U_c_int) + let mut ro_c_int = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X + ); + ro_c_int.absorb(pp_digest); + U_int.absorb_in_ro(cs.namespace(|| "absorb U_c_int"), &mut ro_c_int)?; + let h_c_int_bits = + ro_c_int.squeeze(cs.namespace(|| "intermediate hash bits"), NUM_HASH_BITS)?; + let h_c_int = le_bits_to_num(cs.namespace(|| "intermediate hash"), &h_c_int_bits)?; + + // Calculate h_1 = H(pp, U_c_1) + let mut ro_c_1 = E1::ROCircuit::new( + self.ro_consts.clone(), + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X + ); + + ro_c_1.absorb(pp_digest); + data_c_2.U.absorb_in_ro(cs.namespace(|| "absorb U_c_1"), &mut ro_c_1)?; + let h_c_1_bits = ro_c_1.squeeze(cs.namespace(|| "cyclefold_1 hash bits"), NUM_HASH_BITS)?; + let h_c_1 = le_bits_to_num(cs.namespace(|| "cyclefold_1 hash"), &h_c_1_bits)?; + + // Check the intermediate-calculated running instance matches the + // non-deterministic advice provided to the prover + let check_cyclefold_int = alloc_num_equals(cs.namespace(|| "h_int = h_c_1"), &h_c_int, &h_c_1)?; + + let checks_pass = + AllocatedBit::and(cs.namespace(|| "all checks passed"), &check_io, &check_cyclefold_int)?; + + // calculate the folded CycleFold accumulator + let U_c = 
data_c_2.apply_fold( + cs.namespace(|| "fold u_c_2 into U_c_1"), + pp_digest, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // calculate the folded primary circuit accumulator + let U_p = data_p.U.fold_with_r1cs( + cs.namespace(|| "fold u_p into U_p"), + pp_digest, + W_new, + E_new, + &data_p.u_W, + &data_p.u_x0, + &data_p.u_x1, + &data_p.T, + self.ro_consts.clone(), + )?; + + Ok((U_c, U_p, checks_pass)) + } + + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result>, SynthesisError> { + // Circuit is documented here: https://hackmd.io/SBvAur_2RQmaduDi7gYbhw + let arity = self.step_circuit.arity(); + + // Allocate the witness + let (pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new) = + self.alloc_witness(cs.namespace(|| "alloc_witness"), arity)?; + + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "is base case"), &i, &zero)?; + + let (U_new_c_base, U_new_p_base) = self.synthesize_base_case(cs.namespace(|| "base case"))?; + + let (U_new_c_non_base, U_new_p_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + &pp_digest, + &i, + &z_0, + &z_i, + &data_p, + &data_c_1, + &data_c_2, + E_new, + W_new, + arity, + )?; + + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // select the new running primary instance + let Unew_p = U_new_p_base.conditionally_select( + cs.namespace(|| "compute Unew_p"), + &U_new_p_non_base, + &Boolean::from(is_base_case.clone()), + )?; + + // select the new running CycleFold instance + let Unew_c = U_new_c_base.conditionally_select( + cs.namespace(|| "compute Unew_c"), + &U_new_c_non_base, + &Boolean::from(is_base_case.clone()), 
+ )?; + + // Compute i + 1 + let i_new = + AllocatedNum::alloc(cs.namespace(|| "i + 1"), || Ok(*i.get_value().get()? + E1::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc, + |lc| lc, + |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + let (_pc, z_next) = self.step_circuit.synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // Calculate the first component of the public IO as the hash of the calculated + // primary running instance + let mut ro_p = E1::ROCircuit::new( + self.ro_consts.clone(), + 2 + 2 * arity + (2 * NUM_FE_IN_EMULATED_POINT + 3), // pp + i + z_0 + z_next + (U_p) + ); + ro_p.absorb(&pp_digest); + ro_p.absorb(&i_new); + for e in &z_0 { + ro_p.absorb(e); + } + for e in &z_next { + ro_p.absorb(e); + } + Unew_p.absorb_in_ro(cs.namespace(|| "absorb Unew_p"), &mut ro_p)?; + let hash_p_bits = ro_p.squeeze(cs.namespace(|| "hash_p_bits"), NUM_HASH_BITS)?; + let hash_p = le_bits_to_num(cs.namespace(|| "hash_p"), &hash_p_bits)?; + + // Calculate the second component of the public IO as the hash of the calculated + // CycleFold running instance + let mut ro_c = E1::ROCircuit::new( + self.ro_consts, + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X + ); + ro_c.absorb(&pp_digest); + ro_c.absorb(&i_new); + Unew_c.absorb_in_ro(cs.namespace(|| "absorb Unew_c"), &mut ro_c)?; + let hash_c_bits = ro_c.squeeze(cs.namespace(|| "hash_c_bits"), NUM_HASH_BITS)?; + let hash_c = le_bits_to_num(cs.namespace(|| "hash_c"), &hash_c_bits)?; + + hash_p.inputize(cs.namespace(|| "u_p.x[0] = hash_p"))?; + hash_c.inputize(cs.namespace(|| "u_p.x[1] = hash_c"))?; + + Ok(z_next) + } +} + +// #[cfg(test)] +// mod test { +// use expect_test::{expect, 
Expect}; + +// use super::*; +// use crate::{ +// bellpepper::test_shape_cs::TestShapeCS, +// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, +// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, +// traits::{circuit::TrivialCircuit, CurveCycleEquipped, Dual}, +// }; + +// fn test_augmented_circuit_size_with(expected_cons: &Expect, +// expected_var: &Expect) where +// E: CurveCycleEquipped, +// { +// let params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); + +// let ro_consts = ROConstantsCircuit::::default(); + +// let step_circuit = TrivialCircuit::::default(); + +// let circuit = AugmentedCircuit::, +// TrivialCircuit>::new( ¶ms, +// ro_consts, +// None, +// &step_circuit, +// ); +// let mut cs: TestShapeCS> = TestShapeCS::default(); + +// let res = circuit.synthesize(&mut cs); + +// res.unwrap(); + +// let num_constraints = cs.num_constraints(); +// let num_variables = cs.num_aux(); + +// expected_cons.assert_eq(&num_constraints.to_string()); +// expected_var.assert_eq(&num_variables.to_string()); +// } + +// #[test] +// fn test_augmented_circuit_size() { +// test_augmented_circuit_size_with::(&expect!["33289"], +// &expect!["33323"]); +// test_augmented_circuit_size_with::(&expect!["35125" +// ], &expect!["35159"]); +// test_augmented_circuit_size_with::(&expect!["33856"], +// &expect!["33890"]); } +// } diff --git a/prover/src/cyclefold/snark.rs b/prover/src/cyclefold/snark.rs new file mode 100644 index 0000000..1ac176a --- /dev/null +++ b/prover/src/cyclefold/snark.rs @@ -0,0 +1,555 @@ +//! This module defines the Cyclefold `RecursiveSNARK` type with its `new`, +//! `prove_step`, and `verify` methods. 
+ +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::PrimeFieldBits; +use once_cell::sync::OnceCell; +use serde::{Deserialize, Serialize}; + +use super::{ + nifs::{CycleFoldNIFS, PrimaryNIFS}, + nova_circuit::{AugmentedCircuit, AugmentedCircuitInputs, AugmentedCircuitParams}, + util::{absorb_primary_relaxed_r1cs, FoldingData}, +}; +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::{ + BN_LIMB_WIDTH, BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT, + NUM_HASH_BITS, + }, + cyclefold::circuit::CycleFoldCircuit, + errors::NovaError, + gadgets::scalar_as_base, + r1cs::{ + self, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSWitness, RelaxedR1CSInstance, + RelaxedR1CSWitness, + }, + supernova::StepCircuit, + traits::{ + commitment::CommitmentTrait, AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, + ROConstantsCircuit, ROTrait, + }, + Commitment, CommitmentKey, DigestComputer, R1CSWithArity, ROConstants, ResourceBuffer, + SimpleDigestible, +}; + +/// The public parameters used in the CycleFold recursive SNARK proof and +/// verification +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E1: CurveCycleEquipped { + F_arity_primary: usize, + ro_consts_primary: ROConstants>, + ro_consts_circuit_primary: ROConstantsCircuit>, + ck_primary: CommitmentKey, + circuit_shape_primary: R1CSWithArity, + augmented_circuit_params: AugmentedCircuitParams, + + ro_consts_cyclefold: ROConstants>, + ck_cyclefold: CommitmentKey>, + circuit_shape_cyclefold: R1CSWithArity>, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Builds the public parameters for the circuit `C1`. 
+  /// The same note for public parameter hints applies as in the case for Nova's
+  /// public parameters: For some final compressing SNARKs the size of the
+  /// commitment key must be larger, so we include `ck_hint_primary` and
+  /// `ck_hint_cyclefold` parameters to accommodate this.
+  pub fn setup<C1: StepCircuit<E1::Scalar>>(
+    c_primary: &C1,
+    ck_hint_primary: &CommitmentKeyHint<E1>,
+    ck_hint_cyclefold: &CommitmentKeyHint<Dual<E1>>,
+  ) -> Self {
+    let F_arity_primary = c_primary.arity();
+    let ro_consts_primary = ROConstants::<Dual<E1>>::default();
+    let ro_consts_circuit_primary = ROConstantsCircuit::<Dual<E1>>::default();
+
+    let augmented_circuit_params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS);
+    let circuit_primary: AugmentedCircuit<'_, Dual<E1>, E1, C1> = AugmentedCircuit::new(
+      &augmented_circuit_params,
+      ro_consts_circuit_primary.clone(),
+      None,
+      c_primary,
+    );
+    let mut cs: ShapeCS<E1> = ShapeCS::new();
+    let _ = circuit_primary.synthesize(&mut cs);
+    let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint_primary);
+    let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary);
+
+    let ro_consts_cyclefold = ROConstants::<Dual<E1>>::default();
+    let mut cs: ShapeCS<Dual<E1>> = ShapeCS::new();
+    let circuit_cyclefold: CycleFoldCircuit<E1> = CycleFoldCircuit::default();
+    let _ = circuit_cyclefold.synthesize(&mut cs);
+    let (r1cs_shape_cyclefold, ck_cyclefold) = cs.r1cs_shape_and_key(ck_hint_cyclefold);
+    let circuit_shape_cyclefold = R1CSWithArity::new(r1cs_shape_cyclefold, 0);
+
+    Self {
+      F_arity_primary,
+      ro_consts_primary,
+      ro_consts_circuit_primary,
+      ck_primary,
+      circuit_shape_primary,
+      augmented_circuit_params,
+      ro_consts_cyclefold,
+      ck_cyclefold,
+      circuit_shape_cyclefold,
+      digest: OnceCell::new(),
+    }
+  }
+
+  /// Calculate the digest of the public parameters.
+ pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints in the primary and cyclefold circuits + pub const fn num_constraints(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_cons, + self.circuit_shape_cyclefold.r1cs_shape.num_cons, + ) + } + + /// Returns the number of variables in the primary and cyclefold circuits + pub const fn num_variables(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_vars, + self.circuit_shape_cyclefold.r1cs_shape.num_vars, + ) + } +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +/// A SNARK that proves the correct execution of an incremental computation in +/// the CycleFold folding scheme. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + // Input + z0_primary: Vec, + + // primary circuit data + r_W_primary: RelaxedR1CSWitness, + r_U_primary: RelaxedR1CSInstance, + l_w_primary: R1CSWitness, + l_u_primary: R1CSInstance, + + // cyclefold circuit data + r_W_cyclefold: RelaxedR1CSWitness>, + r_U_cyclefold: RelaxedR1CSInstance>, + + // memory buffers for folding steps + buffer_primary: ResourceBuffer, + buffer_cyclefold: ResourceBuffer>, + + i: usize, + zi_primary: Vec, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// Create a new instance of a recursive SNARK + pub fn new>( + pp: &PublicParams, + c_primary: &C1, + z0_primary: &[E1::Scalar], + ) -> Result { + if z0_primary.len() != pp.F_arity_primary { + return Err(NovaError::InvalidInitialInputLength); + } + + let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; + let r1cs_cyclefold = &pp.circuit_shape_cyclefold.r1cs_shape; + + let r_U_cyclefold = RelaxedR1CSInstance::default(&pp.ck_cyclefold, r1cs_cyclefold); + let r_W_cyclefold = 
RelaxedR1CSWitness::default(r1cs_cyclefold); + + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + as Engine>::Base::from(0u64), + z0_primary.to_vec(), + None, + None, + None, + None, + None, + None, + ); + + let circuit_primary = AugmentedCircuit::new( + &pp.augmented_circuit_params, + pp.ro_consts_circuit_primary.clone(), + Some(inputs_primary), + c_primary, + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + let (l_u_primary, l_w_primary) = + cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; + + let r_U_primary = RelaxedR1CSInstance::default(&pp.ck_primary, r1cs_primary); + let r_W_primary = RelaxedR1CSWitness::default(r1cs_primary); + + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + + let buffer_primary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), + T: r1cs::default_T::(r1cs_primary.num_cons), + }; + + let buffer_cyclefold = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_cyclefold.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_cyclefold.num_cons), + T: r1cs::default_T::>(r1cs_cyclefold.num_cons), + }; + + Ok(Self { + z0_primary: z0_primary.to_vec(), + r_W_primary, + r_U_primary, + l_w_primary, + l_u_primary, + r_W_cyclefold, + r_U_cyclefold, + buffer_primary, + buffer_cyclefold, + i: 0, + zi_primary, + }) + } + + /// Update the `RecursiveSNARK` by proving a step of the incremental + /// computation. 
+ pub fn prove_step>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + ) -> Result<(), NovaError> { + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + let (nifs_primary, (r_U_primary, r_W_primary), r) = PrimaryNIFS::>::prove( + &pp.ck_primary, + &pp.ro_consts_primary, + &pp.digest(), + &pp.circuit_shape_primary.r1cs_shape, + &self.r_U_primary, + &self.r_W_primary, + &self.l_u_primary, + &self.l_w_primary, + )?; + + let r_bools = r + .to_le_bits() + .iter() + .map(|b| Some(*b)) + .take(NUM_CHALLENGE_BITS) + .collect::>>() + .map(|v| v.try_into().unwrap()); + + let comm_T = Commitment::::decompress(&nifs_primary.comm_T)?; + let E_new = self.r_U_primary.comm_E + comm_T * r; + + let W_new = self.r_U_primary.comm_W + self.l_u_primary.comm_W * r; + + let mut cs_cyclefold_E = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, + pp.circuit_shape_cyclefold.r1cs_shape.num_vars, + ); + + let circuit_cyclefold_E: CycleFoldCircuit = + CycleFoldCircuit::new(Some(self.r_U_primary.comm_E), Some(comm_T), r_bools); + + let _ = circuit_cyclefold_E.synthesize(&mut cs_cyclefold_E); + + let (l_u_cyclefold_E, l_w_cyclefold_E) = cs_cyclefold_E + .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) + .map_err(|_| NovaError::UnSat)?; + + // TODO: check if this is better or worse than `prove_mut` with a clone of + // `self.r_U_cyclefold` + let (nifs_cyclefold_E, (r_U_cyclefold_E, r_W_cyclefold_E)) = CycleFoldNIFS::prove( + &pp.ck_cyclefold, + &pp.ro_consts_cyclefold, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_cyclefold.r1cs_shape, + &self.r_U_cyclefold, + &self.r_W_cyclefold, + &l_u_cyclefold_E, + &l_w_cyclefold_E, + )?; + + let comm_T_E = Commitment::>::decompress(&nifs_cyclefold_E.comm_T)?; + + let mut cs_cyclefold_W = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, + pp.circuit_shape_cyclefold.r1cs_shape.num_vars, + ); + + let circuit_cyclefold_W: 
CycleFoldCircuit = + CycleFoldCircuit::new(Some(self.r_U_primary.comm_W), Some(self.l_u_primary.comm_W), r_bools); + + let _ = circuit_cyclefold_W.synthesize(&mut cs_cyclefold_W); + + let (l_u_cyclefold_W, l_w_cyclefold_W) = cs_cyclefold_W + .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) + .map_err(|_| NovaError::UnSat)?; + + // TODO: check if this is better or worse than `prove_mut` with a clone of + // r_U_cyclefold_E + let (nifs_cyclefold_W, (r_U_cyclefold_W, r_W_cyclefold_W)) = CycleFoldNIFS::prove( + &pp.ck_cyclefold, + &pp.ro_consts_cyclefold, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_cyclefold.r1cs_shape, + &r_U_cyclefold_E, + &r_W_cyclefold_E, + &l_u_cyclefold_W, + &l_w_cyclefold_W, + )?; + + let comm_T_W = Commitment::>::decompress(&nifs_cyclefold_W.comm_T)?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp.circuit_shape_primary.r1cs_shape.num_io + 1, + pp.circuit_shape_primary.r1cs_shape.num_vars, + ); + + let data_p = FoldingData::new(self.r_U_primary.clone(), self.l_u_primary.clone(), comm_T); + let data_c_E = FoldingData::new(self.r_U_cyclefold.clone(), l_u_cyclefold_E, comm_T_E); + let data_c_W = FoldingData::new(r_U_cyclefold_E, l_u_cyclefold_W, comm_T_W); + + let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + as Engine>::Base::from(self.i as u64), + self.z0_primary.clone(), + Some(self.zi_primary.clone()), + Some(data_p), + Some(data_c_E), + Some(data_c_W), + Some(E_new), + Some(W_new), + ); + + let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( + &pp.augmented_circuit_params, + pp.ro_consts_circuit_primary.clone(), + Some(inputs_primary), + c_primary, + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + + let (l_u_primary, l_w_primary) = cs_primary + .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary) + .map_err(|_| NovaError::UnSat)?; + + 
self.zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + + self.r_U_primary = r_U_primary; + self.r_W_primary = r_W_primary; + self.l_u_primary = l_u_primary; + self.l_w_primary = l_w_primary; + self.r_U_cyclefold = r_U_cyclefold_W; + self.r_W_cyclefold = r_W_cyclefold_W; + + self.i += 1; + + Ok(()) + } + + /// Verify the correctness of the `RecursiveSNARK` + pub fn verify( + &self, + pp: &PublicParams, + num_steps: usize, + z0_primary: &[E1::Scalar], + ) -> Result, NovaError> { + // number of steps cannot be zero + let is_num_steps_zero = num_steps == 0; + + // check if the provided proof has executed num_steps + let is_num_steps_not_match = self.i != num_steps; + + // check if the initial inputs match + let is_inputs_not_match = self.z0_primary != z0_primary; + + // check if the (relaxed) R1CS instances have two public outputs + let is_instance_has_two_outputs = self.r_U_primary.X.len() != 2; + + if is_num_steps_zero + || is_num_steps_not_match + || is_inputs_not_match + || is_instance_has_two_outputs + { + return Err(NovaError::ProofVerifyError); + } + + // Calculate the hashes of the primary running instance and cyclefold running + // instance + let (hash_primary, hash_cyclefold) = { + let mut hasher = as Engine>::RO::new( + pp.ro_consts_primary.clone(), + 2 + 2 * pp.F_arity_primary + 2 * NUM_FE_IN_EMULATED_POINT + 3, + ); + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + absorb_primary_relaxed_r1cs::>(&self.r_U_primary, &mut hasher); + let hash_primary = hasher.squeeze(NUM_HASH_BITS); + + let mut hasher = as Engine>::RO::new( + pp.ro_consts_cyclefold.clone(), + 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, + ); + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + self.r_U_cyclefold.absorb_in_ro(&mut hasher); + let 
hash_cyclefold = hasher.squeeze(NUM_HASH_BITS); + + (hash_primary, hash_cyclefold) + }; + + // Verify the hashes equal the public IO for the final primary instance + if scalar_as_base::>(hash_primary) != self.l_u_primary.X[0] + || scalar_as_base::>(hash_cyclefold) != self.l_u_primary.X[1] + { + return Err(NovaError::ProofVerifyError); + } + + // Verify the satisfiability of running relaxed instances, and the final primary + // instance. + let (res_r_primary, (res_l_primary, res_r_cyclefold)) = rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( + &pp.ck_primary, + &self.r_U_primary, + &self.r_W_primary, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat( + &pp.ck_primary, + &self.l_u_primary, + &self.l_w_primary, + ) + }, + || { + pp.circuit_shape_cyclefold.r1cs_shape.is_sat_relaxed( + &pp.ck_cyclefold, + &self.r_U_cyclefold, + &self.r_W_cyclefold, + ) + }, + ) + }, + ); + + res_r_primary?; + res_l_primary?; + res_r_cyclefold?; + + Ok(self.zi_primary.clone()) + } +} + +// #[cfg(test)] +// mod test { +// use std::marker::PhantomData; + +// use bellpepper_core::num::AllocatedNum; + +// use super::*; +// use crate::{ +// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, +// traits::snark::default_ck_hint, +// }; + +// #[derive(Clone)] +// struct SquareCircuit { +// _p: PhantomData, +// } + +// impl StepCircuit for SquareCircuit { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; +// let x_sq = x.square(cs.namespace(|| "x_sq"))?; + +// Ok(vec![x_sq]) +// } +// } + +// fn test_trivial_cyclefold_prove_verify_with() { +// let primary_circuit = SquareCircuit:: { _p: PhantomData }; + +// let pp = +// PublicParams::::setup(&primary_circuit, &*default_ck_hint(), +// &*default_ck_hint()); + +// let z0 = vec![E::Scalar::from(2u64)]; + +// let mut recursive_snark = RecursiveSNARK::new(&pp, 
&primary_circuit, +// &z0).unwrap(); + +// (1..5).for_each(|iter| { +// let res_proof = recursive_snark.prove_step(&pp, +// &primary_circuit); res_proof.unwrap(); + +// let res_verify = recursive_snark.verify(&pp, iter, &z0); +// res_verify.unwrap(); +// }); +// } + +// #[test] +// fn test_cyclefold_prove_verify() { +// test_trivial_cyclefold_prove_verify_with::(); +// test_trivial_cyclefold_prove_verify_with::(); +// test_trivial_cyclefold_prove_verify_with::(); +// } +// } diff --git a/prover/src/cyclefold/util.rs b/prover/src/cyclefold/util.rs new file mode 100644 index 0000000..a23e8b4 --- /dev/null +++ b/prover/src/cyclefold/util.rs @@ -0,0 +1,87 @@ +//! This module defines some useful utilities for RO absorbing, and the Folding +//! data used in the CycleFold folding scheme. + +use ff::Field; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, + Commitment, +}; + +/// Absorb a commitment over engine `E1` into an RO over engine `E2` by +/// absorbing the limbs +pub(super) fn absorb_primary_commitment( + comm: &impl CommitmentTrait, + ro: &mut impl ROTrait, +) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + let (x, y, is_infinity) = comm.to_coordinates(); + + let x_limbs = nat_to_limbs(&f_to_nat(&x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + let y_limbs = nat_to_limbs(&f_to_nat(&y), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + + for limb in x_limbs { + ro.absorb(scalar_as_base::(limb)); + } + for limb in y_limbs { + ro.absorb(scalar_as_base::(limb)); + } + if is_infinity { + ro.absorb(::Scalar::ONE); + } else { + ro.absorb(::Scalar::ZERO); + } +} + +pub(super) fn absorb_primary_r1cs( + u: &R1CSInstance, + ro: &mut impl ROTrait, +) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, +{ + absorb_primary_commitment::(&u.comm_W, ro); + for x in &u.X { 
+ ro.absorb(*x); + } +} + +pub(super) fn absorb_cyclefold_r1cs(u: &R1CSInstance, ro: &mut E::RO) { + u.comm_W.absorb_in_ro(ro); + u.X.iter().for_each(|x| { + let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + limbs.into_iter().for_each(|limb| ro.absorb(scalar_as_base::(limb))); + }); +} + +pub(super) fn absorb_primary_relaxed_r1cs(U: &RelaxedR1CSInstance, ro: &mut E2::RO) +where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + absorb_primary_commitment::(&U.comm_W, ro); + absorb_primary_commitment::(&U.comm_E, ro); + ro.absorb(U.u); + for e in &U.X { + ro.absorb(*e); + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub(super) struct FoldingData { + pub U: RelaxedR1CSInstance, + pub u: R1CSInstance, + pub T: Commitment, +} + +impl FoldingData { + pub fn new(U: RelaxedR1CSInstance, u: R1CSInstance, T: Commitment) -> Self { + Self { U, u, T } + } +} diff --git a/prover/src/digest.rs b/prover/src/digest.rs new file mode 100644 index 0000000..4063ede --- /dev/null +++ b/prover/src/digest.rs @@ -0,0 +1,136 @@ +use std::{io, marker::PhantomData}; + +use bincode::Options; +use ff::PrimeField; +use serde::Serialize; +use sha3::{Digest, Sha3_256}; + +use crate::constants::NUM_HASH_BITS; + +/// Trait for components with potentially discrete digests to be included in +/// their container's digest. +pub trait Digestible { + /// Write the byte representation of Self in a byte buffer + fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error>; +} + +/// Marker trait to be implemented for types that implement `Digestible` and +/// `Serialize`. Their instances will be serialized to bytes then digested. +pub trait SimpleDigestible: Serialize {} + +impl Digestible for T { + fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error> { + let config = bincode::DefaultOptions::new().with_little_endian().with_fixint_encoding(); + // Note: bincode recursively length-prefixes every field! 
+    config
+      .serialize_into(byte_sink, self)
+      .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
+  }
+}
+
+pub struct DigestComputer<'a, F, T> {
+  inner: &'a T,
+  _phantom: PhantomData<F>,
+}
+
+impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> {
+  fn hasher() -> Sha3_256 { Sha3_256::new() }
+
+  fn map_to_field(digest: &[u8]) -> F {
+    let bv = (0..NUM_HASH_BITS).map(|i| {
+      let (byte_pos, bit_pos) = (i / 8, i % 8);
+      let bit = (digest[byte_pos] >> bit_pos) & 1;
+      bit == 1
+    });
+
+    // turn the bit vector into a scalar
+    let mut digest = F::ZERO;
+    let mut coeff = F::ONE;
+    for bit in bv {
+      if bit {
+        digest += coeff;
+      }
+      coeff += coeff;
+    }
+    digest
+  }
+
+  /// Create a new `DigestComputer`
+  pub fn new(inner: &'a T) -> Self { DigestComputer { inner, _phantom: PhantomData } }
+
+  /// Compute the digest of a `Digestible` instance.
+  pub fn digest(&self) -> Result<F, io::Error> {
+    let mut hasher = Self::hasher();
+    self.inner.write_bytes(&mut hasher)?;
+    let bytes: [u8; 32] = hasher.finalize().into();
+    Ok(Self::map_to_field(&bytes))
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use ff::Field;
+  use once_cell::sync::OnceCell;
+  use serde::{Deserialize, Serialize};
+
+  use super::{DigestComputer, SimpleDigestible};
+  use crate::{provider::GrumpkinEngine, traits::Engine};
+
+  type E = GrumpkinEngine;
+
+  #[derive(Serialize, Deserialize)]
+  struct S<E: Engine> {
+    i: usize,
+    #[serde(skip, default = "OnceCell::new")]
+    digest: OnceCell<E::Scalar>,
+  }
+
+  impl<E: Engine> SimpleDigestible for S<E> {}
+
+  impl<E: Engine> S<E> {
+    fn new(i: usize) -> Self { Self { i, digest: OnceCell::new() } }
+
+    fn digest(&self) -> E::Scalar {
+      self.digest.get_or_try_init(|| DigestComputer::new(self).digest()).cloned().unwrap()
+    }
+  }
+
+  #[test]
+  fn test_digest_field_not_ingested_in_computation() {
+    let s1 = S::<E>::new(42);
+
+    // let's set up a struct with a weird digest field to make sure the digest
+    // computation does not depend on it
+    let oc = OnceCell::new();
+    oc.set(<E as Engine>::Scalar::ONE).unwrap();
+
+    let s2: S<E> = S { i: 42,
digest: oc }; + + assert_eq!( + DigestComputer::<::Scalar, _>::new(&s1).digest().unwrap(), + DigestComputer::<::Scalar, _>::new(&s2).digest().unwrap() + ); + + // note: because of the semantics of `OnceCell::get_or_try_init`, the above + // equality will not result in `s1.digest() == s2.digest` + assert_ne!(s2.digest(), DigestComputer::<::Scalar, _>::new(&s2).digest().unwrap()); + } + + #[test] + fn test_digest_impervious_to_serialization() { + let good_s = S::::new(42); + + // let's set up a struct with a weird digest field to confuse deserializers + let oc = OnceCell::new(); + oc.set(::Scalar::ONE).unwrap(); + + let bad_s: S = S { i: 42, digest: oc }; + // this justifies the adjective "bad" + assert_ne!(good_s.digest(), bad_s.digest()); + + let naughty_bytes = bincode::serialize(&bad_s).unwrap(); + + let retrieved_s: S = bincode::deserialize(&naughty_bytes).unwrap(); + assert_eq!(good_s.digest(), retrieved_s.digest()) + } +} diff --git a/prover/src/errors.rs b/prover/src/errors.rs new file mode 100644 index 0000000..1e91e53 --- /dev/null +++ b/prover/src/errors.rs @@ -0,0 +1,99 @@ +//! This module defines errors returned by the library. 
+use core::fmt::Debug; + +use thiserror::Error; + +/// Errors returned by Nova +#[derive(Debug, Eq, PartialEq, Error)] +#[non_exhaustive] +pub enum NovaError { + /// returned if the supplied row or col in (row,col,val) tuple is out of + /// range + #[error("InvalidIndex")] + InvalidIndex, + /// returned if the step circuit calls inputize or alloc_io in its + /// synthesize method instead of passing output with the return value + #[error("InvalidStepCircuitIO")] + InvalidStepCircuitIO, + /// returned if the supplied input is not of the right length + #[error("InvalidInputLength")] + InvalidInputLength, + /// returned if the supplied witness is not of the right length + #[error("InvalidWitnessLength")] + InvalidWitnessLength, + /// returned if the supplied witness is not a satisfying witness to a given + /// shape and instance + #[error("UnSat")] + UnSat, + /// returned if the supplied witness is not a satisfying witness to a given + /// shape and instance, with error constraint index + #[error("UnSatIndex")] + UnSatIndex(usize), + /// returned when the supplied compressed commitment cannot be decompressed + #[error("DecompressionError")] + DecompressionError, + /// returned if proof verification fails + #[error("ProofVerifyError")] + ProofVerifyError, + /// returned if the provided commitment key is not of sufficient length + #[error("InvalidCommitmentKeyLength")] + InvalidCommitmentKeyLength, + /// returned if the provided number of steps is zero + #[error("InvalidNumSteps")] + InvalidNumSteps, + /// returned if there is an error in the proof/verification of a PCS + #[error("PCSError")] + PCSError(#[from] PCSError), + /// returned when an invalid sum-check proof is provided + #[error("InvalidSumcheckProof")] + InvalidSumcheckProof, + /// returned when the initial input to an incremental computation differs + /// from a previously declared arity + #[error("InvalidInitialInputLength")] + InvalidInitialInputLength, + /// returned when the step execution produces an 
output whose length differs
+  /// from a previously declared arity
+  #[error("InvalidStepOutputLength")]
+  InvalidStepOutputLength,
+  /// returned when the transcript engine encounters an overflow of the round
+  /// number
+  #[error("InternalTranscriptError")]
+  InternalTranscriptError,
+  /// returned when the multiset check fails
+  #[error("InvalidMultisetProof")]
+  InvalidMultisetProof,
+  /// returned when the product proof check fails
+  #[error("InvalidProductProof")]
+  InvalidProductProof,
+  /// returned when the consistency with public IO and assignment used fails
+  #[error("IncorrectWitness")]
+  IncorrectWitness,
+  /// returned when an error occurs during synthesis
+  #[error("SynthesisError: {0}")]
+  SynthesisError(String),
+  /// returned when there is an error creating a digest
+  #[error("DigestError")]
+  DigestError,
+  /// returned when the prover cannot prove the provided statement due to
+  /// completeness error
+  #[error("InternalError")]
+  InternalError,
+}
+
+/// Errors specific to the Polynomial commitment scheme
+#[derive(Debug, Eq, PartialEq, Error)]
+pub enum PCSError {
+  /// returned when an invalid PCS evaluation argument is provided
+  #[error("InvalidPCS")]
+  InvalidPCS,
+  /// returned when there is a Zeromorph error
+  #[error("ZMError")]
+  ZMError,
+  /// returned when a length check fails in a PCS
+  #[error("LengthError")]
+  LengthError,
+}
+
+impl From<bellpepper_core::SynthesisError> for NovaError {
+  fn from(err: bellpepper_core::SynthesisError) -> Self { Self::SynthesisError(err.to_string()) }
+}
diff --git a/prover/src/fast_serde.rs b/prover/src/fast_serde.rs
new file mode 100644
index 0000000..5279c20
--- /dev/null
+++ b/prover/src/fast_serde.rs
@@ -0,0 +1,100 @@
+//! This module implements fast serde for reading and writing
+//! key objects required for proof generation and verification.
+//! With WASM in particular, serializing via standard binary serializers
+//! like bincode causes a dramatic decrease in performance. This simple
+//! serializer parses bytes very efficiently.
+//! +//! In the future, it can be extended to do direct memory access to the +//! javascript runtime. For now it does a single copy of the data into +//! the rust runtime. + +use std::io::{Cursor, Read}; + +use thiserror::Error; + +pub static MAGIC_NUMBER: [u8; 4] = [0x50, 0x4C, 0x55, 0x54]; +pub enum SerdeByteTypes { + AuxParams = 0x01, + UniversalKZGParam = 0x02, + CommitmentKey = 0x03, + ProverParams = 0x04, +} + +#[derive(Debug, Error)] +pub enum SerdeByteError { + #[error("{}", "invalid magic number")] + InvalidMagicNumber, + #[error("{}", "invalid serde type")] + InvalidSerdeType, + #[error("{}", "invalid section count")] + InvalidSectionCount, + #[error("{}", "invalid section type")] + InvalidSectionType, + #[error("{}", "invalid section size")] + InvalidSectionSize, + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error(transparent)] + BincodeError(#[from] Box), + #[error("{}", "g1 decode error")] + G1DecodeError, + #[error("{}", "g2 decode error")] + G2DecodeError, +} + +/// A trait for fast conversions to bytes +pub trait FastSerde: Sized { + fn to_bytes(&self) -> Vec; + fn from_bytes(bytes: &[u8]) -> Result; + + fn validate_header( + cursor: &mut Cursor<&[u8]>, + expected_type: SerdeByteTypes, + expected_sections: u8, + ) -> Result<(), SerdeByteError> { + let mut magic = [0u8; 4]; + cursor.read_exact(&mut magic)?; + if magic != MAGIC_NUMBER { + return Err(SerdeByteError::InvalidMagicNumber); + } + + let mut serde_type = [0u8; 1]; + cursor.read_exact(&mut serde_type)?; + if serde_type[0] != expected_type as u8 { + return Err(SerdeByteError::InvalidSerdeType); + } + + let mut num_sections = [0u8; 1]; + cursor.read_exact(&mut num_sections)?; + if num_sections[0] != expected_sections { + return Err(SerdeByteError::InvalidSectionCount); + } + + Ok(()) + } + + fn read_section_bytes( + cursor: &mut Cursor<&[u8]>, + expected_type: u8, + ) -> Result, SerdeByteError> { + let mut section_type = [0u8; 1]; + cursor.read_exact(&mut section_type)?; + if 
section_type[0] != expected_type { + return Err(SerdeByteError::InvalidSectionType); + } + + let mut section_size = [0u8; 4]; + cursor.read_exact(&mut section_size)?; + let size = u32::from_le_bytes(section_size) as usize; + let mut section_data = vec![0u8; size]; + cursor.read_exact(&mut section_data)?; + + Ok(section_data) + } + + fn write_section_bytes(out: &mut Vec, section_type: u8, data: &[u8]) { + out.push(section_type); + out.extend_from_slice(&(data.len() as u32).to_le_bytes()); + out.extend_from_slice(data); + } +} diff --git a/prover/src/gadgets/ecc.rs b/prover/src/gadgets/ecc.rs new file mode 100644 index 0000000..e617658 --- /dev/null +++ b/prover/src/gadgets/ecc.rs @@ -0,0 +1,1071 @@ +//! This module implements various elliptic curve gadgets +#![allow(non_snake_case)] +use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{Field, PrimeField}; + +use crate::{ + gadgets::utils::{ + alloc_num_equals, alloc_one, alloc_zero, conditionally_select2, select_num_or_one, + select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, + select_zero_or_num2, + }, + traits::Group, +}; + +/// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. +#[derive(Debug, Clone)] +pub struct AllocatedPoint { + pub(crate) x: AllocatedNum, + pub(crate) y: AllocatedNum, + pub(crate) is_infinity: AllocatedNum, +} + +impl AllocatedPoint { + /// Allocates a new point on the curve using coordinates provided by + /// `coords`. 
If coords = None, it allocates the default infinity point + pub fn alloc>( + mut cs: CS, + coords: Option<(G::Base, G::Base, bool)>, + ) -> Result { + let x = + AllocatedNum::alloc(cs.namespace(|| "x"), || Ok(coords.map_or(G::Base::ZERO, |c| c.0)))?; + let y = + AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(coords.map_or(G::Base::ZERO, |c| c.1)))?; + let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { + Ok(if coords.is_none_or(|c| c.2) { G::Base::ONE } else { G::Base::ZERO }) + })?; + cs.enforce( + || "is_infinity is bit", + |lc| lc + is_infinity.get_variable(), + |lc| lc + CS::one() - is_infinity.get_variable(), + |lc| lc, + ); + + Ok(Self { x, y, is_infinity }) + } + + /// checks if `self` is on the curve or if it is infinity + pub fn check_on_curve(&self, mut cs: CS) -> Result<(), SynthesisError> + where CS: ConstraintSystem { + // check that (x,y) is on the curve if it is not infinity + // we will check that (1- is_infinity) * y^2 = (1-is_infinity) * (x^3 + Ax + B) + // note that is_infinity is already restricted to be in the set {0, 1} + let y_square = self.y.square(cs.namespace(|| "y_square"))?; + let x_square = self.x.square(cs.namespace(|| "x_square"))?; + let x_cube = self.x.mul(cs.namespace(|| "x_cube"), &x_square)?; + + let rhs = AllocatedNum::alloc(cs.namespace(|| "rhs"), || { + if *self.is_infinity.get_value().get()? == G::Base::ONE { + Ok(G::Base::ZERO) + } else { + Ok( + *x_cube.get_value().get()? + + *self.x.get_value().get()? 
* G::group_params().0 + + G::group_params().1, + ) + } + })?; + + cs.enforce( + || "rhs = (1-is_infinity) * (x^3 + Ax + B)", + |lc| { + lc + x_cube.get_variable() + + (G::group_params().0, self.x.get_variable()) + + (G::group_params().1, CS::one()) + }, + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + rhs.get_variable(), + ); + + // check that (1-infinity) * y_square = rhs + cs.enforce( + || "check that y_square * (1 - is_infinity) = rhs", + |lc| lc + y_square.get_variable(), + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + rhs.get_variable(), + ); + + Ok(()) + } + + /// Allocates a default point on the curve, set to the identity point. + pub fn default>(mut cs: CS) -> Self { + let zero = alloc_zero(cs.namespace(|| "zero")); + let one = alloc_one(cs.namespace(|| "one")); + + Self { x: zero.clone(), y: zero, is_infinity: one } + } + + /// Returns coordinates associated with the point. + #[allow(unused)] + pub const fn get_coordinates( + &self, + ) -> (&AllocatedNum, &AllocatedNum, &AllocatedNum) { + (&self.x, &self.y, &self.is_infinity) + } + + /// Negates the provided point + pub fn negate>(&self, mut cs: CS) -> Result { + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(-*self.y.get_value().get()?))?; + + cs.enforce( + || "check y = - self.y", + |lc| lc + self.y.get_variable(), + |lc| lc + CS::one(), + |lc| lc - y.get_variable(), + ); + + Ok(Self { x: self.x.clone(), y, is_infinity: self.is_infinity.clone() }) + } + + /// Add two points (may be equal) + pub fn add>( + &self, + mut cs: CS, + other: &Self, + ) -> Result { + // Compute boolean equal indicating if self = other + + let equal_x = alloc_num_equals(cs.namespace(|| "check self.x == other.x"), &self.x, &other.x)?; + + let equal_y = alloc_num_equals(cs.namespace(|| "check self.y == other.y"), &self.y, &other.y)?; + + // Compute the result of the addition and the result of double self + let result_from_add = self.add_internal(cs.namespace(|| "add internal"), 
other, &equal_x)?; + let result_from_double = self.double(cs.namespace(|| "double"))?; + + // Output: + // If (self == other) { + // return double(self) + // }else { + // if (self.x == other.x){ + // return infinity [negation] + // } else { + // return add(self, other) + // } + // } + let result_for_equal_x = Self::select_point_or_infinity( + cs.namespace(|| "equal_y ? result_from_double : infinity"), + &result_from_double, + &Boolean::from(equal_y), + )?; + + Self::conditionally_select( + cs.namespace(|| "equal ? result_from_double : result_from_add"), + &result_for_equal_x, + &result_from_add, + &Boolean::from(equal_x), + ) + } + + /// Adds other point to this point and returns the result. Assumes that the + /// two points are different and that both `other.is_infinity` and + /// `this.is_infinity` are bits + pub fn add_internal>( + &self, + mut cs: CS, + other: &Self, + equal_x: &AllocatedBit, + ) -> Result { + //************************************************************************/ + // lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); + //************************************************************************/ + // First compute (other.x - self.x).inverse() + // If either self or other are the infinity point or self.x = other.x then + // compute bogus values Specifically, + // x_diff = self != inf && other != inf && self.x == other.x ? (other.x - + // self.x) : 1 + + // Compute self.is_infinity OR other.is_infinity = + // NOT(NOT(self.is_ifninity) AND NOT(other.is_infinity)) + let at_least_one_inf = AllocatedNum::alloc(cs.namespace(|| "at least one inf"), || { + Ok( + G::Base::ONE + - (G::Base::ONE - *self.is_infinity.get_value().get()?) 
+ * (G::Base::ONE - *other.is_infinity.get_value().get()?), + ) + })?; + cs.enforce( + || "1 - at least one inf = (1-self.is_infinity) * (1-other.is_infinity)", + |lc| lc + CS::one() - self.is_infinity.get_variable(), + |lc| lc + CS::one() - other.is_infinity.get_variable(), + |lc| lc + CS::one() - at_least_one_inf.get_variable(), + ); + + // Now compute x_diff_is_actual = at_least_one_inf OR equal_x + let x_diff_is_actual = + AllocatedNum::alloc(cs.namespace(|| "allocate x_diff_is_actual"), || { + Ok(if *equal_x.get_value().get()? { + G::Base::ONE + } else { + *at_least_one_inf.get_value().get()? + }) + })?; + cs.enforce( + || "1 - x_diff_is_actual = (1-equal_x) * (1-at_least_one_inf)", + |lc| lc + CS::one() - at_least_one_inf.get_variable(), + |lc| lc + CS::one() - equal_x.get_variable(), + |lc| lc + CS::one() - x_diff_is_actual.get_variable(), + ); + + // x_diff = 1 if either self.is_infinity or other.is_infinity or self.x = + // other.x else self.x - other.x + let x_diff = + select_one_or_diff2(cs.namespace(|| "Compute x_diff"), &other.x, &self.x, &x_diff_is_actual)?; + + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + let x_diff_inv = if *x_diff_is_actual.get_value().get()? == G::Base::ONE { + // Set to default + G::Base::ONE + } else { + // Set to the actual inverse + (*other.x.get_value().get()? - *self.x.get_value().get()?).invert().unwrap() + }; + + Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) * x_diff_inv) + })?; + cs.enforce( + || "Check that lambda is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + x_diff.get_variable(), + |lc| lc + other.y.get_variable() - self.y.get_variable(), + ); + + //************************************************************************/ + // x = lambda * lambda - self.x - other.x; + //************************************************************************/ + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * lambda.get_value().get()? 
+ - *self.x.get_value().get()? + - *other.x.get_value().get()?, + ) + })?; + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), + ); + + //************************************************************************/ + // y = lambda * (self.x - x) - self.y; + //************************************************************************/ + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) + - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + //************************************************************************/ + // We only return the computed x, y if neither of the points is infinity and + // self.x != other.y if self.is_infinity return other.clone() + // elif other.is_infinity return self.clone() + // elif self.x == other.x return infinity + // Otherwise return the computed points. + //************************************************************************/ + // Now compute the output x + + let x1 = conditionally_select2( + cs.namespace(|| "x1 = other.is_infinity ? self.x : x"), + &self.x, + &x, + &other.is_infinity, + )?; + + let x = conditionally_select2( + cs.namespace(|| "x = self.is_infinity ? other.x : x1"), + &other.x, + &x1, + &self.is_infinity, + )?; + + let y1 = conditionally_select2( + cs.namespace(|| "y1 = other.is_infinity ? self.y : y"), + &self.y, + &y, + &other.is_infinity, + )?; + + let y = conditionally_select2( + cs.namespace(|| "y = self.is_infinity ? other.y : y1"), + &other.y, + &y1, + &self.is_infinity, + )?; + + let is_infinity1 = select_num_or_zero2( + cs.namespace(|| "is_infinity1 = other.is_infinity ? 
self.is_infinity : 0"), + &self.is_infinity, + &other.is_infinity, + )?; + + let is_infinity = conditionally_select2( + cs.namespace(|| "is_infinity = self.is_infinity ? other.is_infinity : is_infinity1"), + &other.is_infinity, + &is_infinity1, + &self.is_infinity, + )?; + + Ok(Self { x, y, is_infinity }) + } + + /// Doubles the supplied point. + pub fn double>(&self, mut cs: CS) -> Result { + //*************************************************************/ + // lambda = (G::Base::from(3) * self.x * self.x + G::GG::A()) + // * (G::Base::from(2)) * self.y).invert().unwrap(); + // ********************************************************** + + // Compute tmp = (G::Base::ONE + G::Base::ONE)* self.y ? self != inf : 1 + let tmp_actual = AllocatedNum::alloc(cs.namespace(|| "tmp_actual"), || { + Ok(*self.y.get_value().get()? + *self.y.get_value().get()?) + })?; + cs.enforce( + || "check tmp_actual", + |lc| lc + CS::one() + CS::one(), + |lc| lc + self.y.get_variable(), + |lc| lc + tmp_actual.get_variable(), + ); + + let tmp = select_one_or_num2(cs.namespace(|| "tmp"), &tmp_actual, &self.is_infinity)?; + + // Now compute lambda as (G::Base::from(3) * self.x * self.x + G::GG::A()) * + // tmp_inv + + let prod_1 = AllocatedNum::alloc(cs.namespace(|| "alloc prod 1"), || { + Ok(G::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) + })?; + cs.enforce( + || "Check prod 1", + |lc| lc + (G::Base::from(3), self.x.get_variable()), + |lc| lc + self.x.get_variable(), + |lc| lc + prod_1.get_variable(), + ); + + let lambda = AllocatedNum::alloc(cs.namespace(|| "alloc lambda"), || { + let tmp_inv = if *self.is_infinity.get_value().get()? == G::Base::ONE { + // Return default value 1 + G::Base::ONE + } else { + // Return the actual inverse + (*tmp.get_value().get()?).invert().unwrap() + }; + + Ok(tmp_inv * (*prod_1.get_value().get()? 
+ G::group_params().0)) + })?; + + cs.enforce( + || "Check lambda", + |lc| lc + tmp.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + prod_1.get_variable() + (G::group_params().0, CS::one()), + ); + + // ********************************************************** + // x = lambda * lambda - self.x - self.x; + // ********************************************************** + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + ((*lambda.get_value().get()?) * (*lambda.get_value().get()?)) + - *self.x.get_value().get()? + - self.x.get_value().get()?, + ) + })?; + cs.enforce( + || "Check x", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + self.x.get_variable(), + ); + + // ********************************************************** + // y = lambda * (self.x - x) - self.y; + // ********************************************************** + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + (*lambda.get_value().get()?) * (*self.x.get_value().get()? - x.get_value().get()?) + - self.y.get_value().get()?, + ) + })?; + cs.enforce( + || "Check y", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + // ********************************************************** + // Only return the computed x and y if the point is not infinity + // ********************************************************** + // x + let x = select_zero_or_num2(cs.namespace(|| "final x"), &x, &self.is_infinity)?; + + // y + let y = select_zero_or_num2(cs.namespace(|| "final y"), &y, &self.is_infinity)?; + + // is_infinity + let is_infinity = self.is_infinity.clone(); + + Ok(Self { x, y, is_infinity }) + } + + /// A gadget for scalar multiplication, optimized to use incomplete addition + /// law. 
The optimization here is analogous to , + /// except we use complete addition law over affine coordinates instead of + /// projective coordinates for the tail bits + pub fn scalar_mul>( + &self, + mut cs: CS, + scalar_bits: &[AllocatedBit], + ) -> Result { + let split_len = core::cmp::min(scalar_bits.len(), (G::Base::NUM_BITS - 2) as usize); + let (incomplete_bits, complete_bits) = scalar_bits.split_at(split_len); + + // we convert AllocatedPoint into AllocatedPointNonInfinity; we deal with the + // case where self.is_infinity = 1 below + let mut p = AllocatedPointNonInfinity::from_allocated_point(self); + + // we assume the first bit to be 1, so we must initialize acc to self and double + // it we remove this assumption below + let mut acc = p; + p = acc.double_incomplete(cs.namespace(|| "double"))?; + + // perform the double-and-add loop to compute the scalar mul using incomplete + // addition law + for (i, bit) in incomplete_bits.iter().enumerate().skip(1) { + let temp = acc.add_incomplete(cs.namespace(|| format!("add {i}")), &p)?; + acc = AllocatedPointNonInfinity::conditionally_select( + cs.namespace(|| format!("acc_iteration_{i}")), + &temp, + &acc, + &Boolean::from(bit.clone()), + )?; + + p = p.double_incomplete(cs.namespace(|| format!("double {i}")))?; + } + + // convert back to AllocatedPoint + let res = { + // we set acc.is_infinity = self.is_infinity + let acc = acc.to_allocated_point(&self.is_infinity); + + // we remove the initial slack if bits[0] is as not as assumed (i.e., it is not + // 1) + let acc_minus_initial = { + let neg = self.negate(cs.namespace(|| "negate"))?; + acc.add(cs.namespace(|| "res minus self"), &neg) + }?; + + Self::conditionally_select( + cs.namespace(|| "remove slack if necessary"), + &acc, + &acc_minus_initial, + &Boolean::from(scalar_bits[0].clone()), + )? 
+ }; + + // when self.is_infinity = 1, return the default point, else return res + // we already set res.is_infinity to be self.is_infinity, so we do not need to + // set it here + let default = Self::default(cs.namespace(|| "default")); + let x = conditionally_select2( + cs.namespace(|| "check if self.is_infinity is zero (x)"), + &default.x, + &res.x, + &self.is_infinity, + )?; + + let y = conditionally_select2( + cs.namespace(|| "check if self.is_infinity is zero (y)"), + &default.y, + &res.y, + &self.is_infinity, + )?; + + // we now perform the remaining scalar mul using complete addition law + let mut acc = Self { x, y, is_infinity: res.is_infinity }; + let mut p_complete = p.to_allocated_point(&self.is_infinity); + + for (i, bit) in complete_bits.iter().enumerate() { + let temp = acc.add(cs.namespace(|| format!("add_complete {i}")), &p_complete)?; + acc = Self::conditionally_select( + cs.namespace(|| format!("acc_complete_iteration_{i}")), + &temp, + &acc, + &Boolean::from(bit.clone()), + )?; + + p_complete = p_complete.double(cs.namespace(|| format!("double_complete {i}")))?; + } + + Ok(acc) + } + + /// If condition outputs a otherwise outputs b + pub fn conditionally_select>( + mut cs: CS, + a: &Self, + b: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; + + let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; + + let is_infinity = conditionally_select( + cs.namespace(|| "select is_infinity"), + &a.is_infinity, + &b.is_infinity, + condition, + )?; + + Ok(Self { x, y, is_infinity }) + } + + /// If condition outputs a otherwise infinity + pub fn select_point_or_infinity>( + mut cs: CS, + a: &Self, + condition: &Boolean, + ) -> Result { + let x = select_num_or_zero(cs.namespace(|| "select x"), &a.x, condition)?; + + let y = select_num_or_zero(cs.namespace(|| "select y"), &a.y, condition)?; + + let is_infinity = + select_num_or_one(cs.namespace(|| 
"select is_infinity"), &a.is_infinity, condition)?; + + Ok(Self { x, y, is_infinity }) + } +} + +#[derive(Clone, Debug)] +/// `AllocatedPoint` but one that is guaranteed to be not infinity +pub struct AllocatedPointNonInfinity { + x: AllocatedNum, + y: AllocatedNum, +} + +impl AllocatedPointNonInfinity { + /// Creates a new `AllocatedPointNonInfinity` from the specified coordinates + #[allow(unused)] + pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { Self { x, y } } + + /// Allocates a new point on the curve using coordinates provided by + /// `coords`. + #[allow(unused)] + pub fn alloc>( + mut cs: CS, + coords: Option<(G::Base, G::Base)>, + ) -> Result { + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.0)) + })?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.1)) + })?; + + Ok(Self { x, y }) + } + + /// Turns an `AllocatedPoint` into an `AllocatedPointNonInfinity` (assumes + /// it is not infinity) + pub fn from_allocated_point(p: &AllocatedPoint) -> Self { + Self { x: p.x.clone(), y: p.y.clone() } + } + + /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` + pub fn to_allocated_point(&self, is_infinity: &AllocatedNum) -> AllocatedPoint { + AllocatedPoint { + x: self.x.clone(), + y: self.y.clone(), + is_infinity: is_infinity.clone(), + } + } + + /// Returns coordinates associated with the point. + #[allow(unused)] + pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { + (&self.x, &self.y) + } + + /// Add two points assuming self != +/- other + pub fn add_incomplete(&self, mut cs: CS, other: &Self) -> Result + where CS: ConstraintSystem { + // allocate a free variable that an honest prover sets to lambda = + // (y2-y1)/(x2-x1) + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + if *other.x.get_value().get()? == *self.x.get_value().get()? 
{ + Ok(G::Base::ONE) + } else { + Ok( + (*other.y.get_value().get()? - *self.y.get_value().get()?) + * (*other.x.get_value().get()? - *self.x.get_value().get()?).invert().unwrap(), + ) + } + })?; + cs.enforce( + || "Check that lambda is computed correctly", + |lc| lc + lambda.get_variable(), + |lc| lc + other.x.get_variable() - self.x.get_variable(), + |lc| lc + other.y.get_variable() - self.y.get_variable(), + ); + + //************************************************************************/ + // x = lambda * lambda - self.x - other.x; + //************************************************************************/ + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * lambda.get_value().get()? + - *self.x.get_value().get()? + - *other.x.get_value().get()?, + ) + })?; + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), + ); + + //************************************************************************/ + // y = lambda * (self.x - x) - self.y; + //************************************************************************/ + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) 
+ - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + Ok(Self { x, y }) + } + + /// doubles the point; since this is called with a point not at infinity, it + /// is guaranteed to be not infinity + pub fn double_incomplete>( + &self, + mut cs: CS, + ) -> Result { + // lambda = (3 x^2 + a) / 2 * y + + let x_sq = self.x.square(cs.namespace(|| "x_sq"))?; + + let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { + let n = G::Base::from(3) * x_sq.get_value().get()? + G::group_params().0; + let d = G::Base::from(2) * *self.y.get_value().get()?; + if d == G::Base::ZERO { + Ok(G::Base::ONE) + } else { + Ok(n * d.invert().unwrap()) + } + })?; + cs.enforce( + || "Check that lambda is computed correctly", + |lc| lc + lambda.get_variable(), + |lc| lc + (G::Base::from(2), self.y.get_variable()), + |lc| lc + (G::Base::from(3), x_sq.get_variable()) + (G::group_params().0, CS::one()), + ); + + let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { + Ok( + *lambda.get_value().get()? * *lambda.get_value().get()? + - *self.x.get_value().get()? + - *self.x.get_value().get()?, + ) + })?; + + cs.enforce( + || "check that x is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + lambda.get_variable(), + |lc| lc + x.get_variable() + (G::Base::from(2), self.x.get_variable()), + ); + + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok( + *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) 
+ - *self.y.get_value().get()?, + ) + })?; + + cs.enforce( + || "Check that y is correct", + |lc| lc + lambda.get_variable(), + |lc| lc + self.x.get_variable() - x.get_variable(), + |lc| lc + y.get_variable() + self.y.get_variable(), + ); + + Ok(Self { x, y }) + } + + /// If condition outputs a otherwise outputs b + pub fn conditionally_select>( + mut cs: CS, + a: &Self, + b: &Self, + condition: &Boolean, + ) -> Result { + let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; + let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; + + Ok(Self { x, y }) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + use ff::{Field, PrimeFieldBits}; + use group::Curve; + use halo2curves::CurveAffine; + use rand::rngs::OsRng; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + provider::{ + bn256_grumpkin::{bn256, grumpkin}, + Bn256EngineIPA, Bn256EngineKZG, GrumpkinEngine, + }, + traits::{snark::default_ck_hint, Engine}, + }; + + #[derive(Debug, Clone)] + pub struct Point { + x: G::Base, + y: G::Base, + is_infinity: bool, + } + + impl Point { + pub fn new(x: G::Base, y: G::Base, is_infinity: bool) -> Self { Self { x, y, is_infinity } } + + pub fn random_vartime() -> Self { + loop { + let x = G::Base::random(&mut OsRng); + let y = (x.square() * x + G::group_params().1).sqrt(); + if y.is_some().unwrap_u8() == 1 { + return Self { x, y: y.unwrap(), is_infinity: false }; + } + } + } + + /// Add any two points + pub fn add(&self, other: &Self) -> Self { + if self.x == other.x { + // If self == other then call double + if self.y == other.y { + self.double() + } else { + // if self.x == other.x and self.y != other.y then return infinity + Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true } + } + } else { + self.add_internal(other) + } + } + + /// Add two different points + pub fn 
add_internal(&self, other: &Self) -> Self { + if self.is_infinity { + return other.clone(); + } + + if other.is_infinity { + return self.clone(); + } + + let lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); + let x = lambda * lambda - self.x - other.x; + let y = lambda * (self.x - x) - self.y; + Self { x, y, is_infinity: false } + } + + pub fn double(&self) -> Self { + if self.is_infinity { + return Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true }; + } + + let lambda = G::Base::from(3) + * self.x + * self.x + * ((G::Base::ONE + G::Base::ONE) * self.y).invert().unwrap(); + let x = lambda * lambda - self.x - self.x; + let y = lambda * (self.x - x) - self.y; + Self { x, y, is_infinity: false } + } + + pub fn scalar_mul(&self, scalar: &G::Scalar) -> Self { + let mut res = + Self { x: G::Base::ZERO, y: G::Base::ZERO, is_infinity: true }; + + let bits = scalar.to_le_bits(); + for i in (0..bits.len()).rev() { + res = res.double(); + if bits[i] { + res = self.add(&res); + } + } + res + } + } + + // Allocate a random point. 
Only used for testing + pub fn alloc_random_point>( + mut cs: CS, + ) -> Result, SynthesisError> { + // get a random point + let p = Point::::random_vartime(); + AllocatedPoint::alloc(cs.namespace(|| "alloc p"), Some((p.x, p.y, p.is_infinity))) + } + + /// Make the point io + pub fn inputize_allocated_point>( + p: &AllocatedPoint, + mut cs: CS, + ) { + let _ = p.x.inputize(cs.namespace(|| "Input point.x")); + let _ = p.y.inputize(cs.namespace(|| "Input point.y")); + let _ = p.is_infinity.inputize(cs.namespace(|| "Input point.is_infinity")); + } + + #[test] + fn test_ecc_ops() { + test_ecc_ops_with::::GE>(); + test_ecc_ops_with::::GE>(); + } + + fn test_ecc_ops_with() + where + G: Group, + C: CurveAffine, { + // perform some curve arithmetic + let a = Point::::random_vartime(); + let b = Point::::random_vartime(); + let c = a.add(&b); + let d = a.double(); + let s = G::Scalar::random(&mut OsRng); + let e = a.scalar_mul(&s); + + // perform the same computation by translating to curve types + let a_curve = C::from_xy( + C::Base::from_repr(a.x.to_repr()).unwrap(), + C::Base::from_repr(a.y.to_repr()).unwrap(), + ) + .unwrap(); + let b_curve = C::from_xy( + C::Base::from_repr(b.x.to_repr()).unwrap(), + C::Base::from_repr(b.y.to_repr()).unwrap(), + ) + .unwrap(); + let c_curve = (a_curve + b_curve).to_affine(); + let d_curve = (a_curve + a_curve).to_affine(); + let e_curve = a_curve.mul(C::Scalar::from_repr(s.to_repr()).unwrap()).to_affine(); + + // transform c, d, and e into curve types + let c_curve_2 = C::from_xy( + C::Base::from_repr(c.x.to_repr()).unwrap(), + C::Base::from_repr(c.y.to_repr()).unwrap(), + ) + .unwrap(); + let d_curve_2 = C::from_xy( + C::Base::from_repr(d.x.to_repr()).unwrap(), + C::Base::from_repr(d.y.to_repr()).unwrap(), + ) + .unwrap(); + let e_curve_2 = C::from_xy( + C::Base::from_repr(e.x.to_repr()).unwrap(), + C::Base::from_repr(e.y.to_repr()).unwrap(), + ) + .unwrap(); + + // check that we have the same outputs + assert_eq!(c_curve, c_curve_2); 
+ assert_eq!(d_curve, d_curve_2); + assert_eq!(e_curve, e_curve_2); + } + + fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, G::Scalar) + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + + let s = G::Scalar::random(&mut OsRng); + // Allocate bits for s + let bits: Vec = s + .to_le_bits() + .into_iter() + .enumerate() + .map(|(i, bit)| AllocatedBit::alloc(cs.namespace(|| format!("bit {i}")), Some(bit))) + .collect::, SynthesisError>>() + .unwrap(); + let e = a.scalar_mul(cs.namespace(|| "Scalar Mul"), &bits).unwrap(); + inputize_allocated_point(&e, cs.namespace(|| "inputize e")); + (a, e, s) + } + + #[test] + fn test_ecc_circuit_ops() { + test_ecc_circuit_ops_with::(&expect!["2738"], &expect!["2724"]); + test_ecc_circuit_ops_with::(&expect!["2738"], &expect!["2724"]); + } + + fn test_ecc_circuit_ops_with(expected_constraints: &Expect, expected_variables: &Expect) + where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_smul::(cs.namespace(|| "synthesize")); + expected_constraints.assert_eq(&cs.num_constraints().to_string()); + expected_variables.assert_eq(&cs.num_aux().to_string()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + let a_p: Point = Point::new( + a.x.get_value().unwrap(), + a.y.get_value().unwrap(), + a.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_new = a_p.scalar_mul(&s); + assert!(e_p.x == e_new.x && e_p.y == 
e_new.y); + // Make sure that this is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + let e = a.add(cs.namespace(|| "add a to a"), &a).unwrap(); + inputize_allocated_point(&e, cs.namespace(|| "inputize e")); + (a, e) + } + + #[test] + fn test_ecc_circuit_add_equal() { + test_ecc_circuit_add_equal_with::(); + test_ecc_circuit_add_equal_with::(); + } + + fn test_ecc_circuit_add_equal_with() + where + E1: Engine::Scalar>, + E2: Engine::Scalar>, { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + println!("Number of constraints: {}", cs.num_constraints()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let a_p: Point = Point::new( + a.x.get_value().unwrap(), + a.y.get_value().unwrap(), + a.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + let e_new = a_p.add(&a_p); + assert!(e_p.x == e_new.x && e_p.y == e_new.y); + // Make sure that it is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } + + fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint + where + G: Group, + CS: ConstraintSystem, { + let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); + inputize_allocated_point(&a, cs.namespace(|| "inputize a")); + let b = &mut a.clone(); + b.y = + AllocatedNum::alloc(cs.namespace(|| "allocate 
negation of a"), || Ok(G::Base::ZERO)).unwrap(); + inputize_allocated_point(b, cs.namespace(|| "inputize b")); + let e = a.add(cs.namespace(|| "add a to b"), b).unwrap(); + e + } + + #[test] + fn test_ecc_circuit_add_negation() { + test_ecc_circuit_add_negation_with::(&expect!["39"], &expect![ + "34" + ]); + test_ecc_circuit_add_negation_with::(&expect!["39"], &expect![ + "34" + ]); + } + + fn test_ecc_circuit_add_negation_with( + expected_constraints: &Expect, + expected_variables: &Expect, + ) where + E1: Engine::Scalar>, + E2: Engine::Scalar>, + { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); + expected_constraints.assert_eq(&cs.num_constraints().to_string()); + expected_variables.assert_eq(&cs.num_aux().to_string()); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + // Then the satisfying assignment + let mut cs = SatisfyingAssignment::::new(); + let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); + let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + let e_p: Point = Point::new( + e.x.get_value().unwrap(), + e.y.get_value().unwrap(), + e.is_infinity.get_value().unwrap() == ::Base::ONE, + ); + assert!(e_p.is_infinity); + // Make sure that it is satisfiable + shape.is_sat(&ck, &inst, &witness).unwrap(); + } +} diff --git a/prover/src/gadgets/mod.rs b/prover/src/gadgets/mod.rs new file mode 100644 index 0000000..4345bbd --- /dev/null +++ b/prover/src/gadgets/mod.rs @@ -0,0 +1,24 @@ +//! This module implements various gadgets necessary for Nova and applications +//! built with Nova. 
+mod ecc; +pub(crate) use ecc::AllocatedPoint; + +mod nonnative; +pub(crate) use nonnative::{ + bignat::{nat_to_limbs, BigNat}, + util::{f_to_nat, Num}, +}; + +mod r1cs; +pub(crate) use r1cs::{ + conditionally_select_alloc_relaxed_r1cs, + conditionally_select_vec_allocated_relaxed_r1cs_instance, AllocatedR1CSInstance, + AllocatedRelaxedR1CSInstance, +}; + +mod utils; +#[cfg(test)] pub(crate) use utils::alloc_one; +pub(crate) use utils::{ + alloc_bignat_constant, alloc_num_equals, alloc_scalar_as_base, alloc_zero, + conditionally_select_allocated_bit, conditionally_select_bignat, le_bits_to_num, scalar_as_base, +}; diff --git a/prover/src/gadgets/nonnative/bignat.rs b/prover/src/gadgets/nonnative/bignat.rs new file mode 100644 index 0000000..19f03d3 --- /dev/null +++ b/prover/src/gadgets/nonnative/bignat.rs @@ -0,0 +1,849 @@ +use std::{ + borrow::Borrow, + cmp::{max, min}, + convert::From, +}; + +use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError}; +use ff::PrimeField; +use itertools::Itertools as _; +use num_bigint::BigInt; +use num_traits::cast::ToPrimitive; + +use super::{ + util::{f_to_nat, nat_to_f, Bitvector, Num}, + OptionExt, +}; + +/// Compute the natural number represented by an array of limbs. +/// The limbs are assumed to be based the `limb_width` power of 2. +pub fn limbs_to_nat, I: DoubleEndedIterator>( + limbs: I, + limb_width: usize, +) -> BigInt { + limbs.rev().fold(BigInt::from(0), |mut acc, limb| { + acc <<= limb_width as u32; + acc += f_to_nat(limb.borrow()); + acc + }) +} + +fn int_with_n_ones(n: usize) -> BigInt { + let mut m = BigInt::from(1); + m <<= n as u32; + m -= 1; + m +} + +/// Compute the limbs encoding a natural number. +/// The limbs are assumed to be based the `limb_width` power of 2. 
+pub fn nat_to_limbs( + nat: &BigInt, + limb_width: usize, + n_limbs: usize, +) -> Result, SynthesisError> { + let mask = int_with_n_ones(limb_width); + let mut nat = nat.clone(); + if nat.bits() as usize <= n_limbs * limb_width { + Ok( + (0..n_limbs) + .map(|_| { + let r = &nat & &mask; + nat >>= limb_width as u32; + nat_to_f(&r).unwrap() + }) + .collect(), + ) + } else { + eprintln!("nat {nat} does not fit in {n_limbs} limbs of width {limb_width}"); + Err(SynthesisError::Unsatisfiable) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct BigNatParams { + pub min_bits: usize, + pub max_word: BigInt, + pub limb_width: usize, + pub n_limbs: usize, +} + +impl BigNatParams { + pub fn new(limb_width: usize, n_limbs: usize) -> Self { + let mut max_word = BigInt::from(1) << limb_width as u32; + max_word -= 1; + Self { max_word, n_limbs, limb_width, min_bits: 0 } + } +} + +/// A representation of a large natural number (a member of {0, 1, 2, ... }) +#[derive(Clone)] +pub struct BigNat { + /// The linear combinations which constrain the value of each limb of the + /// number + pub limbs: Vec>, + /// The witness values for each limb (filled at witness-time) + pub limb_values: Option>, + /// The value of the whole number (filled at witness-time) + pub value: Option, + /// Parameters + pub params: BigNatParams, +} + +impl PartialEq for BigNat { + fn eq(&self, other: &Self) -> bool { self.value == other.value && self.params == other.params } +} +impl Eq for BigNat {} + +impl From> for Polynomial { + fn from(other: BigNat) -> Self { + Self { coefficients: other.limbs, values: other.limb_values } + } +} + +impl BigNat { + /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width + /// `limb_width` each. If `max_word` is missing, then it is assumed to + /// be `(2 << limb_width) - 1`. The value is provided by a closure + /// returning limb values. 
+ pub fn alloc_from_limbs( + mut cs: CS, + f: F, + max_word: Option, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, SynthesisError>, + { + let values_cell = f(); + let mut value = None; + let mut limb_values = None; + let limbs = (0..n_limbs) + .map(|limb_i| { + cs.alloc( + || format!("limb {limb_i}"), + || match values_cell { + Ok(ref vs) => { + if vs.len() != n_limbs { + eprintln!("Values do not match stated limb count"); + return Err(SynthesisError::Unsatisfiable); + } + if value.is_none() { + value = Some(limbs_to_nat::(vs.iter(), limb_width)); + } + if limb_values.is_none() { + limb_values = Some(vs.clone()); + } + Ok(vs[limb_i]) + }, + // Hack b/c SynthesisError and io::Error don't implement Clone + Err(ref e) => Err(SynthesisError::from(std::io::Error::new( + std::io::ErrorKind::Other, + format!("{e}"), + ))), + }, + ) + .map(|v| LinearCombination::zero() + v) + }) + .collect::, _>>()?; + Ok(Self { + value, + limb_values, + limbs, + params: BigNatParams { + min_bits: 0, + n_limbs, + max_word: max_word.unwrap_or_else(|| int_with_n_ones(limb_width)), + limb_width, + }, + }) + } + + /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width + /// `limb_width` each. The `max_word` is guaranteed to be `(2 << + /// limb_width) - 1`. The value is provided by a closure returning a + /// natural number. 
+ pub fn alloc_from_nat( + mut cs: CS, + f: F, + limb_width: usize, + n_limbs: usize, + ) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, + { + let all_values_cell = + f().and_then(|v| Ok((nat_to_limbs::(&v, limb_width, n_limbs)?, v))); + let mut value = None; + let mut limb_values = Vec::new(); + let limbs = (0..n_limbs) + .map(|limb_i| { + cs.alloc( + || format!("limb {limb_i}"), + || match all_values_cell { + Ok((ref vs, ref v)) => { + if value.is_none() { + value = Some(v.clone()); + } + limb_values.push(vs[limb_i]); + Ok(vs[limb_i]) + }, + // Hack b/c SynthesisError and io::Error don't implement Clone + Err(ref e) => Err(SynthesisError::from(std::io::Error::new( + std::io::ErrorKind::Other, + format!("{e}"), + ))), + }, + ) + .map(|v| LinearCombination::zero() + v) + }) + .collect::, _>>()?; + Ok(Self { + value, + limb_values: (!limb_values.is_empty()).then_some(limb_values), + limbs, + params: BigNatParams::new(limb_width, n_limbs), + }) + } + + /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width + /// `limb_width` each. The `max_word` is guaranteed to be `(2 << + /// limb_width) - 1`. 
The value is provided by an allocated number + pub fn from_num>( + mut cs: CS, + n: &Num, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let bignat = Self::alloc_from_nat( + cs.namespace(|| "bignat"), + || n.value.as_ref().map(|n| f_to_nat(n)).ok_or(SynthesisError::AssignmentMissing), + limb_width, + n_limbs, + )?; + + // check if bignat equals n + // (1) decompose `bignat` into a bitvector `bv` + let bv = bignat.decompose(cs.namespace(|| "bv"))?; + // (2) recompose bits and check if it equals n + n.is_equal(cs.namespace(|| "n"), &bv); + + Ok(bignat) + } + + pub fn as_limbs(&self) -> Vec> { + let mut limbs = Vec::new(); + for (i, lc) in self.limbs.iter().enumerate() { + limbs.push(Num::new(self.limb_values.as_ref().map(|vs| vs[i]), lc.clone())); + } + limbs + } + + pub fn assert_well_formed>( + &self, + mut cs: CS, + ) -> Result<(), SynthesisError> { + // swap the option and iterator + let limb_values_split = + (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); + for (i, (limb, limb_value)) in self.limbs.iter().zip_eq(limb_values_split).enumerate() { + Num::new(limb_value, limb.clone()) + .fits_in_bits(cs.namespace(|| format!("{i}")), self.params.limb_width)?; + } + Ok(()) + } + + /// Break `self` up into a bit-vector. 
+ pub fn decompose>( + &self, + mut cs: CS, + ) -> Result, SynthesisError> { + let limb_values_split = + (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); + let bitvectors: Vec> = self + .limbs + .iter() + .zip_eq(limb_values_split) + .enumerate() + .map(|(i, (limb, limb_value))| { + Num::new(limb_value, limb.clone()) + .decompose(cs.namespace(|| format!("subdecmop {i}")), self.params.limb_width) + }) + .collect::, _>>()?; + let mut bits = Vec::new(); + let mut values = Vec::new(); + let mut allocations = Vec::new(); + for bv in bitvectors { + bits.extend(bv.bits); + if let Some(vs) = bv.values { + values.extend(vs) + }; + allocations.extend(bv.allocations); + } + let values = (!values.is_empty()).then_some(values); + Ok(Bitvector { bits, values, allocations }) + } + + pub fn enforce_limb_width_agreement( + &self, + other: &Self, + location: &str, + ) -> Result { + if self.params.limb_width == other.params.limb_width { + Ok(self.params.limb_width) + } else { + eprintln!( + "Limb widths {}, {}, do not agree at {}", + self.params.limb_width, other.params.limb_width, location + ); + Err(SynthesisError::Unsatisfiable) + } + } + + pub fn from_poly(poly: Polynomial, limb_width: usize, max_word: BigInt) -> Self { + Self { + params: BigNatParams { + min_bits: 0, + max_word, + n_limbs: poly.coefficients.len(), + limb_width, + }, + limbs: poly.coefficients, + value: poly + .values + .as_ref() + .map(|limb_values| limbs_to_nat::(limb_values.iter(), limb_width)), + limb_values: poly.values, + } + } + + /// Constrain `self` to be equal to `other`, after carrying both. + pub fn equal_when_carried>( + &self, + mut cs: CS, + other: &Self, + ) -> Result<(), SynthesisError> { + self.enforce_limb_width_agreement(other, "equal_when_carried")?; + + // We'll propagate carries over the first `n` limbs. 
+ let n = min(self.limbs.len(), other.limbs.len()); + let target_base = BigInt::from(1u8) << self.params.limb_width as u32; + let mut accumulated_extra = BigInt::from(0usize); + let max_word = max(&self.params.max_word, &other.params.max_word); + let carry_bits = (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64) + .ceil() + + 0.1) as usize; + let mut carry_in = Num::new(Some(Scalar::ZERO), LinearCombination::zero()); + + for i in 0..n { + let carry = Num::alloc(cs.namespace(|| format!("carry value {i}")), || { + Ok( + nat_to_f( + &((f_to_nat(&self.limb_values.grab()?[i]) + + f_to_nat(&carry_in.value.unwrap()) + + max_word + - f_to_nat(&other.limb_values.grab()?[i])) + / &target_base), + ) + .unwrap(), + ) + })?; + accumulated_extra += max_word; + + cs.enforce( + || format!("carry {i}"), + |lc| lc, + |lc| lc, + |lc| { + lc + &carry_in.num + &self.limbs[i] - &other.limbs[i] + + (nat_to_f(max_word).unwrap(), CS::one()) + - (nat_to_f(&target_base).unwrap(), &carry.num) + - (nat_to_f(&(&accumulated_extra % &target_base)).unwrap(), CS::one()) + }, + ); + + accumulated_extra /= &target_base; + + if i < n - 1 { + carry.fits_in_bits(cs.namespace(|| format!("carry {i} decomp")), carry_bits)?; + } else { + cs.enforce( + || format!("carry {i} is out"), + |lc| lc, + |lc| lc, + |lc| lc + &carry.num - (nat_to_f(&accumulated_extra).unwrap(), CS::one()), + ); + } + carry_in = carry; + } + + for (i, zero_limb) in self.limbs.iter().enumerate().skip(n) { + cs.enforce(|| format!("zero self {i}"), |lc| lc, |lc| lc, |lc| lc + zero_limb); + } + for (i, zero_limb) in other.limbs.iter().enumerate().skip(n) { + cs.enforce(|| format!("zero other {i}"), |lc| lc, |lc| lc, |lc| lc + zero_limb); + } + Ok(()) + } + + /// Constrain `self` to be equal to `other`, after carrying both. + /// Uses regrouping internally to take full advantage of the field size and + /// reduce the amount of carrying. 
+ pub fn equal_when_carried_regroup>( + &self, + mut cs: CS, + other: &Self, + ) -> Result<(), SynthesisError> { + self.enforce_limb_width_agreement(other, "equal_when_carried_regroup")?; + let max_word = max(&self.params.max_word, &other.params.max_word); + let carry_bits = (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64) + .ceil() + + 0.1) as usize; + let limbs_per_group = max((Scalar::CAPACITY as usize - carry_bits) / self.params.limb_width, 1); + + let self_grouped = self.group_limbs(limbs_per_group); + let other_grouped = other.group_limbs(limbs_per_group); + self_grouped.equal_when_carried(cs.namespace(|| "grouped"), &other_grouped) + } + + pub fn add(&self, other: &Self) -> Result { + self.enforce_limb_width_agreement(other, "add")?; + let n_limbs = max(self.params.n_limbs, other.params.n_limbs); + let max_word = &self.params.max_word + &other.params.max_word; + let limbs: Vec> = (0..n_limbs) + .map(|i| match (self.limbs.get(i), other.limbs.get(i)) { + (Some(a), Some(b)) => a.clone() + b, + (Some(a), None) => a.clone(), + (None, Some(b)) => b.clone(), + (None, None) => unreachable!(), + }) + .collect(); + let limb_values: Option> = self.limb_values.as_ref().and_then(|x| { + other.limb_values.as_ref().map(|y| { + (0..n_limbs) + .map(|i| match (x.get(i), y.get(i)) { + (Some(a), Some(b)) => { + let mut t = *a; + t.add_assign(b); + t + }, + (Some(a), None) | (None, Some(a)) => *a, + (None, None) => unreachable!(), + }) + .collect() + }) + }); + let value = self.value.as_ref().and_then(|x| other.value.as_ref().map(|y| x + y)); + Ok(Self { + limb_values, + value, + limbs, + params: BigNatParams { + min_bits: max(self.params.min_bits, other.params.min_bits), + n_limbs, + max_word, + limb_width: self.params.limb_width, + }, + }) + } + + /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. 
+ pub fn mult_mod>( + &self, + mut cs: CS, + other: &Self, + modulus: &Self, + ) -> Result<(Self, Self), SynthesisError> { + self.enforce_limb_width_agreement(other, "mult_mod")?; + let limb_width = self.params.limb_width; + let quotient_bits = (self.n_bits() + other.n_bits()).saturating_sub(modulus.params.min_bits); + let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; + let quotient = Self::alloc_from_nat( + cs.namespace(|| "quotient"), + || { + Ok({ + let mut x = self.value.grab()?.clone(); + x *= other.value.grab()?; + x /= modulus.value.grab()?; + x + }) + }, + self.params.limb_width, + quotient_limbs, + )?; + quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; + let remainder = Self::alloc_from_nat( + cs.namespace(|| "remainder"), + || { + Ok({ + let mut x = self.value.grab()?.clone(); + x *= other.value.grab()?; + x %= modulus.value.grab()?; + x + }) + }, + self.params.limb_width, + modulus.limbs.len(), + )?; + remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; + let a_poly = Polynomial::from(self.clone()); + let b_poly = Polynomial::from(other.clone()); + let mod_poly = Polynomial::from(modulus.clone()); + let q_poly = Polynomial::from(quotient.clone()); + let r_poly = Polynomial::from(remainder.clone()); + + // a * b + let left = a_poly.alloc_product(cs.namespace(|| "left"), &b_poly)?; + let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; + // q * m + r + let right = right_product.sum(&r_poly); + + let left_max_word = { + let mut x = BigInt::from(min(self.limbs.len(), other.limbs.len())); + x *= &self.params.max_word; + x *= &other.params.max_word; + x + }; + let right_max_word = { + let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); + x *= "ient.params.max_word; + x *= &modulus.params.max_word; + x += &remainder.params.max_word; + x + }; + + let left_int = Self::from_poly(left, limb_width, left_max_word); + let right_int = 
Self::from_poly(right, limb_width, right_max_word); + left_int.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; + Ok((quotient, remainder)) + } + + /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. + pub fn red_mod>( + &self, + mut cs: CS, + modulus: &Self, + ) -> Result { + self.enforce_limb_width_agreement(modulus, "red_mod")?; + let limb_width = self.params.limb_width; + let quotient_bits = self.n_bits().saturating_sub(modulus.params.min_bits); + let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; + let quotient = Self::alloc_from_nat( + cs.namespace(|| "quotient"), + || Ok(self.value.grab()? / modulus.value.grab()?), + self.params.limb_width, + quotient_limbs, + )?; + quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; + let remainder = Self::alloc_from_nat( + cs.namespace(|| "remainder"), + || Ok(self.value.grab()? % modulus.value.grab()?), + self.params.limb_width, + modulus.limbs.len(), + )?; + remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; + let mod_poly = Polynomial::from(modulus.clone()); + let q_poly = Polynomial::from(quotient.clone()); + let r_poly = Polynomial::from(remainder.clone()); + + // q * m + r + let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; + let right = right_product.sum(&r_poly); + + let right_max_word = { + let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); + x *= "ient.params.max_word; + x *= &modulus.params.max_word; + x += &remainder.params.max_word; + x + }; + + let right_int = Self::from_poly(right, limb_width, right_max_word); + self.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; + Ok(remainder) + } + + /// Combines limbs into groups. 
+ pub fn group_limbs(&self, limbs_per_group: usize) -> Self { + let n_groups = (self.limbs.len() - 1) / limbs_per_group + 1; + let limb_values = self.limb_values.as_ref().map(|vs| { + let mut values: Vec = vec![Scalar::ZERO; n_groups]; + let mut shift = Scalar::ONE; + let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { + l = l.double(); + l + }); + for (i, v) in vs.iter().enumerate() { + if i % limbs_per_group == 0 { + shift = Scalar::ONE; + } + let mut a = shift; + a *= v; + values[i / limbs_per_group].add_assign(&a); + shift.mul_assign(&limb_block); + } + values + }); + let limbs = { + let mut limbs: Vec> = vec![LinearCombination::zero(); n_groups]; + let mut shift = Scalar::ONE; + let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { + l = l.double(); + l + }); + for (i, limb) in self.limbs.iter().enumerate() { + if i % limbs_per_group == 0 { + shift = Scalar::ONE; + } + limbs[i / limbs_per_group] = + std::mem::replace(&mut limbs[i / limbs_per_group], LinearCombination::zero()) + + (shift, limb); + shift.mul_assign(&limb_block); + } + limbs + }; + let max_word = (0..limbs_per_group).fold(BigInt::from(0u8), |mut acc, i| { + acc.set_bit((i * self.params.limb_width) as u64, true); + acc + }) * &self.params.max_word; + Self { + params: BigNatParams { + min_bits: self.params.min_bits, + limb_width: self.params.limb_width * limbs_per_group, + n_limbs: limbs.len(), + max_word, + }, + limbs, + limb_values, + value: self.value.clone(), + } + } + + pub fn n_bits(&self) -> usize { + assert!(self.params.n_limbs > 0); + self.params.limb_width * (self.params.n_limbs - 1) + self.params.max_word.bits() as usize + } +} + +pub struct Polynomial { + pub coefficients: Vec>, + pub values: Option>, +} + +impl Polynomial { + pub fn alloc_product>( + &self, + mut cs: CS, + other: &Self, + ) -> Result { + let n_product_coeffs = self.coefficients.len() + other.coefficients.len() - 1; + let values = self.values.as_ref().and_then(|self_vs| { + 
other.values.as_ref().map(|other_vs| { + let mut values: Vec = + std::iter::repeat_with(|| Scalar::ZERO).take(n_product_coeffs).collect(); + for (self_i, self_v) in self_vs.iter().enumerate() { + for (other_i, other_v) in other_vs.iter().enumerate() { + let mut v = *self_v; + v.mul_assign(other_v); + values[self_i + other_i].add_assign(&v); + } + } + values + }) + }); + let coefficients = (0..n_product_coeffs) + .map(|i| { + Ok(LinearCombination::zero() + cs.alloc(|| format!("prod {i}"), || Ok(values.grab()?[i]))?) + }) + .collect::>, SynthesisError>>()?; + let product = Self { coefficients, values }; + let one = Scalar::ONE; + let mut x = Scalar::ZERO; + for _ in 1..(n_product_coeffs + 1) { + x.add_assign(&one); + cs.enforce( + || format!("pointwise product @ {x:?}"), + |lc| { + let mut i = Scalar::ONE; + self.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + |lc| { + let mut i = Scalar::ONE; + other.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + |lc| { + let mut i = Scalar::ONE; + product.coefficients.iter().fold(lc, |lc, c| { + let r = lc + (i, c); + i.mul_assign(&x); + r + }) + }, + ) + } + Ok(product) + } + + pub fn sum(&self, other: &Self) -> Self { + let n_coeffs = max(self.coefficients.len(), other.coefficients.len()); + let values = self.values.as_ref().and_then(|self_vs| { + other.values.as_ref().map(|other_vs| { + (0..n_coeffs) + .map(|i| { + let mut s = Scalar::ZERO; + if i < self_vs.len() { + s.add_assign(&self_vs[i]); + } + if i < other_vs.len() { + s.add_assign(&other_vs[i]); + } + s + }) + .collect() + }) + }); + let coefficients = (0..n_coeffs) + .map(|i| { + let mut lc = LinearCombination::zero(); + if i < self.coefficients.len() { + lc = lc + &self.coefficients[i]; + } + if i < other.coefficients.len() { + lc = lc + &other.coefficients[i]; + } + lc + }) + .collect(); + Self { coefficients, values } + } +} + +#[cfg(test)] +mod tests { + use 
bellpepper_core::{test_cs::TestConstraintSystem, Circuit}; + #[cfg(not(target_arch = "wasm32"))] + use proptest::prelude::*; + + use super::*; + use crate::provider::bn256_grumpkin::bn256::Scalar; + + pub struct PolynomialMultiplier { + pub a: Vec, + pub b: Vec, + } + + impl Circuit for PolynomialMultiplier { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + let a = Polynomial { + coefficients: self + .a + .iter() + .enumerate() + .map(|(i, x)| { + Ok(LinearCombination::zero() + cs.alloc(|| format!("coeff_a {i}"), || Ok(*x))?) + }) + .collect::>, SynthesisError>>()?, + values: Some(self.a), + }; + let b = Polynomial { + coefficients: self + .b + .iter() + .enumerate() + .map(|(i, x)| { + Ok(LinearCombination::zero() + cs.alloc(|| format!("coeff_b {i}"), || Ok(*x))?) + }) + .collect::>, SynthesisError>>()?, + values: Some(self.b), + }; + let _prod = a.alloc_product(cs.namespace(|| "product"), &b)?; + Ok(()) + } + } + + #[test] + fn test_polynomial_multiplier_circuit() { + let mut cs = TestConstraintSystem::::new(); + + let circuit = PolynomialMultiplier { + a: [1, 1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + b: [1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + }; + + circuit.synthesize(&mut cs).expect("synthesis failed"); + + if let Some(token) = cs.which_is_unsatisfied() { + eprintln!("Error: {} is unsatisfied", token); + } + } + + #[derive(Debug)] + pub struct BigNatBitDecompInputs { + pub n: BigInt, + } + + pub struct BigNatBitDecompParams { + pub limb_width: usize, + pub n_limbs: usize, + } + + pub struct BigNatBitDecomp { + inputs: Option, + params: BigNatBitDecompParams, + } + + impl Circuit for BigNatBitDecomp { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + let n = BigNat::alloc_from_nat( + cs.namespace(|| "n"), + || Ok(self.inputs.grab()?.n.clone()), + self.params.limb_width, + self.params.n_limbs, + )?; + n.decompose(cs.namespace(|| "decomp"))?; + Ok(()) + } + } + + #[cfg(not(target_arch = 
"wasm32"))] + proptest! { + #![proptest_config(ProptestConfig { + cases: 10, // this test is costlier as max n gets larger + .. ProptestConfig::default() + })] + #[test] + fn test_big_nat_can_decompose(n in any::(), limb_width in 40u8..200) { + let n = n as usize; + + let n_limbs = if n == 0 { + 1 + } else { + (n - 1) / limb_width as usize + 1 + }; + + let circuit = BigNatBitDecomp { + inputs: Some(BigNatBitDecompInputs { + n: BigInt::from(n), + }), + params: BigNatBitDecompParams { + limb_width: limb_width as usize, + n_limbs, + }, + }; + let mut cs = TestConstraintSystem::::new(); + circuit.synthesize(&mut cs).expect("synthesis failed"); + prop_assert!(cs.is_satisfied()); + } + } +} diff --git a/prover/src/gadgets/nonnative/mod.rs b/prover/src/gadgets/nonnative/mod.rs new file mode 100644 index 0000000..f81b709 --- /dev/null +++ b/prover/src/gadgets/nonnative/mod.rs @@ -0,0 +1,35 @@ +//! This module implements various gadgets necessary for doing non-native +//! arithmetic Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licenced under MIT + +use bellpepper_core::SynthesisError; +use ff::PrimeField; + +trait OptionExt { + fn grab(&self) -> Result<&T, SynthesisError>; +} + +impl OptionExt for Option { + fn grab(&self) -> Result<&T, SynthesisError> { + self.as_ref().ok_or(SynthesisError::AssignmentMissing) + } +} + +trait BitAccess { + fn get_bit(&self, i: usize) -> Option; +} + +impl BitAccess for Scalar { + fn get_bit(&self, i: usize) -> Option { + if i as u32 >= Scalar::NUM_BITS { + return None; + } + + let (byte_pos, bit_pos) = (i / 8, i % 8); + let byte = self.to_repr().as_ref()[byte_pos]; + let bit = byte >> bit_pos & 1; + Some(bit == 1) + } +} + +pub mod bignat; +pub mod util; diff --git a/prover/src/gadgets/nonnative/util.rs b/prover/src/gadgets/nonnative/util.rs new file mode 100644 index 0000000..e5f8d78 --- /dev/null +++ b/prover/src/gadgets/nonnative/util.rs @@ -0,0 +1,254 @@ +use std::{ + 
convert::From, + io::{self, Write}, +}; + +use bellpepper_core::{ + num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError, Variable, +}; +use byteorder::WriteBytesExt; +use ff::PrimeField; +use num_bigint::{BigInt, Sign}; + +use super::{BitAccess, OptionExt}; + +#[derive(Clone)] +/// A representation of a bit +pub struct Bit { + /// The linear combination which constrain the value of the bit + pub bit: LinearCombination, + #[allow(unused)] + /// The value of the bit (filled at witness-time) + pub value: Option, +} + +#[derive(Clone)] +/// A representation of a bit-vector +pub struct Bitvector { + /// The linear combination which constrain the values of the bits + pub bits: Vec>, + /// The value of the bits (filled at witness-time) + pub values: Option>, + /// Allocated bit variables + pub allocations: Vec>, +} + +impl Bit { + /// Allocate a variable in the constraint system which can only be a + /// boolean value. + pub fn alloc>( + mut cs: CS, + value: Option, + ) -> Result { + let var = cs.alloc( + || "boolean", + || { + if *value.grab()? { + Ok(Scalar::ONE) + } else { + Ok(Scalar::ZERO) + } + }, + )?; + + // Constrain: (1 - a) * a = 0 + // This constrains a to be either 0 or 1. 
+ cs.enforce(|| "boolean constraint", |lc| lc + CS::one() - var, |lc| lc + var, |lc| lc); + + Ok(Self { bit: LinearCombination::zero() + var, value }) + } +} + +pub struct Num { + pub num: LinearCombination, + pub value: Option, +} + +impl Num { + pub const fn new(value: Option, num: LinearCombination) -> Self { + Self { value, num } + } + + pub fn alloc(mut cs: CS, value: F) -> Result + where + CS: ConstraintSystem, + F: FnOnce() -> Result, { + let mut new_value = None; + let var = cs.alloc( + || "num", + || { + let tmp = value()?; + + new_value = Some(tmp); + + Ok(tmp) + }, + )?; + + Ok(Self { value: new_value, num: LinearCombination::zero() + var }) + } + + pub fn fits_in_bits>( + &self, + mut cs: CS, + n_bits: usize, + ) -> Result<(), SynthesisError> { + let v = self.value; + + // Allocate all but the first bit. + let bits: Vec = (1..n_bits) + .map(|i| { + cs.alloc( + || format!("bit {i}"), + || { + let r = if *v.grab()?.get_bit(i).grab()? { Scalar::ONE } else { Scalar::ZERO }; + Ok(r) + }, + ) + }) + .collect::>()?; + + for (i, v) in bits.iter().enumerate() { + cs.enforce(|| format!("{i} is bit"), |lc| lc + *v, |lc| lc + CS::one() - *v, |lc| lc) + } + + // Last bit + cs.enforce( + || "last bit", + |mut lc| { + let mut f = Scalar::ONE; + lc = lc + &self.num; + for v in bits.iter() { + f = f.double(); + lc = lc - (f, *v); + } + lc + }, + |mut lc| { + lc = lc + CS::one(); + let mut f = Scalar::ONE; + lc = lc - &self.num; + for v in bits.iter() { + f = f.double(); + lc = lc + (f, *v); + } + lc + }, + |lc| lc, + ); + Ok(()) + } + + /// Checks if the natural number equals an array of bits. 
+ pub fn is_equal>(&self, mut cs: CS, other: &Bitvector) { + let mut f = Scalar::ONE; + let sum = other.allocations.iter().fold(LinearCombination::zero(), |lc, bit| { + let l = lc + (f, &bit.bit); + f = f.double(); + l + }); + let sum_lc = LinearCombination::zero() + &self.num - ∑ + cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); + } + + /// Compute the natural number represented by an array of limbs. + /// The limbs are assumed to be based the `limb_width` power of 2. + /// Low-index bits are low-order + pub fn decompose>( + &self, + mut cs: CS, + n_bits: usize, + ) -> Result, SynthesisError> { + let values: Option> = self.value.as_ref().map(|v| { + let num = *v; + (0..n_bits).map(|i| num.get_bit(i).unwrap()).collect() + }); + let allocations: Vec> = (0..n_bits) + .map(|bit_i| { + Bit::alloc(cs.namespace(|| format!("bit{bit_i}")), values.as_ref().map(|vs| vs[bit_i])) + }) + .collect::, _>>()?; + let mut f = Scalar::ONE; + let sum = allocations.iter().fold(LinearCombination::zero(), |lc, bit| { + let l = lc + (f, &bit.bit); + f = f.double(); + l + }); + let sum_lc = LinearCombination::zero() + &self.num - ∑ + cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); + let bits: Vec> = + allocations.iter().map(|a| LinearCombination::zero() + &a.bit).collect(); + Ok(Bitvector { allocations, values, bits }) + } + + pub fn as_allocated_num>( + &self, + mut cs: CS, + ) -> Result, SynthesisError> { + let new = AllocatedNum::alloc(cs.namespace(|| "alloc"), || Ok(*self.value.grab()?))?; + cs.enforce(|| "eq", |lc| lc, |lc| lc, |lc| lc + new.get_variable() - &self.num); + Ok(new) + } +} + +impl From> for Num { + fn from(a: AllocatedNum) -> Self { + Self::new(a.get_value(), LinearCombination::zero() + a.get_variable()) + } +} + +fn write_be(f: &F, mut writer: W) -> io::Result<()> { + for digit in f.to_repr().as_ref().iter().rev() { + writer.write_u8(*digit)?; + } + + Ok(()) +} + +/// Convert a field element to a natural number +pub fn 
f_to_nat(f: &Scalar) -> BigInt { + let mut s = Vec::new(); + write_be(f, &mut s).unwrap(); + BigInt::from_bytes_le(Sign::Plus, f.to_repr().as_ref()) +} + +/// Convert a natural number to a field element. +/// Returns `None` if the number is too big for the field. +pub fn nat_to_f(n: &BigInt) -> Option { + Scalar::from_str_vartime(&format!("{n}")) +} + +#[cfg(test)] +mod tests { + use bitvec::field::BitField as _; + use ff::PrimeFieldBits; + use rand::SeedableRng; + use rand_chacha::ChaCha20Rng; + + // the write_be function above assumes Field::to_repr() outputs a representation + // that's an instance of `AsRef<[u8]>` in lower endian. We test that here, + // as this is not what the I2OSP standard recommends and may change in some + // implementations. + fn test_repr_is_le_with() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + for _i in 0..50 { + let f = F::random(&mut rng); + // This is guaranteed to be in LE + let le_bits = f.to_le_bits(); + let leftmost_u64 = le_bits[..64].load_le::(); + + // This is not + let f_repr = f.to_repr(); + let bytes: [u8; 8] = f_repr.as_ref()[..8].try_into().unwrap(); + let u64_from_repr = u64::from_le_bytes(bytes); + + assert_eq!(leftmost_u64, u64_from_repr); + } + } + + #[test] + fn test_repr_is_le() { + test_repr_is_le_with::(); + test_repr_is_le_with::(); + } +} diff --git a/prover/src/gadgets/r1cs.rs b/prover/src/gadgets/r1cs.rs new file mode 100644 index 0000000..c958331 --- /dev/null +++ b/prover/src/gadgets/r1cs.rs @@ -0,0 +1,395 @@ +//! This module implements various gadgets necessary for folding R1CS types. 
+use bellpepper::gadgets::{ + boolean::Boolean, boolean_utils::conditionally_select, num::AllocatedNum, Assignment, +}; +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools as _; + +use super::nonnative::{ + bignat::BigNat, + util::{f_to_nat, Num}, +}; +use crate::{ + constants::{NUM_CHALLENGE_BITS, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, + gadgets::{ + ecc::AllocatedPoint, + utils::{ + alloc_bignat_constant, alloc_one, alloc_scalar_as_base, conditionally_select_bignat, + le_bits_to_num, + }, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, +}; + +/// An Allocated R1CS Instance +#[derive(Clone)] +pub struct AllocatedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) X: [AllocatedNum; N], +} + +impl AllocatedR1CSInstance { + /// Takes the r1cs instance and creates a new allocated r1cs instance + pub fn alloc::Base>>( + mut cs: CS, + u: Option<&R1CSInstance>, + ) -> Result { + let W = + AllocatedPoint::alloc(cs.namespace(|| "allocate W"), u.map(|u| u.comm_W.to_coordinates()))?; + W.check_on_curve(cs.namespace(|| "check W on curve"))?; + + let X: [AllocatedNum; N] = (0..N) + .map(|idx| { + alloc_scalar_as_base::( + cs.namespace(|| format!("allocating X[{idx}]")), + u.map(|u| u.X[idx]), + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, X }) + } + + /// Absorb the provided instance in the RO + pub fn absorb_in_ro(&self, ro: &mut E::ROCircuit) { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + self.X.iter().for_each(|x| ro.absorb(x)); + } +} + +/// An Allocated Relaxed R1CS Instance +#[derive(Clone)] +pub struct AllocatedRelaxedR1CSInstance { + pub(crate) W: AllocatedPoint, + pub(crate) E: AllocatedPoint, + pub(crate) u: AllocatedNum, + pub(crate) X: [BigNat; N], +} + +impl AllocatedRelaxedR1CSInstance { + /// Allocates the given `RelaxedR1CSInstance` as a witness of the circuit + pub fn alloc::Base>>( + mut cs: CS, + inst: Option<&RelaxedR1CSInstance>, + limb_width: usize, + n_limbs: usize, + ) -> Result { + // We do not need to check that W or E are well-formed (e.g., on the curve) as + // we do a hash check in the Nova augmented circuit, which ensures that + // the relaxed instance came from a prior iteration of Nova. + let W = AllocatedPoint::alloc( + cs.namespace(|| "allocate W"), + inst.map(|inst| inst.comm_W.to_coordinates()), + )?; + + let E = AllocatedPoint::alloc( + cs.namespace(|| "allocate E"), + inst.map(|inst| inst.comm_E.to_coordinates()), + )?; + + // u << |E::Base| despite the fact that u is a scalar. + // So we parse all of its bytes as a E::Base element + let u = alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; + + // Allocate X. If the input instance is None then allocate components as zero. + let X = (0..N) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocate X[{idx}]")), + || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[idx]))), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, E, u, X }) + } + + /// Allocates the hardcoded default `RelaxedR1CSInstance` in the circuit. + /// W = E = 0, u = 0, X0 = X1 = 0 + pub fn default::Base>>( + mut cs: CS, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let W = AllocatedPoint::default(cs.namespace(|| "allocate W")); + let E = W.clone(); + + let u = W.x.clone(); // In the default case, W.x = u = 0 + + // X is allocated and in the honest prover case set to zero + // If the prover is malicious, it can set to arbitrary values, but the resulting + // relaxed R1CS instance with the the checked default values of W, E, and u must + // still be satisfying + + let X = (0..N) + .map(|idx| { + BigNat::alloc_from_nat( + cs.namespace(|| format!("allocate X_default[{idx}]")), + || Ok(f_to_nat(&E::Scalar::ZERO)), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? + .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W, E, u, X }) + } + + /// Allocates the R1CS Instance as a `RelaxedR1CSInstance` in the circuit. + /// E = 0, u = 1 + pub fn from_r1cs_instance::Base>>( + mut cs: CS, + inst: AllocatedR1CSInstance, + limb_width: usize, + n_limbs: usize, + ) -> Result { + let E = AllocatedPoint::default(cs.namespace(|| "allocate default E")); + + let u = alloc_one(cs.namespace(|| "one")); + + let X = inst + .X + .into_iter() + .enumerate() + .map(|(idx, x)| { + BigNat::from_num( + cs.namespace(|| format!("allocate X[{idx}] from relaxed r1cs")), + &Num::from(x), + limb_width, + n_limbs, + ) + }) + .collect::, _>>()? 
+ .try_into() + .map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W: inst.W, E, u, X }) + } + + /// Absorb the provided instance in the RO + pub fn absorb_in_ro::Base>>( + &self, + mut cs: CS, + ro: &mut E::ROCircuit, + ) -> Result<(), SynthesisError> { + ro.absorb(&self.W.x); + ro.absorb(&self.W.y); + ro.absorb(&self.W.is_infinity); + ro.absorb(&self.E.x); + ro.absorb(&self.E.y); + ro.absorb(&self.E.is_infinity); + ro.absorb(&self.u); + + self.X.iter().enumerate().try_for_each(|(idx, X)| { + X.as_limbs().iter().enumerate().try_for_each(|(i, limb)| -> Result<(), SynthesisError> { + ro.absorb( + &limb + .as_allocated_num(cs.namespace(|| format!("convert limb {i} of X_r[{idx}] to num")))?, + ); + Ok(()) + }) + })?; + + Ok(()) + } + + /// Folds self with a relaxed r1cs instance and returns the result + pub fn fold_with_r1cs::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, // hash of R1CSShape of F' + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + ro_consts: ROConstantsCircuit, + limb_width: usize, + n_limbs: usize, + ) -> Result { + // Compute r: + let mut ro = E::ROCircuit::new(ro_consts, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + N); + ro.absorb(params); + + // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, + // i, z0, zi) + u.absorb_in_ro(&mut ro); + + ro.absorb(&T.x); + ro.absorb(&T.y); + ro.absorb(&T.is_infinity); + let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; + let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; + + // W_fold = self.W + r * u.W + let rW = u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; + let W_fold = self.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; + + // E_fold = self.E + r * T + let rT = T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; + let E_fold = self.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; + + // u_fold = u_r + r + let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { + 
Ok(*self.u.get_value().get()? + r.get_value().get()?) + })?; + cs.enforce( + || "Check u_fold", + |lc| lc, + |lc| lc, + |lc| lc + u_fold.get_variable() - self.u.get_variable() - r.get_variable(), + ); + + // Fold the IO: + // Analyze r into limbs + let r_bn = + BigNat::from_num(cs.namespace(|| "allocate r_bn"), &Num::from(r), limb_width, n_limbs)?; + + // Allocate the order of the non-native field as a constant + let m_bn = alloc_bignat_constant( + cs.namespace(|| "alloc m"), + &E::GE::group_params().2, + limb_width, + n_limbs, + )?; + + let mut X_fold = vec![]; + + for (idx, (X, x)) in self.X.iter().zip_eq(u.X.iter()).enumerate() { + let x_bn = BigNat::from_num( + cs.namespace(|| format!("allocate u.X[{idx}]_bn")), + &Num::from(x.clone()), + limb_width, + n_limbs, + )?; + + let (_, r) = x_bn.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; + let r_new = X.add(&r)?; + let X_i_fold = r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; + X_fold.push(X_i_fold); + } + + let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + Ok(Self { W: W_fold, E: E_fold, u: u_fold, X: X_fold }) + } + + /// If the condition is true then returns this otherwise it returns the + /// other + pub fn conditionally_select::Base>>( + &self, + cs: CS, + other: &Self, + condition: &Boolean, + ) -> Result { + conditionally_select_alloc_relaxed_r1cs(cs, self, other, condition) + } +} + +/// c = cond ? a: b, where a, b: `AllocatedRelaxedR1CSInstance` +pub fn conditionally_select_alloc_relaxed_r1cs< + E: Engine, + CS: ConstraintSystem<::Base>, + const N: usize, +>( + mut cs: CS, + a: &AllocatedRelaxedR1CSInstance, + b: &AllocatedRelaxedR1CSInstance, + condition: &Boolean, +) -> Result, SynthesisError> { + let c_X = a + .X + .iter() + .zip_eq(b.X.iter()) + .enumerate() + .map(|(idx, (a, b))| { + conditionally_select_bignat( + cs.namespace(|| format!("X[{idx}] = cond ? 
a.X[{idx}] : b.X[{idx}]")), + a, + b, + condition, + ) + }) + .collect::, _>>()?; + + let c_X = c_X.try_into().map_err(|err: Vec<_>| { + SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) + })?; + + let c = AllocatedRelaxedR1CSInstance { + W: conditionally_select_point(cs.namespace(|| "W = cond ? a.W : b.W"), &a.W, &b.W, condition)?, + E: conditionally_select_point(cs.namespace(|| "E = cond ? a.E : b.E"), &a.E, &b.E, condition)?, + u: conditionally_select(cs.namespace(|| "u = cond ? a.u : b.u"), &a.u, &b.u, condition)?, + X: c_X, + }; + Ok(c) +} + +/// c = cond ? a: b, where a, b: `Vec` +pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< + E: Engine, + CS: ConstraintSystem<::Base>, + const N: usize, +>( + mut cs: CS, + a: &[AllocatedRelaxedR1CSInstance], + b: &[AllocatedRelaxedR1CSInstance], + condition: &Boolean, +) -> Result>, SynthesisError> { + a.iter() + .enumerate() + .zip_eq(b.iter()) + .map(|((i, a), b)| { + a.conditionally_select(cs.namespace(|| format!("cond ? a[{}]: b[{}]", i, i)), b, condition) + }) + .collect::>, _>>() +} + +/// c = cond ? a: b, where a, b: `AllocatedPoint` +pub fn conditionally_select_point>( + mut cs: CS, + a: &AllocatedPoint, + b: &AllocatedPoint, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedPoint { + x: conditionally_select( + cs.namespace(|| "x = cond ? a.x : b.x"), + &a.x, + &b.x, + condition, + )?, + y: conditionally_select( + cs.namespace(|| "y = cond ? a.y : b.y"), + &a.y, + &b.y, + condition, + )?, + is_infinity: conditionally_select( + cs.namespace(|| "is_infinity = cond ? a.is_infinity : b.is_infinity"), + &a.is_infinity, + &b.is_infinity, + condition, + )?, + }; + Ok(c) +} diff --git a/prover/src/gadgets/utils.rs b/prover/src/gadgets/utils.rs new file mode 100644 index 0000000..a5daa02 --- /dev/null +++ b/prover/src/gadgets/utils.rs @@ -0,0 +1,385 @@ +//! 
This module implements various low-level gadgets +use bellpepper::gadgets::Assignment; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, LinearCombination, SynthesisError, +}; +use ff::{Field, PrimeField, PrimeFieldBits}; +use num_bigint::BigInt; + +use super::nonnative::bignat::{nat_to_limbs, BigNat}; +use crate::traits::Engine; + +/// Gets as input the little indian representation of a number and spits out the +/// number +pub fn le_bits_to_num( + mut cs: CS, + bits: &[AllocatedBit], +) -> Result, SynthesisError> +where + Scalar: PrimeField + PrimeFieldBits, + CS: ConstraintSystem, +{ + // We loop over the input bits and construct the constraint + // and the field element that corresponds to the result + let mut lc = LinearCombination::zero(); + let mut coeff = Scalar::ONE; + let mut fe = Some(Scalar::ZERO); + for bit in bits.iter() { + lc = lc + (coeff, bit.get_variable()); + fe = bit.get_value().map(|val| if val { fe.unwrap() + coeff } else { fe.unwrap() }); + coeff = coeff.double(); + } + let num = AllocatedNum::alloc(cs.namespace(|| "Field element"), || { + fe.ok_or(SynthesisError::AssignmentMissing) + })?; + lc = lc - num.get_variable(); + cs.enforce(|| "compute number from bits", |lc| lc, |lc| lc, |_| lc); + Ok(num) +} + +/// Allocate a variable that is set to zero +pub fn alloc_zero>(mut cs: CS) -> AllocatedNum { + let zero = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ZERO); + cs.enforce(|| "check zero is valid", |lc| lc, |lc| lc, |lc| lc + zero.get_variable()); + zero +} + +/// Allocate a variable that is set to one +pub fn alloc_one>(mut cs: CS) -> AllocatedNum { + let one = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ONE); + cs.enforce( + || "check one is valid", + |lc| lc + CS::one(), + |lc| lc + CS::one(), + |lc| lc + one.get_variable(), + ); + + one +} + +/// Allocate a scalar as a base. Only to be used is the scalar fits in base! 
+pub fn alloc_scalar_as_base( + mut cs: CS, + input: Option, +) -> Result, SynthesisError> +where + E: Engine, + CS: ConstraintSystem<::Base>, +{ + AllocatedNum::alloc(cs.namespace(|| "allocate scalar as base"), || { + let val = scalar_as_base::(input.unwrap_or(E::Scalar::ZERO)); + Ok(val) + }) +} + +/// interpret scalar as base +pub fn scalar_as_base(input: E::Scalar) -> E::Base { + let input_bits = input.to_le_bits(); + let mut mult = E::Base::ONE; + let mut val = E::Base::ZERO; + for bit in input_bits { + if bit { + val += mult; + } + mult = mult + mult; + } + val +} + +/// Allocate bignat a constant +pub fn alloc_bignat_constant>( + mut cs: CS, + val: &BigInt, + limb_width: usize, + n_limbs: usize, +) -> Result, SynthesisError> { + let limbs = nat_to_limbs(val, limb_width, n_limbs).unwrap(); + let bignat = BigNat::alloc_from_limbs( + cs.namespace(|| "alloc bignat"), + || Ok(limbs.clone()), + None, + limb_width, + n_limbs, + )?; + // Now enforce that the limbs are all equal to the constants + (0..n_limbs).for_each(|i| { + cs.enforce( + || format!("check limb {i}"), + |lc| lc + &bignat.limbs[i], + |lc| lc + CS::one(), + |lc| lc + (limbs[i], CS::one()), + ); + }); + Ok(bignat) +} + +/// Check that two numbers are equal and return a bit +pub fn alloc_num_equals>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, +) -> Result { + // Allocate and constrain `r`: result boolean bit. + // It equals `true` if `a` equals `b`, `false` otherwise + let r_value = match (a.get_value(), b.get_value()) { + (Some(a), Some(b)) => Some(a == b), + _ => None, + }; + + let r = AllocatedBit::alloc(cs.namespace(|| "r"), r_value)?; + + // Allocate t s.t. 
t=1 if a == b else 1/(a - b) + + let t = AllocatedNum::alloc(cs.namespace(|| "t"), || { + let a_val = *a.get_value().get()?; + let b_val = *b.get_value().get()?; + Ok(if a_val == b_val { F::ONE } else { (a_val - b_val).invert().unwrap() }) + })?; + + cs.enforce( + || "t*(a - b) = 1 - r", + |lc| lc + t.get_variable(), + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc + CS::one() - r.get_variable(), + ); + + cs.enforce( + || "r*(a - b) = 0", + |lc| lc + r.get_variable(), + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc, + ); + + Ok(r) +} + +// TODO: Figure out if this can be done better +pub fn conditionally_select_allocated_bit>( + mut cs: CS, + a: &AllocatedBit, + b: &AllocatedBit, + condition: &Boolean, +) -> Result { + let c = AllocatedBit::alloc( + cs.namespace(|| "conditionally select result"), + if condition.get_value().unwrap_or(false) { a.get_value() } else { b.get_value() }, + )?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - b.get_variable(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable() - b.get_variable(), + ); + + Ok(c) +} +/// If condition return a otherwise b where a and b are `BigNats` +pub fn conditionally_select_bignat>( + mut cs: CS, + a: &BigNat, + b: &BigNat, + condition: &Boolean, +) -> Result, SynthesisError> { + assert!(a.limbs.len() == b.limbs.len()); + let c = BigNat::alloc_from_nat( + cs.namespace(|| "conditional select result"), + || { + if *condition.get_value().get()? 
{ + Ok(a.value.get()?.clone()) + } else { + Ok(b.value.get()?.clone()) + } + }, + a.params.limb_width, + a.params.n_limbs, + )?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + for i in 0..c.limbs.len() { + cs.enforce( + || format!("conditional select constraint {i}"), + |lc| lc + &a.limbs[i] - &b.limbs[i], + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + &c.limbs[i] - &b.limbs[i], + ); + } + Ok(c) +} + +/// Same as the above but Condition is an `AllocatedNum` that needs to be +/// 0 or 1. 1 => True, 0 => False +pub fn conditionally_select2>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(*a.get_value().get()?) + } else { + Ok(*b.get_value().get()?) + } + })?; + + // a * condition + b*(1-condition) = c -> + // a * condition - b*condition = c - b + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - b.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - b.get_variable(), + ); + + Ok(c) +} + +/// If condition set to 0 otherwise a. Condition is an allocated num +pub fn select_zero_or_num2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ZERO) + } else { + Ok(*a.get_value().get()?) + } + })?; + + // a * (1 - condition) = c + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |lc| lc + CS::one() - condition.get_variable(), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to a otherwise 0. 
Condition is an allocated num +pub fn select_num_or_zero2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(*a.get_value().get()?) + } else { + Ok(F::ZERO) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to a otherwise 0 +pub fn select_num_or_zero>( + mut cs: CS, + a: &AllocatedNum, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? { + Ok(*a.get_value().get()?) + } else { + Ok(F::ZERO) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable(), + ); + + Ok(c) +} + +/// If condition set to 1 otherwise a +pub fn select_one_or_num2>( + mut cs: CS, + a: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ONE) + } else { + Ok(*a.get_value().get()?) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + CS::one() - a.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - a.get_variable(), + ); + Ok(c) +} + +/// If condition set to 1 otherwise a - b +pub fn select_one_or_diff2>( + mut cs: CS, + a: &AllocatedNum, + b: &AllocatedNum, + condition: &AllocatedNum, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? == F::ONE { + Ok(F::ONE) + } else { + Ok(*a.get_value().get()? - *b.get_value().get()?) 
+ } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + CS::one() - a.get_variable() + b.get_variable(), + |lc| lc + condition.get_variable(), + |lc| lc + c.get_variable() - a.get_variable() + b.get_variable(), + ); + Ok(c) +} + +/// If condition set to a otherwise 1 for boolean conditions +pub fn select_num_or_one>( + mut cs: CS, + a: &AllocatedNum, + condition: &Boolean, +) -> Result, SynthesisError> { + let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { + if *condition.get_value().get()? { + Ok(*a.get_value().get()?) + } else { + Ok(F::ONE) + } + })?; + + cs.enforce( + || "conditional select constraint", + |lc| lc + a.get_variable() - CS::one(), + |_| condition.lc(CS::one(), F::ONE), + |lc| lc + c.get_variable() - CS::one(), + ); + + Ok(c) +} diff --git a/prover/src/lib.rs b/prover/src/lib.rs new file mode 100644 index 0000000..24ad85f --- /dev/null +++ b/prover/src/lib.rs @@ -0,0 +1,1628 @@ +#![allow(non_snake_case, clippy::type_complexity, clippy::too_many_arguments)] + +// private modules +pub mod bellpepper; +pub mod circuit; +pub mod digest; +pub mod nifs; + +// public modules +pub mod constants; +pub mod errors; +pub mod fast_serde; +pub mod gadgets; +pub mod provider; +pub mod r1cs; +pub mod spartan; +pub mod traits; + +pub mod cyclefold; +pub mod supernova; + +use std::sync::Arc; + +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use circuit::{NovaAugmentedCircuit, NovaAugmentedCircuitInputs, NovaAugmentedCircuitParams}; +use constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}; +use errors::NovaError; +use ff::Field; +use gadgets::scalar_as_base; +use nifs::NIFS; +use once_cell::sync::OnceCell; +use r1cs::{ + CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, +}; +use serde::{Deserialize, Serialize}; +use supernova::StepCircuit; +use traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + 
snark::RelaxedR1CSSNARKTrait, + AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, ROTrait, +}; + +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + digest::{DigestComputer, SimpleDigestible}, + r1cs::R1CSResult, +}; + +/// A type that holds parameters for the primary and secondary circuits of Nova +/// and SuperNova +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSWithArity { + F_arity: usize, + r1cs_shape: R1CSShape, +} + +impl SimpleDigestible for R1CSWithArity {} + +impl R1CSWithArity { + /// Create a new `R1CSWithArity` + pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { Self { F_arity, r1cs_shape } } + + /// Return the [`R1CSWithArity`]' digest. + pub fn digest(&self) -> E::Scalar { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest().expect("Failure in computing digest") + } +} + +/// A type that holds public parameters of Nova +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E: CurveCycleEquipped { + F_arity_primary: usize, + F_arity_secondary: usize, + ro_consts_primary: ROConstants, + ro_consts_circuit_primary: ROConstantsCircuit>, + pub ck_primary: Arc>, + circuit_shape_primary: R1CSWithArity, + ro_consts_secondary: ROConstants>, + ro_consts_circuit_secondary: ROConstantsCircuit, + pub ck_secondary: Arc>>, + circuit_shape_secondary: R1CSWithArity>, + augmented_circuit_params_primary: NovaAugmentedCircuitParams, + augmented_circuit_params_secondary: NovaAugmentedCircuitParams, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Set up builder to create `PublicParams` for a pair of circuits `C1` and + /// `C2`. 
+ /// + /// # Note + /// + /// Public parameters set up a number of bases for the homomorphic + /// commitment scheme of Nova. + /// + /// Some final compressing SNARKs, like variants of Spartan, use computation + /// commitments that require larger sizes for these parameters. These + /// SNARKs provide a hint for these values by implementing + /// `RelaxedR1CSSNARKTrait::ck_floor()`, which can be passed to this + /// function. + /// + /// If you're not using such a SNARK, pass + /// `arecibo::traits::snark::default_ck_hint()` instead. + /// + /// # Arguments + /// + /// * `c_primary`: The primary circuit of type `C1`. + /// * `c_secondary`: The secondary circuit of type `C2`. + /// * `ck_hint1`: A `CommitmentKeyHint` for `G1`, which is a function that provides a hint for the + /// number of generators required in the commitment scheme for the primary circuit. + /// * `ck_hint2`: A `CommitmentKeyHint` for `G2`, similar to `ck_hint1`, but for the secondary + /// circuit. + /// + /// # Example + // TODO: THIS TEST DOES NOT WORK RIGHT NOW + // / ```rust + // / # use client_side_prover::spartan::ppsnark::RelaxedR1CSSNARK; + // / # use client_side_prover::provider::ipa_pc::EvaluationEngine; + // / # use client_side_prover::provider::{PallasEngine, VestaEngine}; + // / # use client_side_prover::traits::{circuit::TrivialCircuit, Engine, + // snark::RelaxedR1CSSNARKTrait}; / use client_side_prover::PublicParams; + // / + // / type E1 = PallasEngine; + // / type E2 = VestaEngine; + // / type EE = EvaluationEngine; + // / type SPrime = RelaxedR1CSSNARK>; + // / + // / let circuit1 = TrivialCircuit::<::Scalar>::default(); + // / let circuit2 = TrivialCircuit::<::Scalar>::default(); + // / // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) + // / // or &*nova_snark::traits::snark::default_ck_hint() otherwise. 
+ // / let ck_hint1 = &*SPrime::::ck_floor(); + // / let ck_hint2 = &*SPrime::::ck_floor(); + // / + // / let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); + // / ``` + pub fn setup, C2: StepCircuit< as Engine>::Scalar>>( + c_primary: &C1, + c_secondary: &C2, + ck_hint1: &CommitmentKeyHint, + ck_hint2: &CommitmentKeyHint>, + ) -> Result { + let augmented_circuit_params_primary = + NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let augmented_circuit_params_secondary = + NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + + let ro_consts_primary: ROConstants = ROConstants::::default(); + let ro_consts_secondary: ROConstants> = ROConstants::>::default(); + + let F_arity_primary = c_primary.arity(); + let F_arity_secondary = c_secondary.arity(); + + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); + + // Initialize ck for the primary + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &augmented_circuit_params_primary, + None, + c_primary, + ro_consts_circuit_primary.clone(), + ); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = circuit_primary.synthesize(&mut cs); + let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint1); + let ck_primary = Arc::new(ck_primary); + + // Initialize ck for the secondary + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &augmented_circuit_params_secondary, + None, + c_secondary, + ro_consts_circuit_secondary.clone(), + ); + let mut cs: ShapeCS> = ShapeCS::new(); + let _ = circuit_secondary.synthesize(&mut cs); + let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); + let ck_secondary = Arc::new(ck_secondary); + + if 
r1cs_shape_primary.num_io != 2 || r1cs_shape_secondary.num_io != 2 { + return Err(NovaError::InvalidStepCircuitIO); + } + + let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); + let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); + + Ok(Self { + F_arity_primary, + F_arity_secondary, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + circuit_shape_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_primary, + augmented_circuit_params_secondary, + digest: OnceCell::new(), + }) + } + + /// Retrieve the digest of the public parameters. + pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints in the primary and secondary circuits + pub const fn num_constraints(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_cons, + self.circuit_shape_secondary.r1cs_shape.num_cons, + ) + } + + /// Returns the number of variables in the primary and secondary circuits + pub const fn num_variables(&self) -> (usize, usize) { + ( + self.circuit_shape_primary.r1cs_shape.num_vars, + self.circuit_shape_secondary.r1cs_shape.num_vars, + ) + } +} + +/// A resource buffer for [`RecursiveSNARK`] for storing scratch values that are +/// computed by `prove_step`, which allows the reuse of memory allocations and +/// avoids unnecessary new allocations in the critical section. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ResourceBuffer { + l_w: Option>, + l_u: Option>, + + ABC_Z_1: R1CSResult, + ABC_Z_2: R1CSResult, + + /// buffer for `commit_T` + T: Vec, +} + +/// A SNARK that proves the correct execution of an incremental computation +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + z0_primary: Vec, + z0_secondary: Vec< as Engine>::Scalar>, + r_W_primary: RelaxedR1CSWitness, + r_U_primary: RelaxedR1CSInstance, + r_W_secondary: RelaxedR1CSWitness>, + r_U_secondary: RelaxedR1CSInstance>, + l_w_secondary: R1CSWitness>, + l_u_secondary: R1CSInstance>, + + /// Buffer for memory needed by the primary fold-step + buffer_primary: ResourceBuffer, + /// Buffer for memory needed by the secondary fold-step + buffer_secondary: ResourceBuffer>, + + i: usize, + zi_primary: Vec, + zi_secondary: Vec< as Engine>::Scalar>, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// Create new instance of recursive SNARK + pub fn new, C2: StepCircuit< as Engine>::Scalar>>( + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result { + if z0_primary.len() != pp.F_arity_primary || z0_secondary.len() != pp.F_arity_secondary { + return Err(NovaError::InvalidInitialInputLength); + } + + let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; + let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; + + // base case for the primary + let mut cs_primary = SatisfyingAssignment::::new(); + let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::ZERO, + z0_primary.to_vec(), + None, + None, + None, + None, + ); + + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + 
pp.ro_consts_circuit_primary.clone(), + ); + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + let (u_primary, w_primary) = + cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; + + // base case for the secondary + let mut cs_secondary = SatisfyingAssignment::>::new(); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::ZERO, + z0_secondary.to_vec(), + None, + None, + Some(u_primary.clone()), + None, + ); + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + ); + let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; + let (u_secondary, w_secondary) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; + + // IVC proof for the primary circuit + let l_w_primary = w_primary; + let l_u_primary = u_primary; + let r_W_primary = RelaxedR1CSWitness::from_r1cs_witness(r1cs_primary, l_w_primary); + let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( + &*pp.ck_primary, + &pp.circuit_shape_primary.r1cs_shape, + l_u_primary, + ); + + // IVC proof for the secondary circuit + let l_w_secondary = w_secondary; + let l_u_secondary = u_secondary; + let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); + let r_U_secondary = RelaxedR1CSInstance::>::default(&pp.ck_secondary, r1cs_secondary); + + assert!( + !(zi_primary.len() != pp.F_arity_primary || zi_secondary.len() != pp.F_arity_secondary), + "Invalid step length" + ); + + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::::Scalar>, _>>()?; + + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect:: as Engine>::Scalar>, _>>()?; + + let buffer_primary = 
ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), + T: r1cs::default_T::(r1cs_primary.num_cons), + }; + + let buffer_secondary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), + T: r1cs::default_T::>(r1cs_secondary.num_cons), + }; + + Ok(Self { + z0_primary: z0_primary.to_vec(), + z0_secondary: z0_secondary.to_vec(), + r_W_primary, + r_U_primary, + r_W_secondary, + r_U_secondary, + l_w_secondary, + l_u_secondary, + + buffer_primary, + buffer_secondary, + i: 0, + zi_primary, + zi_secondary, + }) + } + + /// Inputs of the primary circuits + pub fn z0_primary(&self) -> &Vec { &self.z0_primary } + + /// Outputs of the primary circuits + pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + + /// Create a new `RecursiveSNARK` (or updates the provided `RecursiveSNARK`) + /// by executing a step of the incremental computation + #[tracing::instrument(skip_all, name = "nova::RecursiveSNARK::prove_step")] + pub fn prove_step, C2: StepCircuit< as Engine>::Scalar>>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + ) -> Result<(), NovaError> { + // first step was already done in the constructor + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + // save the inputs before proceeding to the `i+1`th step + let r_U_primary_i = self.r_U_primary.clone(); + let r_U_secondary_i = self.r_U_secondary.clone(); + let l_u_secondary_i = self.l_u_secondary.clone(); + + // fold the secondary circuit's instance + let (nifs_secondary, _) = NIFS::prove_mut( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &mut self.r_U_secondary, + &mut self.r_W_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + &mut self.buffer_secondary.T, + &mut self.buffer_secondary.ABC_Z_1, + &mut 
self.buffer_secondary.ABC_Z_2, + )?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp.circuit_shape_primary.r1cs_shape.num_io + 1, + pp.circuit_shape_primary.r1cs_shape.num_vars, + ); + let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::from(self.i as u64), + self.z0_primary.to_vec(), + Some(self.zi_primary.clone()), + Some(r_U_secondary_i), + Some(l_u_secondary_i), + Some(Commitment::>::decompress(&nifs_secondary.comm_T)?), + ); + + let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + ); + + let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; + + let (l_u_primary, l_w_primary) = + cs_primary.r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary)?; + + // fold the primary circuit's instance + let (nifs_primary, _) = NIFS::prove_mut( + &*pp.ck_primary, + &pp.ro_consts_primary, + &pp.digest(), + &pp.circuit_shape_primary.r1cs_shape, + &mut self.r_U_primary, + &mut self.r_W_primary, + &l_u_primary, + &l_w_primary, + &mut self.buffer_primary.T, + &mut self.buffer_primary.ABC_Z_1, + &mut self.buffer_primary.ABC_Z_2, + )?; + + let mut cs_secondary = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_secondary.r1cs_shape.num_io + 1, + pp.circuit_shape_secondary.r1cs_shape.num_vars, + ); + let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::from(self.i as u64), + self.z0_secondary.to_vec(), + Some(self.zi_secondary.clone()), + Some(r_U_primary_i), + Some(l_u_primary), + Some(Commitment::::decompress(&nifs_primary.comm_T)?), + ); + + let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + 
pp.ro_consts_circuit_secondary.clone(), + ); + let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; + + let (l_u_secondary, l_w_secondary) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary) + .map_err(|_e| NovaError::UnSat)?; + + // update the running instances and witnesses + self.zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::::Scalar>, _>>()?; + self.zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect:: as Engine>::Scalar>, _>>()?; + + self.l_u_secondary = l_u_secondary; + self.l_w_secondary = l_w_secondary; + + self.i += 1; + + Ok(()) + } + + /// Verify the correctness of the `RecursiveSNARK` + #[allow(clippy::type_complexity)] + pub fn verify( + &self, + pp: &PublicParams, + num_steps: usize, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { + // number of steps cannot be zero + let is_num_steps_zero = num_steps == 0; + + // check if the provided proof has executed num_steps + let is_num_steps_not_match = self.i != num_steps; + + // check if the initial inputs match + let is_inputs_not_match = self.z0_primary != z0_primary || self.z0_secondary != z0_secondary; + + // check if the (relaxed) R1CS instances have two public outputs + let is_instance_has_two_outputs = self.l_u_secondary.X.len() != 2 + || self.r_U_primary.X.len() != 2 + || self.r_U_secondary.X.len() != 2; + + if is_num_steps_zero + || is_num_steps_not_match + || is_inputs_not_match + || is_instance_has_two_outputs + { + return Err(NovaError::ProofVerifyError); + } + + // check if the output hashes in R1CS instances point to the right running + // instances + let (hash_primary, hash_secondary) = { + let mut hasher = as Engine>::RO::new( + pp.ro_consts_secondary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_primary, + ); 
+ hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = ::RO::new( + pp.ro_consts_primary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_secondary, + ); + hasher2.absorb(scalar_as_base::(pp.digest())); + hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); + for e in z0_secondary { + hasher2.absorb(*e); + } + for e in &self.zi_secondary { + hasher2.absorb(*e); + } + self.r_U_primary.absorb_in_ro(&mut hasher2); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + if hash_primary != self.l_u_secondary.X[0] + || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) + { + return Err(NovaError::ProofVerifyError); + } + + // check the satisfiability of the provided instances + let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( + || { + pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( + &pp.ck_primary, + &self.r_U_primary, + &self.r_W_primary, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( + &pp.ck_secondary, + &self.r_U_secondary, + &self.r_W_secondary, + ) + }, + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat( + &pp.ck_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + ) + }, + ) + }, + ); + + // check the returned res objects + res_r_primary?; + res_r_secondary?; + res_l_secondary?; + + Ok((self.zi_primary.clone(), self.zi_secondary.clone())) + } + + /// Get the outputs after the last step of computation. + pub fn outputs(&self) -> (&[E1::Scalar], &[ as Engine>::Scalar]) { + (&self.zi_primary, &self.zi_secondary) + } + + /// The number of steps which have been executed thus far. 
+ pub fn num_steps(&self) -> usize { self.i } +} + +/// A type that holds the prover key for `CompressedSNARK` +#[derive(Clone, Debug)] +pub struct ProverKey +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub pk_primary: S1::ProverKey, + pub pk_secondary: S2::ProverKey, +} + +/// A type that holds the verifier key for `CompressedSNARK` +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + F_arity_primary: usize, + F_arity_secondary: usize, + ro_consts_primary: ROConstants, + ro_consts_secondary: ROConstants>, + pp_digest: E1::Scalar, + vk_primary: S1::VerifierKey, + vk_secondary: S2::VerifierKey, +} + +/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedSNARK +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + r_U_primary: RelaxedR1CSInstance, + r_W_snark_primary: S1, + + r_U_secondary: RelaxedR1CSInstance>, + l_u_secondary: R1CSInstance>, + nifs_secondary: NIFS>, + f_W_snark_secondary: S2, + + zn_primary: Vec, + zn_secondary: Vec< as Engine>::Scalar>, +} + +impl CompressedSNARK +where + E1: CurveCycleEquipped, + S1: RelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, +{ + /// Creates prover and verifier keys for `CompressedSNARK` + #[allow(clippy::type_complexity)] + pub fn setup( + pp: &PublicParams, + ) -> Result<(ProverKey, VerifierKey), NovaError> { + let (pk_primary, vk_primary) = + S1::setup(pp.ck_primary.clone(), &pp.circuit_shape_primary.r1cs_shape)?; + let (pk_secondary, vk_secondary) = + S2::setup(pp.ck_secondary.clone(), &pp.circuit_shape_secondary.r1cs_shape)?; + + let pk = ProverKey { pk_primary, pk_secondary }; + + let vk = VerifierKey { + F_arity_primary: pp.F_arity_primary, + F_arity_secondary: pp.F_arity_secondary, + 
ro_consts_primary: pp.ro_consts_primary.clone(), + ro_consts_secondary: pp.ro_consts_secondary.clone(), + pp_digest: pp.digest(), + vk_primary, + vk_secondary, + }; + + Ok((pk, vk)) + } + + /// Create a new `CompressedSNARK` + pub fn prove( + pp: &PublicParams, + pk: &ProverKey, + recursive_snark: &RecursiveSNARK, + ) -> Result { + // fold the secondary circuit's instance with its running instance + let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = NIFS::prove( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &recursive_snark.r_U_secondary, + &recursive_snark.r_W_secondary, + &recursive_snark.l_u_secondary, + &recursive_snark.l_w_secondary, + )?; + + // create SNARKs proving the knowledge of f_W_primary and f_W_secondary + let (r_W_snark_primary, f_W_snark_secondary) = rayon::join( + || { + S1::prove( + &pp.ck_primary, + &pk.pk_primary, + &pp.circuit_shape_primary.r1cs_shape, + &recursive_snark.r_U_primary, + &recursive_snark.r_W_primary, + ) + }, + || { + S2::prove( + &pp.ck_secondary, + &pk.pk_secondary, + &pp.circuit_shape_secondary.r1cs_shape, + &f_U_secondary, + &f_W_secondary, + ) + }, + ); + + Ok(Self { + r_U_primary: recursive_snark.r_U_primary.clone(), + r_W_snark_primary: r_W_snark_primary?, + + r_U_secondary: recursive_snark.r_U_secondary.clone(), + l_u_secondary: recursive_snark.l_u_secondary.clone(), + nifs_secondary, + f_W_snark_secondary: f_W_snark_secondary?, + + zn_primary: recursive_snark.zi_primary.clone(), + zn_secondary: recursive_snark.zi_secondary.clone(), + }) + } + + #[allow(clippy::type_complexity)] + /// Verify the correctness of the `CompressedSNARK` + pub fn verify( + &self, + vk: &VerifierKey, + num_steps: usize, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { + // the number of steps cannot be zero + if num_steps == 0 { + return Err(NovaError::ProofVerifyError); + } + + 
// check if the (relaxed) R1CS instances have two public outputs + if self.l_u_secondary.X.len() != 2 + || self.r_U_primary.X.len() != 2 + || self.r_U_secondary.X.len() != 2 + { + return Err(NovaError::ProofVerifyError); + } + + // check if the output hashes in R1CS instances point to the right running + // instances + let (hash_primary, hash_secondary) = { + let mut hasher = as Engine>::RO::new( + vk.ro_consts_secondary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_primary, + ); + hasher.absorb(vk.pp_digest); + hasher.absorb(E1::Scalar::from(num_steps as u64)); + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zn_primary { + hasher.absorb(*e); + } + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = ::RO::new( + vk.ro_consts_primary.clone(), + NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_secondary, + ); + hasher2.absorb(scalar_as_base::(vk.pp_digest)); + hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); + for e in z0_secondary { + hasher2.absorb(*e); + } + for e in &self.zn_secondary { + hasher2.absorb(*e); + } + self.r_U_primary.absorb_in_ro(&mut hasher2); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + if hash_primary != self.l_u_secondary.X[0] + || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) + { + return Err(NovaError::ProofVerifyError); + } + + // fold the secondary's running instance with the last instance to get a folded + // instance + let f_U_secondary = self.nifs_secondary.verify( + &vk.ro_consts_secondary, + &scalar_as_base::(vk.pp_digest), + &self.r_U_secondary, + &self.l_u_secondary, + )?; + + // check the satisfiability of the folded instances using + // SNARKs proving the knowledge of their satisfying witnesses + let (res_primary, res_secondary) = rayon::join( + || self.r_W_snark_primary.verify(&vk.vk_primary, &self.r_U_primary), + || self.f_W_snark_secondary.verify(&vk.vk_secondary, &f_U_secondary), + ); + + res_primary?; + res_secondary?; + + 
Ok((self.zn_primary.clone(), self.zn_secondary.clone())) + } +} + +/// Compute the circuit digest of a [`StepCircuit`]. +/// +/// Note for callers: This function should be called with its performance +/// characteristics in mind. It will synthesize and digest the full `circuit` +/// given. +pub fn circuit_digest>( + circuit: &C, +) -> E1::Scalar { + let augmented_circuit_params = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit are parameterized by G2 because the type alias uses + // G2::Base = G1::Scalar + let ro_consts_circuit: ROConstantsCircuit> = ROConstantsCircuit::>::default(); + + // Initialize ck for the primary + let augmented_circuit: NovaAugmentedCircuit<'_, Dual, C> = + NovaAugmentedCircuit::new(&augmented_circuit_params, None, circuit, ro_consts_circuit); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = augmented_circuit.synthesize(&mut cs); + cs.r1cs_shape().digest() +} + +pub type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; +type Commitment = <::CE as CommitmentEngineTrait>::Commitment; +type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; +type CE = ::CE; + +// #[cfg(test)] +// mod tests { +// use core::{fmt::Write, marker::PhantomData}; + +// use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, +// SynthesisError}; use expect_test::{expect, Expect}; +// use ff::PrimeField; +// use halo2curves::bn256::Bn256; +// use traits::circuit::TrivialCircuit; + +// use self::traits::CurveCycleEquipped; +// use super::*; +// use crate::{ +// provider::{ +// non_hiding_zeromorph::ZMPCS, Bn256EngineIPA, Bn256EngineKZG, +// Bn256EngineZM, PallasEngine, Secp256k1Engine, +// }, +// traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, +// }; + +// type EE = provider::ipa_pc::EvaluationEngine; +// type S = spartan::snark::RelaxedR1CSSNARK; +// type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; + +// #[derive(Clone, 
Debug, Default)] +// struct CubicCircuit { +// _p: PhantomData, +// } + +// impl StepCircuit for CubicCircuit { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and +// `y` are // respectively the input and output. +// let x = &z[0]; +// let x_sq = x.square(cs.namespace(|| "x_sq"))?; +// let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; +// let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { +// Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + +// F::from(5u64)) })?; + +// cs.enforce( +// || "y = x^3 + x + 5", +// |lc| { +// lc + x_cu.get_variable() +// + x.get_variable() +// + CS::one() +// + CS::one() +// + CS::one() +// + CS::one() +// + CS::one() +// }, +// |lc| lc + CS::one(), +// |lc| lc + y.get_variable(), +// ); + +// Ok(vec![y]) +// } +// } + +// impl CubicCircuit { +// fn output(&self, z: &[F]) -> Vec { +// vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] +// } +// } + +// fn test_pp_digest_with(circuit1: &T1, circuit2: +// &T2, expected: &Expect) where +// E1: CurveCycleEquipped, +// T1: StepCircuit, +// T2: StepCircuit< as Engine>::Scalar>, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests public parameters with a size specifically intended for +// a // spark-compressed SNARK +// let ck_hint1 = &*SPrime::::ck_floor(); +// let ck_hint2 = &*SPrime::, EE2>::ck_floor(); +// let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, +// ck_hint2).unwrap(); + +// let digest_str = +// pp.digest() +// .to_repr() +// .as_ref() +// .iter() +// .fold(String::new(), |mut output, b| { +// let _ = write!(output, "{b:02x}"); +// output +// }); + +// expected.assert_eq(&digest_str); +// } + +// #[test] +// fn 
test_pp_digest() { +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["e5a6a85b77f3fb958b69722a5a21bf656fd21a6b5a012708a4b086b6be6d2b03"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["ec707a8b822baebca114b6e61b238374f9ed358c542dd37ee73febb47832cd01"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["df52de22456157eb056003d4dc580a167ab8ce40a151c9944ea09a6fd0028600"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["b3ad0f4b734c5bd2ab9e83be8ee0cbaaa120e5cd0270b51cb9d7778a33f0b801"], +// ); + +// test_pp_digest_with::, EE<_>>( +// &TrivialCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["e1feca53664212ee750da857c726b2a09bb30b2964f22ea85a19b58c9eaf5701"], +// ); +// test_pp_digest_with::, EE<_>>( +// &CubicCircuit::default(), +// &TrivialCircuit::default(), +// +// &expect!["4ad6b10b6fd24fecba49f08d35bc874a6da9c77735bc0bcf4b78b1914a97e602"], +// ); +// } + +// fn test_ivc_trivial_with() +// where +// E1: CurveCycleEquipped, +// { +// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = +// TrivialCircuit::< as Engine>::Scalar>::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &test_circuit1, +// &test_circuit2, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); +// let num_steps = 1; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::new( +// &pp, +// &test_circuit1, +// &test_circuit2, +// &[::Scalar::ZERO], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// recursive_snark +// .prove_step(&pp, &test_circuit1, &test_circuit2) +// .unwrap(); + +// // verify the recursive SNARK +// recursive_snark +// .verify( +// &pp, +// num_steps, +// 
&[::Scalar::ZERO], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// #[test] +// fn test_ivc_trivial() { +// test_ivc_trivial_with::(); +// test_ivc_trivial_with::(); +// test_ivc_trivial_with::(); +// } + +// fn test_ivc_nontrivial_with() +// where +// E1: CurveCycleEquipped, +// { +// let circuit_primary = TrivialCircuit::default(); +// let circuit_secondary = CubicCircuit::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &circuit_primary, +// &circuit_secondary, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// for i in 0..num_steps { +// recursive_snark +// .prove_step(&pp, &circuit_primary, &circuit_secondary) +// .unwrap(); + +// // verify the recursive snark at each step of recursion +// recursive_snark +// .verify( +// &pp, +// i + 1, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // sanity: check the claimed output with a direct computation of the +// same assert_eq!(zn_primary, vec![::Scalar::ONE]); +// let mut zn_secondary_direct = vec![ as +// Engine>::Scalar::ZERO]; for _i in 0..num_steps { +// zn_secondary_direct = +// circuit_secondary.clone().output(&zn_secondary_direct); } +// assert_eq!(zn_secondary, zn_secondary_direct); +// assert_eq!( +// zn_secondary, +// vec![ as Engine>::Scalar::from(2460515u64)] +// ); +// } + +// #[test] +// fn test_ivc_nontrivial() { +// test_ivc_nontrivial_with::(); +// test_ivc_nontrivial_with::(); +// test_ivc_nontrivial_with::(); +// } + +// fn 
test_ivc_nontrivial_with_some_compression_with() +// where +// E1: CurveCycleEquipped, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// S1: RelaxedR1CSSNARKTrait, +// S2: RelaxedR1CSSNARKTrait>, +// { +// let circuit_primary = TrivialCircuit::default(); +// let circuit_secondary = CubicCircuit::default(); + +// // produce public parameters, which we'll maybe use with a +// preprocessing // compressed SNARK +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*S1::ck_floor(), +// &*S2::ck_floor(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &circuit_primary, +// &circuit_secondary, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// for _i in 0..num_steps { +// recursive_snark +// .prove_step(&pp, &circuit_primary, &circuit_secondary) +// .unwrap(); +// } + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // sanity: check the claimed output with a direct computation of the +// same assert_eq!(zn_primary, vec![::Scalar::ONE]); +// let mut zn_secondary_direct = vec![ as +// Engine>::Scalar::ZERO]; for _i in 0..num_steps { +// zn_secondary_direct = +// circuit_secondary.clone().output(&zn_secondary_direct); } +// assert_eq!(zn_secondary, zn_secondary_direct); +// assert_eq!( +// zn_secondary, +// vec![ as Engine>::Scalar::from(2460515u64)] +// ); + +// // run the compressed snark +// // produce the prover and verifier keys for compressed snark +// let (pk, vk) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); + +// // produce a compressed SNARK +// let compressed_snark = +// CompressedSNARK::<_, S1, S2>::prove(&pp, &pk, +// &recursive_snark).unwrap(); + +// 
// verify the compressed SNARK +// compressed_snark +// .verify( +// &vk, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); +// } + +// fn test_ivc_nontrivial_with_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// test_ivc_nontrivial_with_some_compression_with::, S<_, +// EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_compression() { +// test_ivc_nontrivial_with_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// fn test_ivc_nontrivial_with_spark_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// test_ivc_nontrivial_with_some_compression_with::, +// SPrime<_, EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_spark_compression() { +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_spark_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// type BatchedS = spartan::batched::BatchedRelaxedR1CSSNARK; +// type BatchedSPrime = spartan::batched::BatchedRelaxedR1CSSNARK; + +// fn 
test_ivc_nontrivial_with_batched_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests compatibility of the batched workflow with the +// non-batched one test_ivc_nontrivial_with_some_compression_with::, BatchedS<_, EE2>>() } + +// #[test] +// fn test_ivc_nontrivial_with_batched_compression() { +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>( ); +// test_ivc_nontrivial_with_batched_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// >(); +// } + +// fn test_ivc_nontrivial_with_batched_spark_compression_with() where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // this tests compatibility of the batched workflow with the +// non-batched one test_ivc_nontrivial_with_some_compression_with::< +// E1, +// BatchedSPrime<_, EE1>, +// BatchedSPrime<_, EE2>, +// >() +// } + +// #[test] +// fn test_ivc_nontrivial_with_batched_spark_compression() { +// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); +// test_ivc_nontrivial_with_batched_spark_compression_with::, +// EE<_>>(); test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); test_ivc_nontrivial_with_batched_spark_compression_with::< +// Bn256EngineZM, +// ZMPCS, +// EE<_>, +// >(); +// test_ivc_nontrivial_with_batched_spark_compression_with::< +// Bn256EngineKZG, +// provider::hyperkzg::EvaluationEngine, +// EE<_>, +// 
>(); +// } + +// fn test_ivc_nondet_with_compression_with() +// where +// E1: CurveCycleEquipped, +// EE1: EvaluationEngineTrait, +// EE2: EvaluationEngineTrait>, +// // this is due to the reliance on Abomonation +// ::Repr: Abomonation, +// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, +// { +// // y is a non-deterministic advice representing the fifth root of the +// input at a // step. +// #[derive(Clone, Debug)] +// struct FifthRootCheckingCircuit { +// y: F, +// } + +// impl FifthRootCheckingCircuit { +// fn new(num_steps: usize) -> (Vec, Vec) { +// let mut powers = Vec::new(); +// let rng = &mut rand::rngs::OsRng; +// let mut seed = F::random(rng); +// for _i in 0..num_steps + 1 { +// seed *= seed.clone().square().square(); + +// powers.push(Self { y: seed }); +// } + +// // reverse the powers to get roots +// let roots = powers.into_iter().rev().collect::>(); +// (vec![roots[0].y], roots[1..].to_vec()) +// } +// } + +// impl StepCircuit for FifthRootCheckingCircuit +// where +// F: PrimeField, +// { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; + +// // we allocate a variable and set it to the provided +// non-deterministic advice. 
let y = +// AllocatedNum::alloc_infallible(cs.namespace(|| "y"), || self.y); + +// // We now check if y = x^{1/5} by checking if y^5 = x +// let y_sq = y.square(cs.namespace(|| "y_sq"))?; +// let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; +// let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; + +// cs.enforce( +// || "y^5 = x", +// |lc| lc + y_pow_5.get_variable(), +// |lc| lc + CS::one(), +// |lc| lc + x.get_variable(), +// ); + +// Ok(vec![y]) +// } +// } + +// let circuit_primary = FifthRootCheckingCircuit { +// y: ::Scalar::ZERO, +// }; + +// let circuit_secondary = TrivialCircuit::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &circuit_primary, +// &circuit_secondary, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 3; + +// // produce non-deterministic advice +// let (z0_primary, roots) = FifthRootCheckingCircuit::new(num_steps); +// let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &roots[0], +// &circuit_secondary, +// &z0_primary, +// &z0_secondary, +// ) +// .unwrap(); + +// for circuit_primary in roots.iter().take(num_steps) { +// recursive_snark +// .prove_step(&pp, circuit_primary, &circuit_secondary) +// .unwrap(); +// } + +// // verify the recursive SNARK +// recursive_snark +// .verify(&pp, num_steps, &z0_primary, &z0_secondary) +// .unwrap(); + +// // produce the prover and verifier keys for compressed snark +// let (pk, vk) = CompressedSNARK::<_, S, S<_, +// EE2>>::setup(&pp).unwrap(); + +// // produce a compressed SNARK +// let compressed_snark = +// CompressedSNARK::<_, S, S<_, EE2>>::prove(&pp, &pk, +// &recursive_snark).unwrap(); + +// // verify the compressed SNARK +// compressed_snark +// .verify(&vk, num_steps, &z0_primary, &z0_secondary) +// .unwrap(); +// } + +// #[test] +// fn test_ivc_nondet_with_compression() { +// 
test_ivc_nondet_with_compression_with::, +// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); +// test_ivc_nondet_with_compression_with::, +// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); } + +// fn test_ivc_base_with() +// where +// E1: CurveCycleEquipped, +// { +// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = +// CubicCircuit::< as Engine>::Scalar>::default(); + +// // produce public parameters +// let pp = PublicParams::::setup( +// &test_circuit1, +// &test_circuit2, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ) +// .unwrap(); + +// let num_steps = 1; + +// // produce a recursive SNARK +// let mut recursive_snark = RecursiveSNARK::::new( +// &pp, +// &test_circuit1, +// &test_circuit2, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// // produce a recursive SNARK +// recursive_snark +// .prove_step(&pp, &test_circuit1, &test_circuit2) +// .unwrap(); + +// // verify the recursive SNARK +// let (zn_primary, zn_secondary) = recursive_snark +// .verify( +// &pp, +// num_steps, +// &[::Scalar::ONE], +// &[ as Engine>::Scalar::ZERO], +// ) +// .unwrap(); + +// assert_eq!(zn_primary, vec![::Scalar::ONE]); +// assert_eq!(zn_secondary, vec![ as +// Engine>::Scalar::from(5u64)]); } + +// #[test] +// fn test_ivc_base() { +// test_ivc_base_with::(); +// test_ivc_base_with::(); +// test_ivc_base_with::(); +// } + +// fn test_setup_with() { +// #[derive(Clone, Debug, Default)] +// struct CircuitWithInputize { +// _p: PhantomData, +// } + +// impl StepCircuit for CircuitWithInputize { +// fn arity(&self) -> usize { +// 1 +// } + +// fn synthesize>( +// &self, +// cs: &mut CS, +// z: &[AllocatedNum], +// ) -> Result>, SynthesisError> { +// let x = &z[0]; +// // a simplified version of this test would only have one +// input // but beside the Nova Public parameter requirement for +// a num_io = 2, being // probed in this test, we *also* require +// num_io to be even, so // 
negative testing requires at least 4 +// inputs let y = x.square(cs.namespace(|| "x_sq"))?; +// y.inputize(cs.namespace(|| "y"))?; // inputize y +// let y2 = x.square(cs.namespace(|| "x_sq2"))?; +// y2.inputize(cs.namespace(|| "y2"))?; // inputize y2 +// let y3 = x.square(cs.namespace(|| "x_sq3"))?; +// y3.inputize(cs.namespace(|| "y3"))?; // inputize y2 +// let y4 = x.square(cs.namespace(|| "x_sq4"))?; +// y4.inputize(cs.namespace(|| "y4"))?; // inputize y2 +// Ok(vec![y, y2, y3, y4]) +// } +// } + +// // produce public parameters with trivial secondary +// let circuit = CircuitWithInputize::<::Scalar>::default(); let pp = PublicParams::::setup( +// &circuit, +// &TrivialCircuit::default(), +// &*default_ck_hint(), +// &*default_ck_hint(), +// ); +// assert!(pp.is_err()); +// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); + +// // produce public parameters with the trivial primary +// let circuit = CircuitWithInputize::< as +// Engine>::Scalar>::default(); let pp = PublicParams::::setup( +// &TrivialCircuit::default(), +// &circuit, +// &*default_ck_hint(), +// &*default_ck_hint(), +// ); +// assert!(pp.is_err()); +// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); +// } + +// #[test] +// fn test_setup() { +// test_setup_with::(); +// } +// } diff --git a/prover/src/nifs.rs b/prover/src/nifs.rs new file mode 100644 index 0000000..9848bdf --- /dev/null +++ b/prover/src/nifs.rs @@ -0,0 +1,370 @@ +//! 
This module implements a non-interactive folding scheme +#![allow(non_snake_case)] + +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{NUM_CHALLENGE_BITS, NUM_FE_FOR_RO, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, + errors::NovaError, + r1cs::{ + R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, + }, + scalar_as_base, + traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, + Commitment, CommitmentKey, CompressedCommitment, +}; + +/// A SNARK that holds the proof of a step of an incremental computation +#[allow(clippy::upper_case_acronyms)] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct NIFS { + pub(crate) comm_T: CompressedCommitment, +} + +impl NIFS { + /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and + /// an R1CS instance-witness tuple `(U2, W2)` with the same structure + /// `shape` and defined with respect to the same `ck`, and outputs + /// a folded Relaxed R1CS instance-witness tuple `(U, W)` of the same shape + /// `shape`, with the guarantee that the folded witness `W` satisfies + /// the folded instance `U` if and only if `W1` satisfies `U1` and `W2` + /// satisfies `U2`. + /// + /// Note that this code is tailored for use with Nova's IVC scheme, which + /// enforces certain requirements between the two instances that are + /// folded. In particular, it requires that `U1` and `U2` are such that + /// the hash of `U1` is stored in the public IO of `U2`. + /// In this particular setting, this means that if `U2` is absorbed in the + /// RO, it implicitly absorbs `U1` as well. So the code below avoids + /// absorbing `U1` in the RO. 
+ #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove")] + pub fn prove( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + S: &R1CSShape, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness), E::Scalar), NovaError> { + // Check `U1` and `U2` have the same arity + let io_arity = U1.X.len(); + if io_arity != U2.X.len() { + return Err(NovaError::InvalidInputLength); + } + + // initialize a new RO + let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + io_arity); + + // append the digest of pp to the transcript + ro.absorb(scalar_as_base::(*pp_digest)); + + // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = + // Hash(params, U1, i, z0, zi) + U2.absorb_in_ro(&mut ro); + + // compute a commitment to the cross-term + let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; + + // append `comm_T` to the transcript and obtain a challenge + comm_T.absorb_in_ro(&mut ro); + + // compute a challenge from the RO + let r = ro.squeeze(NUM_CHALLENGE_BITS); + + // fold the instance using `r` and `comm_T` + let U = U1.fold(U2, &comm_T, &r); + + // fold the witness using `r` and `T` + let W = W1.fold(W2, &T, &r)?; + + // return the folded instance and witness + Ok((Self { comm_T: comm_T.compress() }, (U, W), r)) + } + + /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and + /// an R1CS instance-witness tuple `(U2, W2)` with the same structure + /// `shape` and defined with respect to the same `ck`, and updates `(U1, + /// W1)` by folding `(U2, W2)` into it with the guarantee that the + /// updated witness `W` satisfies the updated instance `U` if and only + /// if `W1` satisfies `U1` and `W2` satisfies `U2`. 
+ #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove_mut")] + pub fn prove_mut( + ck: &CommitmentKey, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + S: &R1CSShape, + U1: &mut RelaxedR1CSInstance, + W1: &mut RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + T: &mut Vec, + ABC_Z_1: &mut R1CSResult, + ABC_Z_2: &mut R1CSResult, + ) -> Result<(Self, E::Scalar), NovaError> { + // initialize a new RO + let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); + + // append the digest of pp to the transcript + ro.absorb(scalar_as_base::(*pp_digest)); + + // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = + // Hash(params, U1, i, z0, zi) + U2.absorb_in_ro(&mut ro); + + // compute a commitment to the cross-term + let comm_T = S.commit_T_into(ck, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?; + + // append `comm_T` to the transcript and obtain a challenge + comm_T.absorb_in_ro(&mut ro); + + // compute a challenge from the RO + let r = ro.squeeze(NUM_CHALLENGE_BITS); + + // fold the instance using `r` and `comm_T` + U1.fold_mut(U2, &comm_T, &r); + + // fold the witness using `r` and `T` + W1.fold_mut(W2, T, &r)?; + + // return the commitment + Ok((Self { comm_T: comm_T.compress() }, r)) + } + + /// Takes as input a relaxed R1CS instance `U1` and R1CS instance `U2` + /// with the same shape and defined with respect to the same parameters, + /// and outputs a folded instance `U` with the same shape, + /// with the guarantee that the folded instance `U` + /// if and only if `U1` and `U2` are satisfiable. 
+ pub fn verify( + &self, + ro_consts: &ROConstants, + pp_digest: &E::Scalar, + U1: &RelaxedR1CSInstance, + U2: &R1CSInstance, + ) -> Result, NovaError> { + // initialize a new RO + let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); + + // append the digest of pp to the transcript + ro.absorb(scalar_as_base::(*pp_digest)); + + // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = + // Hash(params, U1, i, z0, zi) + U2.absorb_in_ro(&mut ro); + + // append `comm_T` to the transcript and obtain a challenge + let comm_T = Commitment::::decompress(&self.comm_T)?; + comm_T.absorb_in_ro(&mut ro); + + // compute a challenge from the RO + let r = ro.squeeze(NUM_CHALLENGE_BITS); + + // fold the instance using `r` and `comm_T` + let U = U1.fold(U2, &comm_T, &r); + + // return the folded instance + Ok(U) + } +} + +#[cfg(test)] +mod tests { + use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; + use ff::{Field, PrimeField}; + use rand::rngs::OsRng; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + provider::Bn256EngineKZG, + r1cs::commitment_key, + traits::{snark::default_ck_hint, Engine}, + }; + + fn synthesize_tiny_r1cs_bellpepper>( + cs: &mut CS, + x_val: Option, + ) -> Result<(), SynthesisError> { + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are + // respectively the input and output. 
+ let x = AllocatedNum::alloc_infallible(cs.namespace(|| "x"), || x_val.unwrap()); + let _ = x.inputize(cs.namespace(|| "x is input")); + + let x_sq = x.square(cs.namespace(|| "x_sq"))?; + let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), &x)?; + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + Scalar::from(5u64)) + })?; + let _ = y.inputize(cs.namespace(|| "y is output")); + + cs.enforce( + || "y = x^3 + x + 5", + |lc| { + lc + x_cu.get_variable() + + x.get_variable() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + }, + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + + Ok(()) + } + + fn test_tiny_r1cs_bellpepper_with() { + // First create the shape + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, None); + let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); + let ro_consts = <::RO as ROTrait< + ::Base, + ::Scalar, + >>::Constants::default(); + + // Now get the instance and assignment for one instance + let mut cs = SatisfyingAssignment::::new(); + let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(5))); + let (U1, W1) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + // Make sure that the first instance is satisfiable + shape.is_sat(&ck, &U1, &W1).unwrap(); + + // Now get the instance and assignment for second instance + let mut cs = SatisfyingAssignment::::new(); + let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(135))); + let (U2, W2) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); + + // Make sure that the second instance is satisfiable + shape.is_sat(&ck, &U2, &W2).unwrap(); + + // execute a sequence of folds + execute_sequence(&ck, &ro_consts, &::Scalar::ZERO, &shape, &U1, &W1, &U2, &W2); + } + + #[test] + fn test_tiny_r1cs_bellpepper() { test_tiny_r1cs_bellpepper_with::(); } + + fn execute_sequence( + ck: &CommitmentKey, + ro_consts: &<::RO as 
ROTrait<::Base, ::Scalar>>::Constants, + pp_digest: &::Scalar, + shape: &R1CSShape, + U1: &R1CSInstance, + W1: &R1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) { + // produce a default running instance + let mut r_W = RelaxedR1CSWitness::default(shape); + let mut r_U = RelaxedR1CSInstance::default(ck, shape); + + // produce a step SNARK with (W1, U1) as the first incoming witness-instance + // pair + let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U1, W1); + assert!(res.is_ok()); + let (nifs, (_U, W), _) = res.unwrap(); + + // verify the step SNARK with U1 as the first incoming instance + let res = nifs.verify(ro_consts, pp_digest, &r_U, U1); + assert!(res.is_ok()); + let U = res.unwrap(); + + assert_eq!(U, _U); + + // update the running witness and instance + r_W = W; + r_U = U; + + // produce a step SNARK with (W2, U2) as the second incoming witness-instance + // pair + let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U2, W2); + assert!(res.is_ok()); + let (nifs, (_U, W), _) = res.unwrap(); + + // verify the step SNARK with U1 as the first incoming instance + let res = nifs.verify(ro_consts, pp_digest, &r_U, U2); + assert!(res.is_ok()); + let U = res.unwrap(); + + assert_eq!(U, _U); + + // update the running witness and instance + r_W = W; + r_U = U; + + // check if the running instance is satisfiable + shape.is_sat_relaxed(ck, &r_U, &r_W).unwrap(); + } + + fn test_tiny_r1cs_with() { + let num_vars = 3; + let S = crate::r1cs::tests::tiny_r1cs::(num_vars); + let one = ::ONE; + + // generate generators and ro constants + let ck = commitment_key(&S, &*default_ck_hint()); + let ro_consts = <::RO as ROTrait< + ::Base, + ::Scalar, + >>::Constants::default(); + + let rand_inst_witness_generator = + |ck: &CommitmentKey, I: &E::Scalar| -> (E::Scalar, R1CSInstance, R1CSWitness) { + let i0 = *I; + + // compute a satisfying (vars, X) tuple + let (O, vars, X) = { + let z0 = i0 * i0; // constraint 0 + let z1 = i0 * z0; // 
constraint 1 + let z2 = z1 + i0; // constraint 2 + let i1 = z2 + one + one + one + one + one; // constraint 3 + + // store the witness and IO for the instance + let W = vec![z0, z1, z2]; + let X = vec![i0, i1]; + (i1, W, X) + }; + + let W = { + let res = R1CSWitness::new(&S, vars); + assert!(res.is_ok()); + res.unwrap() + }; + let U = { + let comm_W = W.commit(ck); + let res = R1CSInstance::new(&S, comm_W, X); + assert!(res.is_ok()); + res.unwrap() + }; + + // check that generated instance is satisfiable + S.is_sat(ck, &U, &W).unwrap(); + + (O, U, W) + }; + + let mut csprng: OsRng = OsRng; + let I = E::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance + let (O, U1, W1) = rand_inst_witness_generator(&ck, &I); + let (_O, U2, W2) = rand_inst_witness_generator(&ck, &O); + + // execute a sequence of folds + execute_sequence(&ck, &ro_consts, &::Scalar::ZERO, &S, &U1, &W1, &U2, &W2); + } + + #[test] + fn test_tiny_r1cs() { test_tiny_r1cs_with::(); } +} diff --git a/prover/src/provider/bn256_grumpkin.rs b/prover/src/provider/bn256_grumpkin.rs new file mode 100644 index 0000000..35d3f76 --- /dev/null +++ b/prover/src/provider/bn256_grumpkin.rs @@ -0,0 +1,109 @@ +//! This module implements the Nova traits for `bn256::Point`, `bn256::Scalar`, +//! `grumpkin::Point`, `grumpkin::Scalar`. 
+use std::io::Read; + +use digest::{ExtendableOutput, Update}; +use ff::{FromUniformBytes, PrimeField}; +use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup}; +#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] +use grumpkin_msm::{bn256 as bn256_msm, grumpkin as grumpkin_msm}; +// Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves +use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt}; +use num_bigint::BigInt; +use num_traits::Num; +use rayon::prelude::*; +use sha3::Shake256; + +use crate::{ + impl_traits, + provider::{traits::DlogGroup, util::msm::cpu_best_msm}, + traits::{Group, PrimeFieldExt, TranscriptReprTrait}, +}; + +// Thus compile-time assertions checks important assumptions in the memory +// representation of group data that supports the use of Abomonation. +static_assertions::assert_eq_size!(G2Affine, [u64; 16]); + +/// Re-exports that give access to the standard aliases used in the code base, +/// for bn256 +pub mod bn256 { + pub use halo2curves::bn256::{ + Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, + }; +} + +/// Re-exports that give access to the standard aliases used in the code base, +/// for grumpkin +pub mod grumpkin { + pub use halo2curves::grumpkin::{ + Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, + }; +} + +#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] +impl_traits!( + bn256, + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", + bn256_msm +); +#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] +impl_traits!( + bn256, + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47" +); + +#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] +impl_traits!( + grumpkin, + 
"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + grumpkin_msm +); +#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] +impl_traits!( + grumpkin, + "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" +); + +#[cfg(test)] +mod tests { + use ff::Field; + use rand::thread_rng; + + use crate::provider::{ + bn256_grumpkin::{bn256, grumpkin}, + traits::DlogGroup, + util::msm::cpu_best_msm, + }; + + #[test] + fn test_bn256_msm_correctness() { + let npoints = 1usize << 16; + let points = bn256::Point::from_label(b"test", npoints); + + let mut rng = thread_rng(); + let scalars = (0..npoints).map(|_| bn256::Scalar::random(&mut rng)).collect::>(); + + let cpu_msm = cpu_best_msm(&points, &scalars); + let gpu_msm = bn256::Point::vartime_multiscalar_mul(&scalars, &points); + + assert_eq!(cpu_msm, gpu_msm); + } + + #[test] + fn test_grumpkin_msm_correctness() { + let npoints = 1usize << 16; + let points = grumpkin::Point::from_label(b"test", npoints); + + let mut rng = thread_rng(); + let scalars = (0..npoints).map(|_| grumpkin::Scalar::random(&mut rng)).collect::>(); + + let cpu_msm = cpu_best_msm(&points, &scalars); + let gpu_msm = grumpkin::Point::vartime_multiscalar_mul(&scalars, &points); + + assert_eq!(cpu_msm, gpu_msm); + } +} diff --git a/prover/src/provider/hyperkzg.rs b/prover/src/provider/hyperkzg.rs new file mode 100644 index 0000000..f1f88b8 --- /dev/null +++ b/prover/src/provider/hyperkzg.rs @@ -0,0 +1,852 @@ +//! This module implements Nova's evaluation engine using `HyperKZG`, a +//! KZG-based polynomial commitment for multilinear polynomials HyperKZG is based on the transformation from univariate PCS to multilinear PCS in the Gemini paper (section 2.4.2 in ``). +//! However, there are some key differences: +//! 
(1) HyperKZG works with multilinear polynomials represented in evaluation +//! form (rather than in coefficient form in Gemini's transformation). +//! This means that Spartan's polynomial IOP can use commit to its polynomials +//! as-is without incurring any interpolations or FFTs. (2) HyperKZG is +//! specialized to use KZG as the univariate commitment scheme, so it includes +//! several optimizations (both during the transformation of +//! multilinear-to-univariate claims and within the KZG commitment scheme +//! implementation itself). (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (``). +//! Compared to pure HyperKZG, this optimisation in theory improves prover (at +//! cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM) +#![allow(non_snake_case)] +use core::marker::PhantomData; +use std::sync::Arc; + +use ff::{Field, PrimeFieldBits}; +use group::{prime::PrimeCurveAffine as _, Curve, Group as _}; +use itertools::Itertools as _; +use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; +use rayon::{ + iter::{ + IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator, + }, + prelude::*, +}; +use ref_cast::RefCast as _; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + provider::{ + kzg_commitment::{KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UniversalKZGParam}, + pedersen::Commitment, + traits::DlogGroup, + util::iterators::IndexedParallelIteratorExt as _, + }, + spartan::{math::Math, polys::univariate::UniPoly}, + traits::{ + commitment::{CommitmentEngineTrait, Len}, + evaluation::EvaluationEngineTrait, + Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, + }, +}; + +/// Provides an implementation of a polynomial evaluation argument +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::Fr: Serialize", + deserialize = "E::G1Affine: 
Deserialize<'de>, E::Fr: Deserialize<'de>" +))] +pub struct EvaluationArgument { + comms: Vec, + evals: Vec>, + R_x: Vec, + C_Q: E::G1Affine, + C_H: E::G1Affine, +} + +/// Provides an implementation of a polynomial evaluation engine using KZG +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EvaluationEngine { + _p: PhantomData<(E, NE)>, +} + +// This impl block defines helper functions that are not a part of +// EvaluationEngineTrait, but that we will use to implement the trait methods. +impl EvaluationEngine +where + E: Engine, + NE: NovaEngine>, + E::G1: DlogGroup, + // the following bounds repeat existing, satisfied bounds on associated types of the above + // but are required since the equality constraints we use in the above do not transitively + // carry bounds we should be able to remove most of those constraints when rust supports + // associated_type_bounds + E::Fr: Serialize + DeserializeOwned, + E::G1Affine: Serialize + DeserializeOwned, + E::G1Affine: TranscriptReprTrait, // TODO: this bound on DlogGroup is really unusable! 
+ E::G2Affine: Serialize + DeserializeOwned, + E::Fr: PrimeFieldBits + TranscriptReprTrait, + ::Base: TranscriptReprTrait, +{ + fn compute_challenge( + com: &[E::G1Affine], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { + transcript.absorb(b"c", &com); + transcript.squeeze(b"c").unwrap() + } + + // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, + // (f_i(u_j))_{i=0..k-1,j=0..t-1}) + // It is assumed that both 'C' and 'u' are already absorbed by the transcript + fn get_batch_challenge( + v: &[Vec], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { + transcript.absorb(b"v", &v.iter().flatten().cloned().collect::>().as_slice()); + + transcript.squeeze(b"r").unwrap() + } + + fn compute_a(c_q: &E::G1Affine, transcript: &mut impl TranscriptEngineTrait) -> E::Fr { + transcript.absorb(b"C_Q", c_q); + transcript.squeeze(b"a").unwrap() + } + + fn compute_pi_polynomials(hat_P: &[E::Fr], point: &[E::Fr]) -> Vec> { + let mut polys: Vec> = Vec::new(); + polys.push(hat_P.to_vec()); + + for i in 0..point.len() - 1 { + let Pi_len = polys[i].len() / 2; + let mut Pi = vec![E::Fr::ZERO; Pi_len]; + + (0..Pi_len) + .into_par_iter() + .map(|j| { + point[point.len() - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) + polys[i][2 * j] + }) + .collect_into_vec(&mut Pi); + + polys.push(Pi); + } + + assert_eq!(polys.len(), hat_P.len().log_2()); + + polys + } + + fn compute_commitments( + ck: &UniversalKZGParam, + _C: &Commitment, + polys: &[Vec], + ) -> Vec { + let comms: Vec = (1..polys.len()) + .into_par_iter() + .map(|i| >::commit(ck, &polys[i]).comm) + .collect(); + + let mut comms_affine: Vec = vec![E::G1Affine::identity(); comms.len()]; + NE::GE::batch_normalize(&comms, &mut comms_affine); + comms_affine + } + + fn compute_evals(polys: &[Vec], u: &[E::Fr]) -> Vec> { + let mut v = vec![vec!(E::Fr::ZERO; polys.len()); u.len()]; + v.par_iter_mut().enumerate().for_each(|(i, v_i)| { + // for each point u + 
v_i.par_iter_mut().zip_eq(polys).for_each(|(v_ij, f)| { + // for each poly f (except the last one - since it is constant) + *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); + }); + }); + v + } + + fn compute_k_polynomial( + batched_Pi: &UniPoly, + Q_x: &UniPoly, + D: &UniPoly, + R_x: &UniPoly, + a: E::Fr, + ) -> UniPoly { + let mut tmp = Q_x.clone(); + tmp *= &D.evaluate(&a); + tmp[0] += &R_x.evaluate(&a); + let mut K_x = batched_Pi.clone(); + K_x -= &tmp; + K_x + } +} + +impl EvaluationEngineTrait for EvaluationEngine +where + E: MultiMillerLoop, + NE: NovaEngine>, + E::Fr: Serialize + DeserializeOwned, + E::G1Affine: Serialize + DeserializeOwned, + E::G2Affine: Serialize + DeserializeOwned, + E::G1: DlogGroup, + ::Base: TranscriptReprTrait, /* Note: due to the move of the bound + * TranscriptReprTrait on G::Base + * from Group to Engine */ + E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, +{ + type EvaluationArgument = EvaluationArgument; + type ProverKey = KZGProverKey; + type VerifierKey = KZGVerifierKey; + + fn setup(ck: Arc>) -> (Self::ProverKey, Self::VerifierKey) { + let len = ck.length() - 1; + UniversalKZGParam::trim(ck, len) + } + + fn prove( + ck: &UniversalKZGParam, + _pk: &Self::ProverKey, + transcript: &mut ::TE, + _C: &Commitment, + hat_P: &[E::Fr], + point: &[E::Fr], + _eval: &E::Fr, + ) -> Result { + let x: Vec = point.to_vec(); + let ell = x.len(); + let n = hat_P.len(); + assert_eq!(n, 1 << ell); // Below we assume that n is a power of two + + // Phase 1 -- create commitments com_1, ..., com_\ell + // We do not compute final Pi (and its commitment as well since it is already + // committed according to EvaluationEngineTrait API) as it is constant and + // equals to 'eval' also known to verifier, so can be derived on its + // side as well + let polys = Self::compute_pi_polynomials(hat_P, point); + let comms = Self::compute_commitments(ck, _C, &polys); + 
+ // Phase 2 + let r = Self::compute_challenge(&comms, transcript); + let u = vec![r, -r, r * r]; + let evals = Self::compute_evals(&polys, &u); + + // Phase 3 + // Compute B(x) = f_0(x) + q * f_1(x) + ... + q^(k-1) * f_{k-1}(x) + let q = Self::get_batch_challenge(&evals, transcript); + let batched_Pi: UniPoly = polys.into_par_iter().map(UniPoly::new).rlc(&q); + + // Q(x), R(x) = P(x) / D(x), where D(x) = (x - r) * (x + r) * (x - r^2) = 1 * + // x^3 - r^2 * x^2 - r^2 * x + r^4 + let D = UniPoly::new(vec![u[2] * u[2], -u[2], -u[2], E::Fr::from(1)]); + let (Q_x, R_x) = batched_Pi.divide_with_q_and_r(&D).unwrap(); + + let C_Q = >::commit(ck, &Q_x.coeffs).comm.to_affine(); + + let a = Self::compute_a(&C_Q, transcript); + + // K(x) = P(x) - Q(x) * D(a) - R(a), note that R(a) should be subtracted from a + // free term of polynomial + let K_x = Self::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + // TODO: since this is a usual KZG10 we should use it as utility instead + let h = K_x.divide_minus_u(a); + let C_H = >::commit(ck, &h.coeffs).comm.to_affine(); + + Ok(EvaluationArgument:: { comms, evals, R_x: R_x.coeffs, C_Q, C_H }) + } + + /// A method to verify purported evaluations of a batch of polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut ::TE, + C: &Commitment, + point: &[E::Fr], + P_of_x: &E::Fr, + pi: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let r = Self::compute_challenge(&pi.comms, transcript); + let u = [r, -r, r * r]; + + if pi.evals.len() != u.len() { + return Err(NovaError::ProofVerifyError); + } + if pi.R_x.len() != u.len() { + return Err(NovaError::ProofVerifyError); + } + + let mut comms = pi.comms.to_vec(); + comms.insert(0, C.comm.to_affine()); + + let q = Self::get_batch_challenge(&pi.evals, transcript); + let R_x = UniPoly::new(pi.R_x.clone()); + + let verification_failed = pi.evals.iter().zip_eq(u.iter()).any(|(evals_i, u_i)| { + // here we check correlation between R polynomial and batched evals, e.g.: 
+ // 1) R(r) == eval at r + // 2) R(-r) == eval at -r + // 3) R(r^2) == eval at r^2 + let batched_eval = UniPoly::ref_cast(evals_i).evaluate(&q); + batched_eval != R_x.evaluate(u_i) + }); + if verification_failed { + return Err(NovaError::ProofVerifyError); + } + + // here we check that Pi polynomials were correctly constructed by the prover, + // using 'r' as a random point, e.g: P_i_even = P_i(r) + P_i(-r) * 1/2 + // P_i_odd = P_i(r) - P_i(-r) * 1/2*r + // P_i+1(r^2) == (1 - point_i) * P_i_even + point_i * P_i_odd -> should hold, + // according to Gemini transformation + let mut point = point.to_vec(); + point.reverse(); + + let r_mul_2 = E::Fr::from(2) * r; + #[allow(clippy::disallowed_methods)] + let verification_failed = pi.evals[0] + .par_iter() + .chain(&[*P_of_x]) + .zip_eq(pi.evals[1].par_iter().chain(&[*P_of_x])) + .zip(pi.evals[2][1..].par_iter().chain(&[*P_of_x])) + .enumerate() + .any(|(index, ((eval_r, eval_minus_r), eval_r_squared))| { + // some optimisation to avoid using expensive inversions: + // P_i+1(r^2) == (1 - point_i) * (P_i(r) + P_i(-r)) * 1/2 + point_i * (P_i(r) - + // P_i(-r)) * 1/2 * r is equivalent to: + // 2 * r * P_i+1(r^2) == r * (1 - point_i) * (P_i(r) + P_i(-r)) + point_i * + // (P_i(r) - P_i(-r)) + + let even = *eval_r + eval_minus_r; + let odd = *eval_r - eval_minus_r; + let right = r * ((E::Fr::ONE - point[index]) * even) + (point[index] * odd); + let left = *eval_r_squared * r_mul_2; + left != right + }); + + if verification_failed { + return Err(NovaError::ProofVerifyError); + } + + let C_P: E::G1 = comms.par_iter().map(|comm| comm.to_curve()).rlc(&q); + let C_Q = pi.C_Q; + let C_H = pi.C_H; + let r_squared = u[2]; + + // D = (x - r) * (x + r) * (x - r^2) = 1 * x^3 - r^2 * x^2 - r^2 * x + r^4 + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, E::Fr::from(1)]); + + let a = Self::compute_a(&C_Q, transcript); + + let C_K = C_P - (C_Q * D.evaluate(&a) + vk.g * R_x.evaluate(&a)); + + let pairing_inputs: 
Vec<(E::G1Affine, E::G2Prepared)> = + vec![(C_H, vk.beta_h.into()), ((C_H * (-a) - C_K).to_affine(), vk.h.into())]; + + #[allow(clippy::map_identity)] + let pairing_input_refs = pairing_inputs.iter().map(|(a, b)| (a, b)).collect::>(); + + let pairing_result = E::multi_miller_loop(pairing_input_refs.as_slice()).final_exponentiation(); + let successful: bool = pairing_result.is_identity().into(); + if !successful { + return Err(NovaError::ProofVerifyError); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use bincode::Options; + use expect_test::expect; + use halo2curves::bn256::G1; + use itertools::Itertools; + + use super::*; + use crate::{ + provider::{ + keccak::Keccak256Transcript, + util::{iterators::DoubleEndedIteratorExt as _, test_utils::prove_verify_from_num_vars}, + }, + spartan::powers, + traits::TranscriptEngineTrait, + zip_with, CommitmentEngineTrait, CommitmentKey, + }; + + type E = halo2curves::bn256::Bn256; + type NE = crate::provider::Bn256EngineKZG; + type Fr = ::Scalar; + + fn test_commitment_to_k_polynomial_correctness( + ck: &CommitmentKey, + C: &Commitment, + poly: &[Fr], + point: &[Fr], + _eval: &Fr, + ) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let mut comms = EvaluationEngine::::compute_commitments(ck, C, &polys); + comms.insert(0, C.comm.to_affine()); + + let q = Fr::from(8165763); + let q_powers = batch_challenge_powers(q, polys.len()); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(1354678); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let a = Fr::from(938576); + + let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + let mut C_P = G1::identity(); + q_powers.iter().zip_eq(comms.iter()).for_each(|(q_i, C_i)| { + C_P += *C_i * q_i; + }); + + let 
C_Q = + <::CE as CommitmentEngineTrait>::commit( + ck, + &Q_x.coeffs, + ) + .comm + .to_affine(); + + // Check that Cp - Cq * D(a) - g1 * R(a) == MSM(ck, K(x)) + let C_K = C_P - C_Q * D.evaluate(&a) - ck.powers_of_g[0] * R_x.evaluate(&a); + + let C_K_expected = + <::CE as CommitmentEngineTrait>::commit( + ck, + &K_x.coeffs, + ) + .comm + .to_affine(); + + assert_eq!(C_K_expected, C_K.to_affine()); + } + + fn test_k_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let q = Fr::from(8165763); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(56263); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let a = Fr::from(190837645); + + let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); + + assert_eq!(Fr::from(0), K_x.evaluate(&a)); + } + + fn test_d_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + let q = Fr::from(8165763); + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let r = Fr::from(2895776832); + let r_squared = r * r; + + let divident = batched_Pi.clone(); + // D(x) = (x - r) * (x + r) * (x - r^2) + let D = UniPoly::new(vec![r_squared * r_squared, -r_squared, -r_squared, Fr::from(1)]); + let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); + + let evaluation_scalar = Fr::from(182746); + assert_eq!( + batched_Pi.evaluate(&evaluation_scalar), + D.evaluate(&evaluation_scalar) * Q_x.evaluate(&evaluation_scalar) + + R_x.evaluate(&evaluation_scalar) + ); + + // Check that Q(x) = (P(x) - R(x)) / D(x) + let mut P_x = batched_Pi.clone(); + let minus_R_x = + 
UniPoly::new(R_x.clone().coeffs.into_iter().map(|coeff| -coeff).collect::>()); + P_x += &minus_R_x; + + let divident = P_x.clone(); + let (Q_x_recomputed, _) = divident.divide_with_q_and_r(&D).unwrap(); + + assert_eq!(Q_x, Q_x_recomputed); + } + + fn test_batching_property_on_evaluation(poly: &[Fr], point: &[Fr], _eval: &Fr) { + let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); + + let q = Fr::from(97652); + let u = [Fr::from(10), Fr::from(20), Fr::from(50)]; + + let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); + + let q_powers = batch_challenge_powers(q, polys.len()); + for evaluation_scalar in u.iter() { + let evals = polys + .clone() + .into_iter() + .map(|poly| UniPoly::new(poly).evaluate(evaluation_scalar)) + .collect::>(); + + let expected = zip_with!((evals.iter(), q_powers.iter()), |eval, q| eval * q) + .collect::>() + .into_iter() + .sum::(); + + let actual = batched_Pi.evaluate(evaluation_scalar); + assert_eq!(expected, actual); + } + } + + #[test] + fn test_hyperkzg_shplonk_unit_tests() { + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + + // eval = 57 + let eval = Fr::from(57); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", poly.len()); + + let ck = Arc::new(ck); + let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); + + test_batching_property_on_evaluation(&poly, &point, &eval); + test_d_polynomial_correctness(&poly, &point, &eval); + test_k_polynomial_correctness(&poly, &point, &eval); + test_commitment_to_k_polynomial_correctness(&ck, &C, &poly, &point, &eval); + } + + #[test] + fn test_hyperkzg_shplonk_pcs() { + let n = 8; + + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + 
Fr::from(1), + Fr::from(4), + ]; + + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + + // eval = 57 + let eval = Fr::from(57); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) + .unwrap(); + } + + #[test] + fn test_hyperkzg_shplonk_pcs_negative() { + let n = 8; + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + // eval = 57 + let eval = Fr::from(57); + + // eval = 57 + let eval1 = Fr::from(56); // wrong eval + test_negative_inner(n, &poly, &point, &eval1); + + // point = [4,3,8] + let point1 = vec![Fr::from(4), Fr::from(3), Fr::from(7)]; // wrong point + test_negative_inner(n, &poly, &point1, &eval); + + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly1 = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(200), + Fr::from(100), + ]; // wrong poly + test_negative_inner(n, &poly1, &point, &eval); + } + + fn test_negative_inner(n: usize, poly: &[Fr], point: &[Fr], eval: &Fr) { + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C: Commitment = 
KZGCommitmentEngine::commit(&ck, poly); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, poly, point, eval) + .unwrap(); + + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + &C, + point, + eval, + &proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_pcs_negative_wrong_commitment() { + let n = 8; + // poly = [1, 2, 1, 4, 1, 2, 1, 4] + let poly = vec![ + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + Fr::ONE, + Fr::from(2), + Fr::from(1), + Fr::from(4), + ]; + // point = [4,3,8] + let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; + // eval = 57 + let eval = Fr::from(57); + + // altered_poly = [85, 84, 83, 82, 81, 80, 79, 78] + let altered_poly = vec![ + Fr::from(85), + Fr::from(84), + Fr::from(83), + Fr::from(82), + Fr::from(81), + Fr::from(80), + Fr::from(79), + Fr::from(78), + ]; + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + + let C1: Commitment = KZGCommitmentEngine::commit(&ck, &poly); // correct commitment + let C2: Commitment = KZGCommitmentEngine::commit(&ck, &altered_poly); // wrong commitment + + test_negative_inner_commitment(&poly, &point, &eval, &ck, &C1, &C2); // here we check detection when proof and commitment do not correspond + test_negative_inner_commitment(&poly, &point, &eval, &ck, &C2, &C2); // here we check detection + // when proof was built + // with wrong commitment + } + + fn test_negative_inner_commitment( + poly: &[Fr], + point: &[Fr], + eval: &Fr, + ck: &CommitmentKey, + C_prover: &Commitment, + C_verifier: &Commitment, + ) { + let ck = Arc::new(ck.clone()); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let mut verifier_transcript = 
Keccak256Transcript::::new(b"TestEval"); + + let proof = EvaluationEngine::::prove( + &ck, + &pk, + &mut prover_transcript, + C_prover, + poly, + point, + eval, + ) + .unwrap(); + + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + C_verifier, + point, + eval, + &proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_eval() { + // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 + let n = 4; + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] + let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; + + let C = as CommitmentEngineTrait>::commit(&ck, &poly); + + let test_inner = |point: Vec, eval: Fr| -> Result<(), NovaError> { + let mut tr = Keccak256Transcript::::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap(); + let mut tr = Keccak256Transcript::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut tr, &C, &point, &eval, &proof) + }; + + // Call the prover with a (point, eval) pair. 
+ // The prover does not recompute so it may produce a proof, but it should not + // verify + let point = vec![Fr::from(0), Fr::from(0)]; + let eval = Fr::ONE; + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(0), Fr::from(1)]; + let eval = Fr::from(2); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(1), Fr::from(1)]; + let eval = Fr::from(4); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(3); + test_inner(point, eval).unwrap(); + + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(9); + test_inner(point, eval).unwrap(); + + // Try a couple incorrect evaluations and expect failure + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(50); + assert!(test_inner(point, eval).is_err()); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(4); + assert!(test_inner(point, eval).is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_transcript_correctness() { + let n = 4; + + // poly = [1, 2, 1, 4] + let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)]; + + // point = [4,3] + let point = vec![Fr::from(4), Fr::from(3)]; + + // eval = 28 + let eval = Fr::from(28); + + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let ck = Arc::new(ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = + EvaluationEngine::::setup(ck.clone()); + + // make a commitment + let C = KZGCommitmentEngine::commit(&ck, &poly); + + // prove an evaluation + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + let post_c_p = prover_transcript.squeeze(b"c").unwrap(); + + // verify the evaluation + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) + .unwrap(); + let post_c_v = 
verifier_transcript.squeeze(b"c").unwrap(); + + // check if the prover transcript and verifier transcript are kept in the + // same state + assert_eq!(post_c_p, post_c_v); + + let proof_bytes = bincode::DefaultOptions::new() + .with_big_endian() + .with_fixint_encoding() + .serialize(&proof) + .unwrap(); + expect!["432"].assert_eq(&proof_bytes.len().to_string()); + + // Change the proof and expect verification to fail + let mut bad_proof = proof.clone(); + bad_proof.comms[0] = (bad_proof.comms[0] + bad_proof.comms[0] * Fr::from(123)).to_affine(); + let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript2, + &C, + &point, + &eval, + &bad_proof + ) + .is_err()); + } + + #[test] + fn test_hyperkzg_shplonk_more() { + // test the hyperkzg prover and verifier with random instances (derived from a + // seed) + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); + } + } + + /// Compute powers of q : (1, q, q^2, ..., q^(k-1)) + fn batch_challenge_powers(q: Fr, k: usize) -> Vec { powers(&q, k) } +} diff --git a/prover/src/provider/ipa_pc.rs b/prover/src/provider/ipa_pc.rs new file mode 100644 index 0000000..116e979 --- /dev/null +++ b/prover/src/provider/ipa_pc.rs @@ -0,0 +1,360 @@ +//! This module implements `EvaluationEngine` using an IPA-based polynomial +//! 
commitment scheme +use core::iter; +use std::{marker::PhantomData, sync::Arc}; + +use ff::Field; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::SimpleDigestible, + errors::{NovaError, PCSError}, + provider::{pedersen::CommitmentKeyExtTrait, traits::DlogGroup, util::field::batch_invert}, + spartan::polys::eq::EqPolynomial, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + evaluation::EvaluationEngineTrait, + Engine, TranscriptEngineTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CompressedCommitment, CE, +}; + +/// Provides an implementation of the prover key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey { + pub ck_s: CommitmentKey, +} + +/// Provides an implementation of the verifier key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct VerifierKey { + pub ck_v: Arc>, + pub ck_s: CommitmentKey, +} + +impl SimpleDigestible for VerifierKey {} + +/// Provides an implementation of a polynomial evaluation engine using IPA +#[derive(Clone, Debug)] +pub struct EvaluationEngine { + _p: PhantomData, +} + +impl EvaluationEngineTrait for EvaluationEngine +where + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, +{ + type EvaluationArgument = InnerProductArgument; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn setup( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + ) -> (Self::ProverKey, Self::VerifierKey) { + let ck_c = E::CE::setup(b"ipa", 1); + + let pk = ProverKey { ck_s: ck_c.clone() }; + let vk = VerifierKey { ck_v: ck.clone(), ck_s: ck_c }; + + (pk, vk) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + transcript: &mut E::TE, + comm: &Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, + ) -> Result { + let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); + let w = 
InnerProductWitness::new(poly); + + InnerProductArgument::prove(ck.clone(), pk.ck_s.clone(), &u, &w, transcript) + } + + /// A method to verify purported evaluations of a batch of polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut E::TE, + comm: &Commitment, + point: &[E::Scalar], + eval: &E::Scalar, + arg: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); + + arg.verify(&vk.ck_v, vk.ck_s.clone(), 1 << point.len(), &u, transcript)?; + + Ok(()) + } +} + +fn inner_product(a: &[T], b: &[T]) -> T { + zip_with!(par_iter, (a, b), |x, y| *x * y).sum() +} + +/// An inner product instance consists of a commitment to a vector `a` and +/// another vector `b` and the claim that c = . +struct InnerProductInstance { + comm_a_vec: Commitment, + b_vec: Vec, + c: E::Scalar, +} + +impl InnerProductInstance +where + E: Engine, + E::GE: DlogGroup, +{ + fn new(comm_a_vec: &Commitment, b_vec: &[E::Scalar], c: &E::Scalar) -> Self { + Self { comm_a_vec: *comm_a_vec, b_vec: b_vec.to_vec(), c: *c } + } +} + +impl TranscriptReprTrait for InnerProductInstance { + fn to_transcript_bytes(&self) -> Vec { + // we do not need to include self.b_vec as in our context it is produced from + // the transcript + [self.comm_a_vec.to_transcript_bytes(), self.c.to_transcript_bytes()].concat() + } +} + +struct InnerProductWitness { + a_vec: Vec, +} + +impl InnerProductWitness { + fn new(a_vec: &[E::Scalar]) -> Self { Self { a_vec: a_vec.to_vec() } } +} + +/// An inner product argument +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct InnerProductArgument { + pub(in crate::provider) L_vec: Vec>, + pub(in crate::provider) R_vec: Vec>, + pub(in crate::provider) a_hat: E::Scalar, +} + +impl InnerProductArgument +where + E: Engine, + E::GE: DlogGroup, + CommitmentKey: CommitmentKeyExtTrait, +{ + const fn protocol_name() -> &'static [u8] { b"IPA" } + + fn prove( + 
ck: CommitmentKey, + mut ck_c: CommitmentKey, + U: &InnerProductInstance, + W: &InnerProductWitness, + transcript: &mut E::TE, + ) -> Result { + transcript.dom_sep(Self::protocol_name()); + + let (ck, _) = ck.split_at(U.b_vec.len()); + + if U.b_vec.len() != W.a_vec.len() { + return Err(NovaError::InvalidInputLength); + } + + // absorb the instance in the transcript + transcript.absorb(b"U", U); + + // sample a random base for committing to the inner product + let r = transcript.squeeze(b"r")?; + ck_c.scale(&r); + + // a closure that executes a step of the recursive inner product argument + let prove_inner = |a_vec: &[E::Scalar], + b_vec: &[E::Scalar], + ck: CommitmentKey, + transcript: &mut E::TE| + -> Result< + ( + CompressedCommitment, + CompressedCommitment, + Vec, + Vec, + CommitmentKey, + ), + NovaError, + > { + let n = a_vec.len(); + let (ck_L, ck_R) = ck.split_at(n / 2); + + let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]); + let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]); + + let L = CE::::commit( + &ck_R.combine(&ck_c), + &a_vec[0..n / 2].iter().chain(iter::once(&c_L)).copied().collect::>(), + ) + .compress(); + let R = CE::::commit( + &ck_L.combine(&ck_c), + &a_vec[n / 2..n].iter().chain(iter::once(&c_R)).copied().collect::>(), + ) + .compress(); + + transcript.absorb(b"L", &L); + transcript.absorb(b"R", &R); + + let r = transcript.squeeze(b"r")?; + let r_inverse = r.invert().unwrap(); + + // fold the left half and the right half + let a_vec_folded = + zip_with!((a_vec[0..n / 2].par_iter(), a_vec[n / 2..n].par_iter()), |a_L, a_R| *a_L * r + + r_inverse * *a_R) + .collect::>(); + + let b_vec_folded = + zip_with!((b_vec[0..n / 2].par_iter(), b_vec[n / 2..n].par_iter()), |b_L, b_R| *b_L + * r_inverse + + r * *b_R) + .collect::>(); + + let ck_folded = CommitmentKeyExtTrait::fold(&ck_L, &ck_R, &r_inverse, &r); + + Ok((L, R, a_vec_folded, b_vec_folded, ck_folded)) + }; + + // two vectors to hold the logarithmic number of group elements + 
let mut L_vec: Vec> = Vec::new(); + let mut R_vec: Vec> = Vec::new(); + + // we create mutable copies of vectors and generators + let mut a_vec = W.a_vec.to_vec(); + let mut b_vec = U.b_vec.to_vec(); + let mut ck = ck; + for _i in 0..usize::try_from(U.b_vec.len().ilog2()).unwrap() { + let (L, R, a_vec_folded, b_vec_folded, ck_folded) = + prove_inner(&a_vec, &b_vec, ck, transcript)?; + L_vec.push(L); + R_vec.push(R); + + a_vec = a_vec_folded; + b_vec = b_vec_folded; + ck = ck_folded; + } + + Ok(Self { L_vec, R_vec, a_hat: a_vec[0] }) + } + + fn verify( + &self, + ck: &CommitmentKey, + mut ck_c: CommitmentKey, + n: usize, + U: &InnerProductInstance, + transcript: &mut E::TE, + ) -> Result<(), NovaError> { + let (ck, _) = ck.clone().split_at(U.b_vec.len()); + + transcript.dom_sep(Self::protocol_name()); + if U.b_vec.len() != n + || n != (1 << self.L_vec.len()) + || self.L_vec.len() != self.R_vec.len() + || self.L_vec.len() >= 32 + { + return Err(NovaError::InvalidInputLength); + } + + // absorb the instance in the transcript + transcript.absorb(b"U", U); + + // sample a random base for committing to the inner product + let r = transcript.squeeze(b"r")?; + ck_c.scale(&r); + + let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); + + // compute a vector of public coins using self.L_vec and self.R_vec + let r = (0..self.L_vec.len()) + .map(|i| { + transcript.absorb(b"L", &self.L_vec[i]); + transcript.absorb(b"R", &self.R_vec[i]); + transcript.squeeze(b"r") + }) + .collect::, NovaError>>()?; + + // precompute scalars necessary for verification + let r_square: Vec = + (0..self.L_vec.len()).into_par_iter().map(|i| r[i] * r[i]).collect(); + let r_inverse = batch_invert(r.clone())?; + let r_inverse_square: Vec = + (0..self.L_vec.len()).into_par_iter().map(|i| r_inverse[i] * r_inverse[i]).collect(); + + // compute the vector with the tensor structure + let s = { + let mut s = vec![E::Scalar::ZERO; n]; + s[0] = { + let mut v = E::Scalar::ONE; + for r_inverse_i in r_inverse { + v 
*= r_inverse_i; + } + v + }; + for i in 1..n { + let pos_in_r = (31 - (i as u32).leading_zeros()) as usize; + s[i] = s[i - (1 << pos_in_r)] * r_square[(self.L_vec.len() - 1) - pos_in_r]; + } + s + }; + + let ck_hat = { + let c = CE::::commit(&ck, &s).compress(); + CommitmentKey::::reinterpret_commitments_as_ck(&[c])? + }; + + let b_hat = inner_product(&U.b_vec, &s); + + let P_hat = { + let ck_folded = { + let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; + let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; + let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; + ck_L.combine(&ck_R).combine(&ck_P) + }; + + CE::::commit( + &ck_folded, + &r_square + .iter() + .chain(r_inverse_square.iter()) + .chain(iter::once(&E::Scalar::ONE)) + .copied() + .collect::>(), + ) + }; + + if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { + Ok(()) + } else { + Err(NovaError::PCSError(PCSError::InvalidPCS)) + } + } +} + +#[cfg(test)] +mod test { + use crate::provider::{ + ipa_pc::EvaluationEngine, util::test_utils::prove_verify_from_num_vars, GrumpkinEngine, + }; + + #[test] + fn test_multiple_polynomial_size() { + for num_vars in [4, 5, 6] { + prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); + } + } +} diff --git a/prover/src/provider/keccak.rs b/prover/src/provider/keccak.rs new file mode 100644 index 0000000..6a27b01 --- /dev/null +++ b/prover/src/provider/keccak.rs @@ -0,0 +1,219 @@ +//! This module provides an implementation of `TranscriptEngineTrait` using +//! 
keccak256 +use core::marker::PhantomData; + +use sha3::{Digest, Keccak256}; + +use crate::{ + errors::NovaError, + traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, +}; + +const PERSONA_TAG: &[u8] = b"NoTR"; +const DOM_SEP_TAG: &[u8] = b"NoDS"; +const KECCAK256_STATE_SIZE: usize = 64; +const KECCAK256_PREFIX_CHALLENGE_LO: u8 = 0; +const KECCAK256_PREFIX_CHALLENGE_HI: u8 = 1; + +/// Provides an implementation of `TranscriptEngine` +#[derive(Debug)] +pub struct Keccak256Transcript { + round: u16, + state: [u8; KECCAK256_STATE_SIZE], + transcript: Keccak256, + _p: PhantomData, +} + +fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { + let mut updated_instance = keccak_instance; + updated_instance.update(input); + + let input_lo = &[KECCAK256_PREFIX_CHALLENGE_LO]; + let input_hi = &[KECCAK256_PREFIX_CHALLENGE_HI]; + + let mut hasher_lo = updated_instance.clone(); + let mut hasher_hi = updated_instance; + + hasher_lo.update(input_lo); + hasher_hi.update(input_hi); + + let output_lo = hasher_lo.finalize(); + let output_hi = hasher_hi.finalize(); + + [output_lo, output_hi].concat().as_slice().try_into().unwrap() +} + +impl TranscriptEngineTrait for Keccak256Transcript { + fn new(label: &'static [u8]) -> Self { + let keccak_instance = Keccak256::new(); + let input = [PERSONA_TAG, label].concat(); + let output = compute_updated_state(keccak_instance.clone(), &input); + + Self { + round: 0u16, + state: output, + transcript: keccak_instance, + _p: PhantomData, + } + } + + fn squeeze(&mut self, label: &'static [u8]) -> Result { + // we gather the full input from the round, preceded by the current state of the + // transcript + let input = + [DOM_SEP_TAG, self.round.to_le_bytes().as_ref(), self.state.as_ref(), label].concat(); + let output = compute_updated_state(self.transcript.clone(), &input); + + // update state + self.round = self.round.checked_add(1).ok_or(NovaError::InternalTranscriptError)?; + 
self.state.copy_from_slice(&output); + self.transcript = Keccak256::new(); + + // squeeze out a challenge + Ok(E::Scalar::from_uniform(&output)) + } + + fn absorb>(&mut self, label: &'static [u8], o: &T) { + self.transcript.update(label); + self.transcript.update(o.to_transcript_bytes()); + } + + fn dom_sep(&mut self, bytes: &'static [u8]) { + self.transcript.update(DOM_SEP_TAG); + self.transcript.update(bytes); + } +} + +#[cfg(test)] +mod tests { + use ff::PrimeField; + use rand::Rng; + use sha3::{Digest, Keccak256}; + + use crate::{ + provider::{keccak::Keccak256Transcript, Bn256EngineKZG, GrumpkinEngine}, + traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, + }; + + fn test_keccak_transcript_with(expected_h1: &'static str, expected_h2: &'static str) { + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); + + // two scalars + let s1 = ::Scalar::from(2u64); + let s2 = ::Scalar::from(5u64); + + // add the scalars to the transcript + transcript.absorb(b"s1", &s1); + transcript.absorb(b"s2", &s2); + + // make a challenge + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + assert_eq!(hex::encode(c1.to_repr().as_ref()), expected_h1); + + // a scalar + let s3 = ::Scalar::from(128u64); + + // add the scalar to the transcript + transcript.absorb(b"s3", &s3); + + // make a challenge + let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); + assert_eq!(hex::encode(c2.to_repr().as_ref()), expected_h2); + } + + #[test] + fn test_keccak_transcript() { + test_keccak_transcript_with::( + "9fb71e3b74bfd0b60d97349849b895595779a240b92a6fae86bd2812692b6b0e", + "bfd4c50b7d6317e9267d5d65c985eb455a3561129c0b3beef79bfc8461a84f18", + ); + } + + #[test] + fn test_keccak_example() { + let mut hasher = Keccak256::new(); + hasher.update(0xffffffff_u32.to_le_bytes()); + let output: [u8; 32] = hasher.finalize().into(); + assert_eq!( + hex::encode(output), + "29045a592007d0c246ef02c2223570da9522d0cf0f73282c79a1bc8f0bb2c238" + ); + } + + 
use super::{ + DOM_SEP_TAG, KECCAK256_PREFIX_CHALLENGE_HI, KECCAK256_PREFIX_CHALLENGE_LO, + KECCAK256_STATE_SIZE, PERSONA_TAG, + }; + + fn compute_updated_state_for_testing(input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { + let input_lo = [input, &[KECCAK256_PREFIX_CHALLENGE_LO]].concat(); + let input_hi = [input, &[KECCAK256_PREFIX_CHALLENGE_HI]].concat(); + + let mut hasher_lo = Keccak256::new(); + let mut hasher_hi = Keccak256::new(); + + hasher_lo.update(&input_lo); + hasher_hi.update(&input_hi); + + let output_lo = hasher_lo.finalize(); + let output_hi = hasher_hi.finalize(); + + [output_lo, output_hi].concat().as_slice().try_into().unwrap() + } + + fn squeeze_for_testing( + transcript: &[u8], + round: u16, + state: [u8; KECCAK256_STATE_SIZE], + label: &'static [u8], + ) -> [u8; 64] { + let input = + [transcript, DOM_SEP_TAG, round.to_le_bytes().as_ref(), state.as_ref(), label].concat(); + compute_updated_state_for_testing(&input) + } + + // This test is meant to ensure compatibility between the incremental way of + // computing the transcript above, and the former, which materialized the + // entirety of the input vector before calling Keccak256 on it. 
+ fn test_keccak_transcript_incremental_vs_explicit_with() { + let test_label = b"test"; + let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); + let mut rng = rand::thread_rng(); + + // ten scalars + let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) + .take(10) + .collect::>(); + + // add the scalars to the transcripts, + let mut manual_transcript: Vec = vec![]; + let labels = [b"s1", b"s2", b"s3", b"s4", b"s5", b"s6", b"s7", b"s8", b"s9", b"s0"]; + + for i in 0..10 { + transcript.absorb(&labels[i][..], &scalars[i]); + manual_transcript.extend(labels[i]); + manual_transcript.extend(scalars[i].to_transcript_bytes()); + } + + // compute the initial state + let input = [PERSONA_TAG, test_label].concat(); + let initial_state = compute_updated_state_for_testing(&input); + + // make a challenge + let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); + + let c1_bytes = squeeze_for_testing(&manual_transcript[..], 0u16, initial_state, b"c1"); + let to_hex = |g: E::Scalar| hex::encode(g.to_repr().as_ref()); + assert_eq!(to_hex(c1), to_hex(E::Scalar::from_uniform(&c1_bytes))); + } + + #[test] + fn test_keccak_transcript_incremental_vs_explicit() { + // test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + // test_keccak_transcript_incremental_vs_explicit_with::(); + } +} diff --git a/prover/src/provider/kzg_commitment.rs b/prover/src/provider/kzg_commitment.rs new file mode 100644 index 0000000..a2232cc --- /dev/null +++ b/prover/src/provider/kzg_commitment.rs @@ -0,0 +1,301 @@ +//! 
Commitment engine for KZG commitments + +use std::{io::Cursor, marker::PhantomData, sync::Arc}; + +use ff::{Field, PrimeField, PrimeFieldBits}; +use group::{prime::PrimeCurveAffine, Curve, Group as _}; +use halo2curves::serde::SerdeObject; +use pairing::Engine; +use rand::rngs::StdRng; +use rand_core::{CryptoRng, RngCore, SeedableRng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::SimpleDigestible, + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, + provider::{pedersen::Commitment, traits::DlogGroup, util::fb_msm}, + traits::{ + commitment::{CommitmentEngineTrait, Len}, + Engine as NovaEngine, Group, TranscriptReprTrait, + }, +}; + +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +#[derive(Debug, Clone, Eq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct UniversalKZGParam { + /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_g: Vec, + /// Group elements of the form `{ β^i H }`, where `i` ranges from 0 to + /// `degree`. + pub powers_of_h: Vec, +} + +impl PartialEq for UniversalKZGParam { + fn eq(&self, other: &Self) -> bool { + self.powers_of_g == other.powers_of_g && self.powers_of_h == other.powers_of_h + } +} +// for the purpose of the Len trait, we count commitment bases, i.e. 
G1 elements +impl Len for UniversalKZGParam { + fn length(&self) -> usize { self.powers_of_g.len() } +} + +/// `UnivariateProverKey` is used to generate a proof +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" +))] +pub struct KZGProverKey { + /// generators from the universal parameters + uv_params: Arc>, + /// offset at which we start reading into the SRS + offset: usize, + /// maximum supported size + supported_size: usize, +} + +impl KZGProverKey { + pub(in crate::provider) fn new( + uv_params: Arc>, + offset: usize, + supported_size: usize, + ) -> Self { + assert!( + uv_params.max_degree() >= offset + supported_size, + "not enough bases (req: {} from offset {}) in the UVKZGParams (length: {})", + supported_size, + offset, + uv_params.max_degree() + ); + Self { uv_params, offset, supported_size } + } + + pub fn powers_of_g(&self) -> &[E::G1Affine] { + &self.uv_params.powers_of_g[self.offset..self.offset + self.supported_size] + } +} + +/// `UVKZGVerifierKey` is used to check evaluation proofs for a given +/// commitment. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(bound(serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize",))] +pub struct KZGVerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + /// The generator of G2. + pub h: E::G2Affine, + /// β times the above generator of G2. + pub beta_h: E::G2Affine, +} + +impl SimpleDigestible for KZGVerifierKey +where + E::G1Affine: Serialize, + E::G2Affine: Serialize, +{ +} + +impl UniversalKZGParam { + /// Returns the maximum supported degree + pub fn max_degree(&self) -> usize { self.powers_of_g.len() } + + /// Trim the universal parameters to specialize the public parameters + /// for univariate polynomials to the given `supported_size`, and + /// returns prover key and verifier key. 
`supported_size` should + /// be in range `1..params.len()` + /// + /// # Panics + /// If `supported_size` is greater than `self.max_degree()`, or + /// `self.max_degree()` is zero. + pub fn trim(ukzg: Arc, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { + assert!(ukzg.max_degree() > 0, "max_degree is zero"); + let g = ukzg.powers_of_g[0]; + let h = ukzg.powers_of_h[0]; + let beta_h = ukzg.powers_of_h[1]; + let pk = KZGProverKey::new(ukzg, 0, supported_size + 1); + let vk = KZGVerifierKey { g, h, beta_h }; + (pk, vk) + } +} + +impl FastSerde for UniversalKZGParam +where + E::G1Affine: SerdeObject, + E::G2Affine: SerdeObject, +{ + /// Byte format: + /// + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: UniversalKZGParam (u8) + /// [5] - Number of sections (u8 = 2) + /// [6] - Section 1 type: powers_of_g (u8) + /// [7..11] - Section 1 size (u32) + /// [11..] - Section 1 data + /// [...+1] - Section 2 type: powers_of_h (u8) + /// [...+5] - Section 2 size (u32) + /// [...end] - Section 2 data + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::UniversalKZGParam as u8); + out.push(2); // num_sections + + Self::write_section_bytes( + &mut out, + 1, + &self.powers_of_g.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + Self::write_section_bytes( + &mut out, + 2, + &self.powers_of_h.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + out + } + + fn from_bytes(bytes: &[u8]) -> Result { + let mut cursor = Cursor::new(bytes); + + Self::validate_header(&mut cursor, SerdeByteTypes::UniversalKZGParam, 2)?; + + // Read sections of points + let powers_of_g = Self::read_section_bytes(&mut cursor, 1)? + .chunks(E::G1Affine::identity().to_raw_bytes().len()) + .map(|bytes| E::G1Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError)) + .collect::, _>>()?; + + let powers_of_h = Self::read_section_bytes(&mut cursor, 2)? 
+ .chunks(E::G2Affine::identity().to_raw_bytes().len()) + .map(|bytes| E::G2Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G2DecodeError)) + .collect::, _>>()?; + + Ok(Self { powers_of_g, powers_of_h }) + } +} + +impl UniversalKZGParam +where E::Fr: PrimeFieldBits +{ + /// Build SRS for testing. + /// WARNING: THIS FUNCTION IS FOR TESTING PURPOSE ONLY. + /// THE OUTPUT SRS SHOULD NOT BE USED IN PRODUCTION. + pub fn gen_srs_for_testing(mut rng: &mut R, max_degree: usize) -> Self { + let beta = E::Fr::random(&mut rng); + let g = E::G1::random(&mut rng); + let h = E::G2::random(rng); + + let nz_powers_of_beta = (0..=max_degree) + .scan(beta, |acc, _| { + let val = *acc; + *acc *= beta; + Some(val) + }) + .collect::>(); + + let window_size = fb_msm::get_mul_window_size(max_degree); + let scalar_bits = E::Fr::NUM_BITS as usize; + + let (powers_of_g_projective, powers_of_h_projective) = rayon::join( + || { + let g_table = fb_msm::get_window_table(scalar_bits, window_size, g); + fb_msm::multi_scalar_mul::(scalar_bits, window_size, &g_table, &nz_powers_of_beta) + }, + || { + let h_table = fb_msm::get_window_table(scalar_bits, window_size, h); + fb_msm::multi_scalar_mul::(scalar_bits, window_size, &h_table, &nz_powers_of_beta) + }, + ); + + let mut powers_of_g = vec![E::G1Affine::identity(); powers_of_g_projective.len()]; + let mut powers_of_h = vec![E::G2Affine::identity(); powers_of_h_projective.len()]; + + rayon::join( + || E::G1::batch_normalize(&powers_of_g_projective, &mut powers_of_g), + || E::G2::batch_normalize(&powers_of_h_projective, &mut powers_of_h), + ); + + Self { powers_of_g, powers_of_h } + } +} + +/// Commitments +#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound( + serialize = "E::G1Affine: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>" +))] +pub struct UVKZGCommitment( + /// the actual commitment is an affine point. 
+ pub E::G1Affine, +); + +impl TranscriptReprTrait for UVKZGCommitment +where + E::G1: DlogGroup, + // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + ::Base: TranscriptReprTrait, +{ + fn to_transcript_bytes(&self) -> Vec { + // TODO: avoid the round-trip through the group (to_curve .. to_coordinates) + let (x, y, is_infinity) = self.0.to_curve().to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [x.to_transcript_bytes(), y.to_transcript_bytes(), [is_infinity_byte].to_vec()].concat() + } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct KZGCommitmentEngine { + _p: PhantomData, +} + +impl> CommitmentEngineTrait + for KZGCommitmentEngine +where + E::G1: DlogGroup, + E::G1Affine: Serialize + for<'de> Deserialize<'de>, + E::G2Affine: Serialize + for<'de> Deserialize<'de>, + E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional +{ + type Commitment = Commitment; + type CommitmentKey = UniversalKZGParam; + + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { + // TODO: this is just for testing, replace by grabbing from a real setup for + // production + let mut bytes = [0u8; 32]; + let len = label.len().min(32); + bytes[..len].copy_from_slice(&label[..len]); + let rng = &mut StdRng::from_seed(bytes); + UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) + } + + fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { + assert!(ck.length() >= v.len()); + Commitment { comm: E::G1::vartime_multiscalar_mul(v, &ck.powers_of_g[..v.len()]) } + } +} + +impl> From> + for UVKZGCommitment +where E::G1: Group +{ + fn from(c: Commitment) -> Self { Self(c.comm.to_affine()) } +} + +impl> From> + for Commitment +where E::G1: Group +{ + fn from(c: UVKZGCommitment) -> Self { Self { comm: c.0.to_curve() } } +} diff --git a/prover/src/provider/mod.rs b/prover/src/provider/mod.rs new file mode 100644 index 0000000..5b8f369 
--- /dev/null +++ b/prover/src/provider/mod.rs @@ -0,0 +1,170 @@ +//! This module implements Nova's traits using the following several different +//! combinations + +// public modules to be used as an evaluation engine with Spartan +pub mod hyperkzg; +pub mod ipa_pc; + +// crate-public modules, made crate-public mostly for tests +pub(crate) mod bn256_grumpkin; +mod pedersen; +pub(crate) mod poseidon; +pub(crate) mod traits; +// a non-hiding variant of kzg +mod kzg_commitment; +pub(crate) mod util; + +// crate-private modules +mod keccak; +mod tests; + +use halo2curves::bn256::Bn256; + +use self::kzg_commitment::KZGCommitmentEngine; +use crate::{ + provider::{ + bn256_grumpkin::{bn256, grumpkin}, + keccak::Keccak256Transcript, + pedersen::CommitmentEngine as PedersenCommitmentEngine, + poseidon::{PoseidonRO, PoseidonROCircuit}, + }, + traits::{CurveCycleEquipped, Engine}, +}; + +/// An implementation of the Nova `Engine` trait with Grumpkin curve and +/// Pedersen commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct GrumpkinEngine; + +/// An implementation of the Nova `Engine` trait with BN254 curve and Pedersen +/// commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineIPA; + +impl Engine for Bn256EngineIPA { + type Base = bn256::Base; + type CE = PedersenCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} + +impl Engine for GrumpkinEngine { + type Base = grumpkin::Base; + type CE = PedersenCommitmentEngine; + type GE = grumpkin::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = grumpkin::Scalar; + type TE = Keccak256Transcript; +} + +/// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph +/// commitment scheme +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineZM; + +impl Engine for Bn256EngineZM { + type Base = 
bn256::Base; + type CE = KZGCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} +/// An implementation of Nova traits with HyperKZG over the BN256 curve +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineKZG; + +impl Engine for Bn256EngineKZG { + type Base = bn256::Base; + type CE = KZGCommitmentEngine; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type Scalar = bn256::Scalar; + type TE = Keccak256Transcript; +} + +impl CurveCycleEquipped for Bn256EngineIPA { + type Secondary = GrumpkinEngine; +} + +impl CurveCycleEquipped for Bn256EngineKZG { + type Secondary = GrumpkinEngine; +} + +impl CurveCycleEquipped for Bn256EngineZM { + type Secondary = GrumpkinEngine; +} + +#[cfg(test)] +mod test { + use std::io::Read; + + use digest::{ExtendableOutput, Update}; + use group::{ff::Field, Curve, Group}; + use halo2curves::{CurveAffine, CurveExt}; + use itertools::Itertools as _; + use rand_core::OsRng; + use sha3::Shake256; + + use crate::provider::{ + bn256_grumpkin::{bn256, grumpkin}, + traits::DlogGroup, + util::msm::cpu_best_msm, + }; + + macro_rules! 
impl_cycle_pair_test { + ($curve:ident) => { + fn from_label_serial(label: &'static [u8], n: usize) -> Vec<$curve::Affine> { + let mut shake = Shake256::default(); + shake.update(label); + let mut reader = shake.finalize_xof(); + (0..n) + .map(|_| { + let mut uniform_bytes = [0u8; 32]; + reader.read_exact(&mut uniform_bytes).unwrap(); + let hash = $curve::Point::hash_to_curve("from_uniform_bytes"); + hash(&uniform_bytes).to_affine() + }) + .collect() + } + + let label = b"test_from_label"; + for n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021] { + let ck_par = <$curve::Point as DlogGroup>::from_label(label, n); + let ck_ser = from_label_serial(label, n); + assert_eq!(ck_par.len(), n); + assert_eq!(ck_ser.len(), n); + assert_eq!(ck_par, ck_ser); + } + }; + } + + fn test_msm_with>() { + let n = 8; + let coeffs = (0..n).map(|_| F::random(OsRng)).collect::>(); + let bases = (0..n).map(|_| A::from(A::generator() * F::random(OsRng))).collect::>(); + let naive = coeffs + .iter() + .zip_eq(bases.iter()) + .fold(A::CurveExt::identity(), |acc, (coeff, base)| acc + *base * coeff); + + assert_eq!(naive, cpu_best_msm(&bases, &coeffs)) + } + + #[test] + fn test_msm() { + test_msm_with::(); + test_msm_with::(); + } + + #[test] + fn test_bn256_from_label() { + impl_cycle_pair_test!(bn256); + } +} diff --git a/prover/src/provider/pedersen.rs b/prover/src/provider/pedersen.rs new file mode 100644 index 0000000..4fb139f --- /dev/null +++ b/prover/src/provider/pedersen.rs @@ -0,0 +1,310 @@ +//! 
This module provides an implementation of a commitment engine +use core::{ + fmt::Debug, + marker::PhantomData, + ops::{Add, Mul, MulAssign}, +}; +use std::io::Cursor; + +use ff::Field; +use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + Curve, Group, GroupEncoding, +}; +use halo2curves::serde::SerdeObject; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, + provider::traits::DlogGroup, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, + }, + zip_with, +}; + +/// A type that holds commitment generators +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CommitmentKey +where + E: Engine, + E::GE: DlogGroup, { + pub ck: Vec<::Affine>, +} + +impl Len for CommitmentKey +where + E: Engine, + E::GE: DlogGroup, +{ + fn length(&self) -> usize { self.ck.len() } +} + +impl FastSerde for CommitmentKey +where + ::Affine: SerdeObject, + E::GE: DlogGroup, +{ + /// Byte format: + /// + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: CommitmentKey (u8) + /// [5] - Number of sections (u8 = 1) + /// [6] - Section 1 type: ck (u8) + /// [7..11] - Section 1 size (u32) + /// [11..] - Section 1 data + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::CommitmentKey as u8); + out.push(1); // num_sections + + Self::write_section_bytes( + &mut out, + 1, + &self.ck.iter().flat_map(|p| p.to_raw_bytes()).collect::>(), + ); + + out + } + + fn from_bytes(bytes: &[u8]) -> Result { + let mut cursor = Cursor::new(bytes); + + // Validate header + Self::validate_header(&mut cursor, SerdeByteTypes::CommitmentKey, 1)?; + + // Read ck section + let ck = Self::read_section_bytes(&mut cursor, 1)? 
+ .chunks(::Affine::identity().to_raw_bytes().len()) + .map(|bytes| { + ::Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError) + }) + .collect::, _>>()?; + + Ok(Self { ck }) + } +} + +/// A type that holds a commitment +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct Commitment { + pub(crate) comm: E::GE, +} + +/// A type that holds a compressed commitment +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, { + pub(crate) comm: ::Compressed, +} + +impl CommitmentTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type CompressedCommitment = CompressedCommitment; + + fn compress(&self) -> Self::CompressedCommitment { + CompressedCommitment { comm: ::to_bytes(&self.comm).into() } + } + + fn to_coordinates(&self) -> (E::Base, E::Base, bool) { self.comm.to_coordinates() } + + fn decompress(c: &Self::CompressedCommitment) -> Result { + let opt_comm = <::GE as GroupEncoding>::from_bytes(&c.comm.clone().into()); + let Some(comm) = Option::from(opt_comm) else { + return Err(NovaError::DecompressionError); + }; + Ok(Self { comm }) + } +} + +impl Default for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn default() -> Self { Self { comm: E::GE::identity() } } +} + +impl TranscriptReprTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn to_transcript_bytes(&self) -> Vec { + let (x, y, is_infinity) = self.comm.to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [x.to_transcript_bytes(), y.to_transcript_bytes(), [is_infinity_byte].to_vec()].concat() + } +} + +impl AbsorbInROTrait for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn absorb_in_ro(&self, ro: &mut E::RO) { + let (x, y, is_infinity) = self.comm.to_coordinates(); + ro.absorb(x); + ro.absorb(y); + ro.absorb(if is_infinity { E::Base::ONE } else { E::Base::ZERO }); + } +} + +impl 
TranscriptReprTrait for CompressedCommitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn to_transcript_bytes(&self) -> Vec { self.comm.to_transcript_bytes() } +} + +impl MulAssign for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + fn mul_assign(&mut self, scalar: E::Scalar) { *self = Self { comm: self.comm * scalar }; } +} + +impl<'b, E> Mul<&'b E::Scalar> for &Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Commitment; + + fn mul(self, scalar: &'b E::Scalar) -> Commitment { Commitment { comm: self.comm * scalar } } +} + +impl Mul for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Self; + + fn mul(self, scalar: E::Scalar) -> Self { Self { comm: self.comm * scalar } } +} + +impl Add for Commitment +where + E: Engine, + E::GE: DlogGroup, +{ + type Output = Self; + + fn add(self, other: Self) -> Self { Self { comm: self.comm + other.comm } } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct CommitmentEngine { + _p: PhantomData, +} + +impl CommitmentEngineTrait for CommitmentEngine +where + E: Engine, + E::GE: DlogGroup, +{ + type Commitment = Commitment; + type CommitmentKey = CommitmentKey; + + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { + Self::CommitmentKey { ck: E::GE::from_label(label, n.next_power_of_two()) } + } + + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { + assert!(ck.ck.len() >= v.len()); + Commitment { comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]) } + } +} + +/// A trait listing properties of a commitment key that can be managed in a +/// divide-and-conquer fashion +pub trait CommitmentKeyExtTrait +where + E: Engine, + E::GE: DlogGroup, { + /// Splits the commitment key into two pieces at a specified point + fn split_at(self, n: usize) -> (Self, Self) + where Self: Sized; + + /// Combines two commitment keys into one + fn combine(&self, other: &Self) -> Self; + + /// Folds the two commitment 
keys into one using the provided weights + fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self; + + /// Scales the commitment key using the provided scalar + fn scale(&mut self, r: &E::Scalar); + + /// Reinterprets commitments as commitment keys + fn reinterpret_commitments_as_ck( + c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait< + E, + >>::CompressedCommitment], + ) -> Result + where + Self: Sized; +} + +impl CommitmentKeyExtTrait for CommitmentKey +where + E: Engine>, + E::GE: DlogGroup, +{ + fn split_at(mut self, n: usize) -> (Self, Self) { + let right = self.ck.split_off(n); + (self, Self { ck: right }) + } + + fn combine(&self, other: &Self) -> Self { + let ck = { self.ck.iter().cloned().chain(other.ck.iter().cloned()).collect::>() }; + Self { ck } + } + + // combines the left and right halves of `self` using `w1` and `w2` as the + // weights + fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self { + debug_assert!(L.ck.len() == R.ck.len()); + let ck_curve: Vec = zip_with!(par_iter, (L.ck, R.ck), |l, r| { + E::GE::vartime_multiscalar_mul(&[*w1, *w2], &[*l, *r]) + }) + .collect(); + let mut ck_affine = vec![::Affine::identity(); L.ck.len()]; + E::GE::batch_normalize(&ck_curve, &mut ck_affine); + + Self { ck: ck_affine } + } + + /// Scales each element in `self` by `r` + fn scale(&mut self, r: &E::Scalar) { + let ck_scaled: Vec = self.ck.par_iter().map(|g| *g * r).collect(); + E::GE::batch_normalize(&ck_scaled, &mut self.ck); + } + + /// reinterprets a vector of commitments as a set of generators + fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { + let d = c + .par_iter() + .map(|c| Commitment::::decompress(c).map(|c| c.comm)) + .collect::, NovaError>>()?; + let mut ck = vec![::Affine::identity(); d.len()]; + E::GE::batch_normalize(&d, &mut ck); + Ok(Self { ck }) + } +} diff --git a/prover/src/provider/poseidon.rs b/prover/src/provider/poseidon.rs new file mode 100644 index 
0000000..8452bae --- /dev/null +++ b/prover/src/provider/poseidon.rs @@ -0,0 +1,222 @@ +//! Poseidon Constants and Poseidon-based RO used in Nova +use core::marker::PhantomData; + +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{PrimeField, PrimeFieldBits}; +use generic_array::typenum::U24; +use neptune::{ + circuit2::Elt, + poseidon::PoseidonConstants, + sponge::{ + api::{IOPattern, SpongeAPI, SpongeOp}, + circuit::SpongeCircuit, + vanilla::{Mode::Simplex, Sponge, SpongeTrait}, + }, + Strength, +}; +use serde::{Deserialize, Serialize}; + +use crate::traits::{ROCircuitTrait, ROTrait}; + +/// All Poseidon Constants that are used in Nova +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PoseidonConstantsCircuit(PoseidonConstants); + +impl Default for PoseidonConstantsCircuit { + /// Generate Poseidon constants + fn default() -> Self { Self(Sponge::::api_constants(Strength::Standard)) } +} + +/// A Poseidon-based RO to use outside circuits +#[derive(Debug)] +pub struct PoseidonRO +where + Base: PrimeField, + Scalar: PrimeField, { + state: Vec, + constants: PoseidonConstantsCircuit, + num_absorbs: usize, + squeezed: bool, + _p: PhantomData, +} + +impl ROTrait for PoseidonRO +where + Base: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, + Scalar: PrimeField, +{ + type CircuitRO = PoseidonROCircuit; + type Constants = PoseidonConstantsCircuit; + + fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { + Self { state: Vec::new(), constants, num_absorbs, squeezed: false, _p: PhantomData } + } + + /// Absorb a new number into the state of the oracle + fn absorb(&mut self, e: Base) { + assert!(!self.squeezed, "Cannot absorb after squeezing"); + self.state.push(e); + } + + /// Compute a challenge by hashing the current state + fn squeeze(&mut self, num_bits: usize) -> Scalar { + // check if we have squeezed already + 
assert!(!self.squeezed, "Cannot squeeze again after squeezing"); + self.squeezed = true; + + let mut sponge = Sponge::new_with_constants(&self.constants.0, Simplex); + let acc = &mut (); + let parameter = + IOPattern(vec![SpongeOp::Absorb(self.num_absorbs as u32), SpongeOp::Squeeze(1u32)]); + + sponge.start(parameter, None, acc); + assert_eq!(self.num_absorbs, self.state.len()); + SpongeAPI::absorb(&mut sponge, self.num_absorbs as u32, &self.state, acc); + let hash = SpongeAPI::squeeze(&mut sponge, 1, acc); + sponge.finish(acc).unwrap(); + + // Only return `num_bits` + let bits = hash[0].to_le_bits(); + let mut res = Scalar::ZERO; + let mut coeff = Scalar::ONE; + for bit in bits[..num_bits].into_iter() { + if *bit { + res += coeff; + } + coeff += coeff; + } + res + } +} + +/// A Poseidon-based RO gadget to use inside the verifier circuit. +#[derive(Debug)] +pub struct PoseidonROCircuit { + // Internal state + state: Vec>, + constants: PoseidonConstantsCircuit, + num_absorbs: usize, + squeezed: bool, +} + +impl ROCircuitTrait for PoseidonROCircuit +where Scalar: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de> +{ + type Constants = PoseidonConstantsCircuit; + type NativeRO = PoseidonRO; + + /// Initialize the internal state and set the poseidon constants + fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { + Self { state: Vec::new(), constants, num_absorbs, squeezed: false } + } + + /// Absorb a new number into the state of the oracle + fn absorb(&mut self, e: &AllocatedNum) { + assert!(!self.squeezed, "Cannot absorb after squeezing"); + self.state.push(e.clone()); + } + + /// Compute a challenge by hashing the current state + fn squeeze>( + &mut self, + mut cs: CS, + num_bits: usize, + ) -> Result, SynthesisError> { + // check if we have squeezed already + assert!(!self.squeezed, "Cannot squeeze again after squeezing"); + self.squeezed = true; + let parameter = + IOPattern(vec![SpongeOp::Absorb(self.num_absorbs as u32), 
SpongeOp::Squeeze(1u32)]); + let mut ns = cs.namespace(|| "ns"); + + let hash = { + let mut sponge = SpongeCircuit::new_with_constants(&self.constants.0, Simplex); + let acc = &mut ns; + assert_eq!(self.num_absorbs, self.state.len()); + + sponge.start(parameter, None, acc); + SpongeAPI::absorb( + &mut sponge, + self.num_absorbs as u32, + &(0..self.state.len()) + .map(|i| Elt::Allocated(self.state[i].clone())) + .collect::>>(), + acc, + ); + + let output = SpongeAPI::squeeze(&mut sponge, 1, acc); + sponge.finish(acc).unwrap(); + output + }; + + let hash = Elt::ensure_allocated(&hash[0], &mut ns.namespace(|| "ensure allocated"), true)?; + + // return the hash as a vector of bits, truncated + Ok( + hash + .to_bits_le_strict(ns.namespace(|| "poseidon hash to boolean"))? + .iter() + .map(|boolean| match boolean { + Boolean::Is(ref x) => x.clone(), + _ => panic!("Wrong type of input. We should have never reached there"), + }) + .collect::>()[..num_bits] + .into(), + ) + } +} + +#[cfg(test)] +mod tests { + use ff::Field; + use rand::rngs::OsRng; + + use super::*; + use crate::{ + bellpepper::solver::SatisfyingAssignment, + constants::NUM_CHALLENGE_BITS, + gadgets::le_bits_to_num, + provider::{Bn256EngineKZG, GrumpkinEngine}, + traits::Engine, + }; + + fn test_poseidon_ro_with() + where + // we can print the field elements we get from E's Base & Scalar fields, + // and compare their byte representations + <::Base as PrimeField>::Repr: std::fmt::Debug, + <::Scalar as PrimeField>::Repr: std::fmt::Debug, + <::Base as PrimeField>::Repr: + PartialEq<<::Scalar as PrimeField>::Repr>, { + // Check that the number computed inside the circuit is equal to the number + // computed outside the circuit + let mut csprng: OsRng = OsRng; + let constants = PoseidonConstantsCircuit::::default(); + let num_absorbs = 32; + let mut ro: PoseidonRO = PoseidonRO::new(constants.clone(), num_absorbs); + let mut ro_gadget: PoseidonROCircuit = + PoseidonROCircuit::new(constants, num_absorbs); + let 
mut cs = SatisfyingAssignment::::new(); + for i in 0..num_absorbs { + let num = E::Scalar::random(&mut csprng); + ro.absorb(num); + let num_gadget = AllocatedNum::alloc_infallible(cs.namespace(|| format!("data {i}")), || num); + num_gadget.inputize(&mut cs.namespace(|| format!("input {i}"))).unwrap(); + ro_gadget.absorb(&num_gadget); + } + let num = ro.squeeze(NUM_CHALLENGE_BITS); + let num2_bits = ro_gadget.squeeze(&mut cs, NUM_CHALLENGE_BITS).unwrap(); + let num2 = le_bits_to_num(&mut cs, &num2_bits).unwrap(); + assert_eq!(num.to_repr(), num2.get_value().unwrap().to_repr()); + } + + #[test] + fn test_poseidon_ro() { + test_poseidon_ro_with::(); + test_poseidon_ro_with::(); + } +} diff --git a/prover/src/provider/tests/ipa_pc.rs b/prover/src/provider/tests/ipa_pc.rs new file mode 100644 index 0000000..586cee6 --- /dev/null +++ b/prover/src/provider/tests/ipa_pc.rs @@ -0,0 +1,128 @@ +#[cfg(test)] +mod test { + use group::Curve; + use handlebars::Handlebars; + use serde_json::{json, Map, Value}; + + use crate::provider::{ + ipa_pc::EvaluationEngine, + pedersen::{CommitmentKey, CommitmentKeyExtTrait}, + tests::solidity_compatibility_utils::{ + compressed_commitment_to_json, ec_points_to_json, field_elements_to_json, + generate_pcs_solidity_unit_test_data, + }, + GrumpkinEngine, + }; + + static IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE: &str = " +// SPDX-License-Identifier: Apache-2.0 +pragma solidity ^0.8.16; +import \"@std/Test.sol\"; +import \"src/blocks/grumpkin/Grumpkin.sol\"; +import \"src/blocks/EqPolynomial.sol\"; +import \"src/Utilities.sol\"; +import \"src/blocks/IpaPcs.sol\"; + +contract IpaTest is Test { +function composeIpaInput() public pure returns (InnerProductArgument.IpaInputGrumpkin memory) { +Grumpkin.GrumpkinAffinePoint[] memory ck_v = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_v }}); +{{ #each ck_v }} ck_v[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} + +Grumpkin.GrumpkinAffinePoint[] memory ck_s = new 
Grumpkin.GrumpkinAffinePoint[]({{ len ck_s }}); +{{ #each ck_s }} ck_s[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} + +uint256[] memory point = new uint256[]({{ len point }}); +{{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }} + +uint256[] memory L_vec = new uint256[]({{ len L_vec }}); +{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }} + +uint256[] memory R_vec = new uint256[]({{ len R_vec }}); +{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }} + +uint256 a_hat = {{ a_hat }}; + +// InnerProductInstance +Grumpkin.GrumpkinAffinePoint memory commitment = Grumpkin.GrumpkinAffinePoint({{ commitment_x }}, \ + {{ commitment_y }}); + +uint256 eval = {{ eval }}; + +return InnerProductArgument.IpaInputGrumpkin(ck_v, ck_s, point, L_vec, R_vec, commitment, eval, \ + a_hat); +} + +function testIpaGrumpkinVerification_{{ num_vars }}_Variables() public { +InnerProductArgument.IpaInputGrumpkin memory input = composeIpaInput(); +assertTrue(InnerProductArgument.verifyGrumpkin(input, getTranscript())); +} + +function getTranscript() public pure returns (KeccakTranscriptLib.KeccakTranscript memory) { +// b\"TestEval\" in Rust +uint8[] memory label = new uint8[](8); +label[0] = 0x54; +label[1] = 0x65; +label[2] = 0x73; +label[3] = 0x74; +label[4] = 0x45; +label[5] = 0x76; +label[6] = 0x61; +label[7] = 0x6c; + +KeccakTranscriptLib.KeccakTranscript memory keccak_transcript = \ + KeccakTranscriptLib.instantiate(label); +return keccak_transcript; +} +} +"; + + // To generate Solidity unit-test: + // cargo test test_solidity_compatibility_ipa --release -- --ignored --nocapture + // > ipa.t.sol + #[test] + #[ignore] + fn test_solidity_compatibility_ipa() { + let num_vars = 2; + + // Secondary part of verification is IPA over Grumpkin + let (commitment, point, eval, proof, vk) = + generate_pcs_solidity_unit_test_data::<_, EvaluationEngine>(num_vars); + + let num_vars_string = format!("{}", num_vars); + let eval_string = 
format!("{:?}", eval); + let commitment_x_string = format!("{:?}", commitment.comm.to_affine().x); + let commitment_y_string = format!("{:?}", commitment.comm.to_affine().y); + let proof_a_hat_string = format!("{:?}", proof.a_hat); + + let r_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.R_vec) + .expect("can't reinterpred R_vec"); + let l_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.L_vec) + .expect("can't reinterpred L_vec"); + + let r_vec_array = compressed_commitment_to_json::(&r_vec.ck); + let l_vec_array = compressed_commitment_to_json::(&l_vec.ck); + let point_array = field_elements_to_json::(&point); + let ckv_array = ec_points_to_json::(&vk.ck_v.ck); + let cks_array = ec_points_to_json::(&vk.ck_s.ck); + + let mut map = Map::new(); + map.insert("num_vars".to_string(), Value::String(num_vars_string)); + map.insert("eval".to_string(), Value::String(eval_string)); + map.insert("commitment_x".to_string(), Value::String(commitment_x_string)); + map.insert("commitment_y".to_string(), Value::String(commitment_y_string)); + map.insert("R_vec".to_string(), Value::Array(r_vec_array)); + map.insert("L_vec".to_string(), Value::Array(l_vec_array)); + map.insert("a_hat".to_string(), Value::String(proof_a_hat_string)); + map.insert("point".to_string(), Value::Array(point_array)); + map.insert("ck_v".to_string(), Value::Array(ckv_array)); + map.insert("ck_s".to_string(), Value::Array(cks_array)); + + let mut reg = Handlebars::new(); + reg + .register_template_string("ipa.t.sol", IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE) + .expect("can't register template"); + + let solidity_unit_test_source = reg.render("ipa.t.sol", &json!(map)).expect("can't render"); + println!("{}", solidity_unit_test_source); + } +} diff --git a/prover/src/provider/tests/mod.rs b/prover/src/provider/tests/mod.rs new file mode 100644 index 0000000..cb37ee8 --- /dev/null +++ b/prover/src/provider/tests/mod.rs @@ -0,0 +1,134 @@ +mod ipa_pc; + +#[cfg(test)] +pub mod 
solidity_compatibility_utils { + use std::sync::Arc; + + use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + GroupEncoding, + }; + use rand::rngs::StdRng; + use serde_json::{Map, Value}; + + use crate::{ + provider::traits::DlogGroup, + spartan::polys::multilinear::MultilinearPolynomial, + traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, + }; + + pub(crate) fn generate_pcs_solidity_unit_test_data>( + num_vars: usize, + ) -> ( + >::Commitment, + Vec, + E::Scalar, + EE::EvaluationArgument, + EE::VerifierKey, + ) { + use rand_core::SeedableRng; + + let mut rng = StdRng::seed_from_u64(num_vars as u64); + + let (poly, point, eval) = + crate::provider::util::test_utils::random_poly_with_eval::(num_vars, &mut rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + let ck_arc = Arc::new(ck.clone()); + // Commits to the provided vector using the provided generators. + let commitment = E::CE::commit(&ck_arc, poly.evaluations()); + + let (proof, vk) = prove_verify_solidity::(ck_arc, &commitment, &poly, &point, &eval); + + (commitment, point, eval, proof, vk) + } + + fn prove_verify_solidity>( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + commitment: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &MultilinearPolynomial<::Scalar>, + point: &[::Scalar], + eval: &::Scalar, + ) -> (EE::EvaluationArgument, EE::VerifierKey) { + use crate::traits::TranscriptEngineTrait; + + // Generate Prover and verifier key for given commitment key. + let ock = ck.clone(); + let (prover_key, verifier_key) = EE::setup(ck); + + // Generate proof. + let mut prover_transcript = E::TE::new(b"TestEval"); + let proof: EE::EvaluationArgument = EE::prove( + &*ock, + &prover_key, + &mut prover_transcript, + commitment, + poly.evaluations(), + point, + eval, + ) + .unwrap(); + let pcp = prover_transcript.squeeze(b"c").unwrap(); + + // Verify proof. 
+ let mut verifier_transcript = E::TE::new(b"TestEval"); + EE::verify(&verifier_key, &mut verifier_transcript, commitment, point, eval, &proof).unwrap(); + let pcv = verifier_transcript.squeeze(b"c").unwrap(); + + // Check if the prover transcript and verifier transcript are kept in the same + // state. + assert_eq!(pcp, pcv); + + (proof, verifier_key) + } + + pub(crate) fn field_elements_to_json(field_elements: &[E::Scalar]) -> Vec { + let mut value_vector = vec![]; + field_elements.iter().enumerate().for_each(|(i, fe)| { + let mut value = Map::new(); + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("val".to_string(), Value::String(format!("{:?}", fe))); + value_vector.push(Value::Object(value)); + }); + value_vector + } + + pub(crate) fn ec_points_to_json(ec_points: &[::Affine]) -> Vec + where + E: Engine, + E::GE: DlogGroup, { + let mut value_vector = vec![]; + ec_points.iter().enumerate().for_each(|(i, ec_point)| { + let mut value = Map::new(); + let coordinates_info = ec_point.to_curve().to_coordinates(); + let not_infinity = !coordinates_info.2; + assert!(not_infinity); + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("x".to_string(), Value::String(format!("{:?}", coordinates_info.0))); + value.insert("y".to_string(), Value::String(format!("{:?}", coordinates_info.1))); + value_vector.push(Value::Object(value)); + }); + value_vector + } + + pub(crate) fn compressed_commitment_to_json( + ec_points: &[::Affine], + ) -> Vec + where + E: Engine, + E::GE: DlogGroup, { + let mut value_vector = vec![]; + ec_points.iter().enumerate().for_each(|(i, ec_point)| { + let mut value = Map::new(); + let compressed_commitment_info = ec_point.to_curve().to_bytes(); + let mut data = compressed_commitment_info.as_ref().to_vec(); + data.reverse(); + + value.insert("i".to_string(), Value::String(i.to_string())); + value.insert("compressed".to_string(), Value::String(format!("0x{}", hex::encode(data)))); + 
value_vector.push(Value::Object(value)); + }); + value_vector + } +} diff --git a/prover/src/provider/traits.rs b/prover/src/provider/traits.rs new file mode 100644 index 0000000..c5068ec --- /dev/null +++ b/prover/src/provider/traits.rs @@ -0,0 +1,189 @@ +use std::{fmt::Debug, ops::Mul}; + +use group::{ + prime::{PrimeCurve, PrimeCurveAffine}, + GroupEncoding, +}; +use serde::{Deserialize, Serialize}; + +use crate::traits::{Group, TranscriptReprTrait}; + +/// A trait that defines extensions to the Group trait +pub trait DlogGroup: + Group::ScalarExt> + + Serialize + + for<'de> Deserialize<'de> + + PrimeCurve::ScalarExt, Affine = ::AffineExt> { + type ScalarExt; + type AffineExt: Clone + + Debug + + Eq + + Serialize + + for<'de> Deserialize<'de> + + Sync + + Send + // technical bounds, should disappear when associated_type_bounds stabilizes + + Mul + + PrimeCurveAffine; + type Compressed: Clone + + Debug + + Eq + + From<::Repr> + + Into<::Repr> + + Serialize + + for<'de> Deserialize<'de> + + Sync + + Send + + TranscriptReprTrait; + + /// A method to compute a multiexponentation + fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self; + + /// Produce a vector of group elements using a static label + fn from_label(label: &'static [u8], n: usize) -> Vec; + + /// Returns the affine coordinates (x, y, infinity) for the point + fn to_coordinates(&self) -> (::Base, ::Base, bool); +} + +/// This implementation behaves in ways specific to the halo2curves suite of +/// curves in: +// - to_coordinates, +// - vartime_multiscalar_mul, where it does not call into accelerated implementations. +// A specific reimplementation exists for the pasta curves in their own module. +#[macro_export] +macro_rules! 
impl_traits { + ($name:ident, $order_str:literal, $base_str:literal) => { + $crate::impl_traits!($name, $order_str, $base_str, cpu_best_msm); + }; + ($name:ident, $order_str:literal, $base_str:literal, $large_msm_method:ident) => { + // These compile-time assertions check important assumptions in the memory + // representation of group data that supports the use of Abomonation. + static_assertions::assert_eq_size!($name::Affine, [u64; 8]); + static_assertions::assert_eq_size!($name::Point, [u64; 12]); + + impl Group for $name::Point { + type Base = $name::Base; + type Scalar = $name::Scalar; + + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { + let A = $name::Point::a(); + let B = $name::Point::b(); + let order = BigInt::from_str_radix($order_str, 16).unwrap(); + let base = BigInt::from_str_radix($base_str, 16).unwrap(); + + (A, B, order, base) + } + } + + impl DlogGroup for $name::Point { + type AffineExt = $name::Affine; + // note: for halo2curves implementations, $name::Compressed == <$name::Point as + // GroupEncoding>::Repr so the blanket impl From for T and impl + // Into apply. 
+ type Compressed = $name::Compressed; + type ScalarExt = $name::Scalar; + + fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self { + #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] + if scalars.len() >= 128 { + $large_msm_method(bases, scalars) + } else { + cpu_best_msm(bases, scalars) + } + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + cpu_best_msm(bases, scalars) + } + + fn from_label(label: &'static [u8], n: usize) -> Vec { + let mut shake = Shake256::default(); + shake.update(label); + let mut reader = shake.finalize_xof(); + let mut uniform_bytes_vec = Vec::new(); + for _ in 0..n { + let mut uniform_bytes = [0u8; 32]; + reader.read_exact(&mut uniform_bytes).unwrap(); + uniform_bytes_vec.push(uniform_bytes); + } + let gens_proj: Vec<$name::Point> = (0..n) + .into_par_iter() + .map(|i| { + let hash = $name::Point::hash_to_curve("from_uniform_bytes"); + hash(&uniform_bytes_vec[i]) + }) + .collect(); + + let num_threads = rayon::current_num_threads(); + if gens_proj.len() > num_threads { + let chunk = (gens_proj.len() as f64 / num_threads as f64).ceil() as usize; + (0..num_threads) + .into_par_iter() + .flat_map(|i| { + let start = i * chunk; + let end = if i == num_threads - 1 { + gens_proj.len() + } else { + core::cmp::min((i + 1) * chunk, gens_proj.len()) + }; + if end > start { + let mut gens = vec![$name::Affine::identity(); end - start]; + ::batch_normalize(&gens_proj[start..end], &mut gens); + gens + } else { + vec![] + } + }) + .collect() + } else { + let mut gens = vec![$name::Affine::identity(); n]; + ::batch_normalize(&gens_proj, &mut gens); + gens + } + } + + fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { + let coordinates = self.to_affine().coordinates(); + if coordinates.is_some().unwrap_u8() == 1 && ($name::Point::identity() != *self) { + (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) + } else { + (Self::Base::zero(), Self::Base::zero(), true) + } 
+ } + } + + impl PrimeFieldExt for $name::Scalar { + fn from_uniform(bytes: &[u8]) -> Self { + let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); + $name::Scalar::from_uniform_bytes(&bytes_arr) + } + } + + impl TranscriptReprTrait for $name::Compressed { + fn to_transcript_bytes(&self) -> Vec { self.as_ref().to_vec() } + } + + impl TranscriptReprTrait for $name::Scalar { + fn to_transcript_bytes(&self) -> Vec { self.to_repr().to_vec() } + } + + impl TranscriptReprTrait for $name::Affine { + fn to_transcript_bytes(&self) -> Vec { + let (x, y, is_infinity_byte) = { + let coordinates = self.coordinates(); + if coordinates.is_some().unwrap_u8() == 1 && ($name::Affine::identity() != *self) { + let c = coordinates.unwrap(); + (*c.x(), *c.y(), u8::from(false)) + } else { + ($name::Base::zero(), $name::Base::zero(), u8::from(false)) + } + }; + + x.to_repr() + .into_iter() + .chain(y.to_repr().into_iter()) + .chain(std::iter::once(is_infinity_byte)) + .collect() + } + } + }; +} diff --git a/prover/src/provider/util/fb_msm.rs b/prover/src/provider/util/fb_msm.rs new file mode 100644 index 0000000..cf5b9be --- /dev/null +++ b/prover/src/provider/util/fb_msm.rs @@ -0,0 +1,126 @@ +/// # Fixed-base Scalar Multiplication +/// +/// This module provides an implementation of fixed-base scalar multiplication +/// on elliptic curves. +/// +/// The multiplication is optimized through a windowed method, where scalars are +/// broken into fixed-size windows, pre-computation tables are generated, and +/// results are efficiently combined. +use ff::{PrimeField, PrimeFieldBits}; +use group::{prime::PrimeCurve, Curve}; +use rayon::prelude::*; + +/// Determines the window size for scalar multiplication based on the number of +/// scalars. +/// +/// This is used to balance between pre-computation and number of point +/// additions. 
+pub(crate) fn get_mul_window_size(num_scalars: usize) -> usize { + if num_scalars < 32 { + 3 + } else { + (num_scalars as f64).ln().ceil() as usize + } +} + +/// Generates a table of multiples of a base point `g` for use in windowed +/// scalar multiplication. +/// +/// This pre-computes multiples of a base point for each window and organizes +/// them into a table for quick lookup during the scalar multiplication process. +/// The table is a vector of vectors, each inner vector corresponding to a +/// window and containing the multiples of `g` for that window. +pub(crate) fn get_window_table( + scalar_size: usize, + window: usize, + g: T, +) -> Vec> +where + T: Curve, + T::AffineRepr: Send, +{ + let in_window = 1 << window; + // Number of outer iterations needed to cover the entire scalar + let outerc = scalar_size.div_ceil(window); + + // Number of multiples of the window's "outer point" needed for each window + // (fewer for the last window) + let last_in_window = 1 << (scalar_size - (outerc - 1) * window); + + let mut multiples_of_g = vec![vec![T::identity(); in_window]; outerc]; + + // Compute the multiples of g for each window + // g_outers = [ 2^{k*window}*g for k in 0..outerc] + let mut g_outer = g; + let mut g_outers = Vec::with_capacity(outerc); + for _ in 0..outerc { + g_outers.push(g_outer); + for _ in 0..window { + g_outer = g_outer.double(); + } + } + multiples_of_g.par_iter_mut().enumerate().zip_eq(g_outers).for_each( + |((outer, multiples_of_g), g_outer)| { + let cur_in_window = if outer == outerc - 1 { last_in_window } else { in_window }; + + // multiples_of_g = [id, g_outer, 2*g_outer, 3*g_outer, ...], + // where g_outer = 2^{outer*window}*g + let mut g_inner = T::identity(); + for inner in multiples_of_g.iter_mut().take(cur_in_window) { + *inner = g_inner; + g_inner.add_assign(&g_outer); + } + }, + ); + multiples_of_g.par_iter().map(|s| s.iter().map(|s| s.to_affine()).collect()).collect() +} + +/// Performs the actual windowed scalar 
multiplication using a pre-computed +/// table of points. +/// +/// Given a scalar and a table of pre-computed multiples of a base point, this +/// function efficiently computes the scalar multiplication by breaking the +/// scalar into windows and adding the corresponding multiples from the table. +fn windowed_mul( + outerc: usize, + window: usize, + multiples_of_g: &[Vec], + scalar: &T::Scalar, +) -> T +where + T: PrimeCurve, + T::Scalar: PrimeFieldBits, +{ + let modulus_size = ::NUM_BITS as usize; + let scalar_val: Vec = scalar.to_le_bits().into_iter().collect(); + + let mut res = T::identity(); + for outer in 0..outerc { + let mut inner = 0usize; + for i in 0..window { + if outer * window + i < modulus_size && scalar_val[outer * window + i] { + inner |= 1 << i; + } + } + res.add_assign(&multiples_of_g[outer][inner]); + } + res +} + +/// Computes multiple scalar multiplications simultaneously using the windowed +/// method. +pub(crate) fn multi_scalar_mul( + scalar_size: usize, + window: usize, + table: &[Vec], + v: &[T::Scalar], +) -> Vec +where + T: PrimeCurve, + T::Scalar: PrimeFieldBits, +{ + let outerc = scalar_size.div_ceil(window); + assert!(outerc <= table.len()); + + v.par_iter().map(|e| windowed_mul::(outerc, window, table, e)).collect::>() +} diff --git a/prover/src/provider/util/mod.rs b/prover/src/provider/util/mod.rs new file mode 100644 index 0000000..b1d8c99 --- /dev/null +++ b/prover/src/provider/util/mod.rs @@ -0,0 +1,213 @@ +//! Utilities for provider module. 
+pub(in crate::provider) mod fb_msm; +pub mod msm { + use halo2curves::{msm::best_multiexp, CurveAffine}; + + // this argument swap is useful until Rust gets named arguments + // and saves significant complexity in macro code + pub fn cpu_best_msm(bases: &[C], scalars: &[C::Scalar]) -> C::Curve { + best_multiexp(scalars, bases) + } +} + +pub mod field { + use ff::{BatchInverter, Field}; + + use crate::errors::NovaError; + + #[inline] + pub fn batch_invert(mut v: Vec) -> Result, NovaError> { + // we only allocate the scratch space if every element of v is nonzero + let mut scratch_space = v + .iter() + .map(|x| if !x.is_zero_vartime() { Ok(*x) } else { Err(NovaError::InternalError) }) + .collect::, _>>()?; + let _ = BatchInverter::invert_with_external_scratch(&mut v, &mut scratch_space[..]); + Ok(v) + } +} + +pub mod iterators { + use std::{ + borrow::Borrow, + iter::DoubleEndedIterator, + ops::{AddAssign, MulAssign}, + }; + + use ff::Field; + use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; + use rayon_scan::ScanParallelIterator; + + pub trait DoubleEndedIteratorExt: DoubleEndedIterator { + /// This function employs Horner's scheme and core traits to create a + /// combination of an iterator input with the powers + /// of a provided coefficient. + fn rlc(&mut self, coefficient: &F) -> T + where + T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T>, + Self::Item: Borrow, { + let mut iter = self.rev(); + let Some(fst) = iter.next() else { panic!("input iterator should not be empty") }; + + iter.fold(fst.borrow().clone(), |mut acc, item| { + acc *= coefficient; + acc += item.borrow(); + acc + }) + } + } + + impl DoubleEndedIteratorExt for I {} + + pub trait IndexedParallelIteratorExt: IndexedParallelIterator { + /// This function core traits to create a combination of an iterator + /// input with the powers of a provided coefficient. 
+ fn rlc(self, coefficient: &F) -> T + where + F: Field, + Self::Item: Borrow, + T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T> + Send + Sync, { + debug_assert!(self.len() > 0); + // generate an iterator of powers of the right length + let v = { + let mut v = vec![*coefficient; self.len()]; + v[0] = F::ONE; + v + }; + // the collect is due to Scan being unindexed + let powers: Vec<_> = v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect(); + + self + .zip_eq(powers.into_par_iter()) + .map(|(pt, val)| { + let mut pt = pt.borrow().clone(); + pt *= &val; + pt + }) + .reduce_with(|mut a, b| { + a += &b; + a + }) + .unwrap() + } + } + + impl IndexedParallelIteratorExt for I {} +} + +#[cfg(test)] +pub mod test_utils { + //! Contains utilities for testing and benchmarking. + use std::sync::Arc; + + use ff::Field; + use rand::rngs::StdRng; + use rand_core::{CryptoRng, RngCore}; + + use crate::{ + spartan::polys::multilinear::MultilinearPolynomial, + traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, + }; + + /// Returns a random polynomial, a point and calculate its evaluation. + pub(crate) fn random_poly_with_eval( + num_vars: usize, + mut rng: &mut R, + ) -> ( + MultilinearPolynomial<::Scalar>, + Vec<::Scalar>, + ::Scalar, + ) { + // Generate random polynomial and point. + let poly = MultilinearPolynomial::random(num_vars, &mut rng); + let point = (0..num_vars).map(|_| ::Scalar::random(&mut rng)).collect::>(); + + // Calculation evaluation of point over polynomial. + let eval = poly.evaluate(&point); + + (poly, point, eval) + } + + /// Methods used to test the prove and verify flow of + /// [`MultilinearPolynomial`] Commitment Schemes (PCS). + /// + /// Generates a random polynomial and point from a seed to test a + /// proving/verifying flow of one of our [`EvaluationEngine`]. 
+ pub(crate) fn prove_verify_from_num_vars>( + num_vars: usize, + ) { + use rand_core::SeedableRng; + + let mut rng = StdRng::seed_from_u64(num_vars as u64); + + let (poly, point, eval) = random_poly_with_eval::(num_vars, &mut rng); + + // Mock commitment key. + let ck = E::CE::setup(b"test", 1 << num_vars); + let ck = Arc::new(ck); + // Commits to the provided vector using the provided generators. + let commitment = E::CE::commit(&ck, poly.evaluations()); + + prove_verify_with::(ck, &commitment, &poly, &point, &eval, true) + } + + fn prove_verify_with>( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + commitment: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &MultilinearPolynomial<::Scalar>, + point: &[::Scalar], + eval: &::Scalar, + evaluate_bad_proof: bool, + ) { + use std::ops::Add; + + use crate::traits::TranscriptEngineTrait; + + // Generate Prover and verifier key for given commitment key. + let ock = ck.clone(); + let (prover_key, verifier_key) = EE::setup(ck); + + // Generate proof. + let mut prover_transcript = E::TE::new(b"TestEval"); + let proof = EE::prove( + &*ock, + &prover_key, + &mut prover_transcript, + commitment, + poly.evaluations(), + point, + eval, + ) + .unwrap(); + let pcp = prover_transcript.squeeze(b"c").unwrap(); + + // Verify proof. + let mut verifier_transcript = E::TE::new(b"TestEval"); + EE::verify(&verifier_key, &mut verifier_transcript, commitment, point, eval, &proof).unwrap(); + let pcv = verifier_transcript.squeeze(b"c").unwrap(); + + // Check if the prover transcript and verifier transcript are kept in the same + // state. + assert_eq!(pcp, pcv); + + if evaluate_bad_proof { + // Generate another point to verify proof. Also produce eval. + let altered_verifier_point = + point.iter().map(|s| s.add(::Scalar::ONE)).collect::>(); + let altered_verifier_eval = + MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point); + + // Verify proof, should fail. 
+ let mut verifier_transcript = E::TE::new(b"TestEval"); + assert!(EE::verify( + &verifier_key, + &mut verifier_transcript, + commitment, + &altered_verifier_point, + &altered_verifier_eval, + &proof, + ) + .is_err()); + } + } +} diff --git a/prover/src/r1cs/mod.rs b/prover/src/r1cs/mod.rs new file mode 100644 index 0000000..ef5e3f4 --- /dev/null +++ b/prover/src/r1cs/mod.rs @@ -0,0 +1,833 @@ +//! This module defines R1CS related types and a folding scheme for Relaxed R1CS +mod sparse; +pub(crate) mod util; + +use core::cmp::max; + +use ff::Field; +use once_cell::sync::OnceCell; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +pub(crate) use sparse::SparseMatrix; + +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, + traits::{ + commitment::CommitmentEngineTrait, AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CE, +}; + +/// A type that holds the shape of the R1CS matrices +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSShape { + pub(crate) num_cons: usize, + pub(crate) num_vars: usize, + pub(crate) num_io: usize, + pub(crate) A: SparseMatrix, + pub(crate) B: SparseMatrix, + pub(crate) C: SparseMatrix, + #[serde(skip, default = "OnceCell::new")] + pub(crate) digest: OnceCell, +} + +impl SimpleDigestible for R1CSShape {} + +/// A type that holds the result of a R1CS multiplication +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSResult { + pub(crate) AZ: Vec, + pub(crate) BZ: Vec, + pub(crate) CZ: Vec, +} + +/// A type that holds a witness for a given R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct R1CSWitness { + W: Vec, +} + +/// A type that holds an R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 
+#[serde(bound = "")] +pub struct R1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) X: Vec, +} + +/// A type that holds a witness for a given Relaxed R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RelaxedR1CSWitness { + pub(crate) W: Vec, + pub(crate) E: Vec, +} + +/// A type that holds a Relaxed R1CS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSInstance { + pub(crate) comm_W: Commitment, + pub(crate) comm_E: Commitment, + pub(crate) X: Vec, + pub(crate) u: E::Scalar, +} + +/// A type for functions that hints commitment key sizing by returning the floor +/// of the number of required generators. +pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; + +/// Generates public parameters for a Rank-1 Constraint System (R1CS). +/// +/// This function takes into consideration the shape of the R1CS matrices and a +/// hint function for the number of generators. It returns a `CommitmentKey`. +/// +/// # Arguments +/// +/// * `S`: The shape of the R1CS matrices. +/// * `ck_floor`: A function that provides a floor for the number of generators. A good function to +/// provide is the `commitment_key_floor` field in the trait `RelaxedR1CSSNARKTrait`. +pub fn commitment_key( + S: &R1CSShape, + ck_floor: &CommitmentKeyHint, +) -> CommitmentKey { + let size = commitment_key_size(S, ck_floor); + E::CE::setup(b"ck", size) +} + +/// Computes the number of generators required for the commitment key +/// corresponding to shape `S`. 
+pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { + let num_cons = S.num_cons; + let num_vars = S.num_vars; + let ck_hint = ck_floor(S); + max(max(num_cons, num_vars), ck_hint) +} + +impl R1CSShape { + /// Create an object of type `R1CSShape` from the explicitly specified R1CS + /// matrices + pub fn new( + num_cons: usize, + num_vars: usize, + num_io: usize, + A: SparseMatrix, + B: SparseMatrix, + C: SparseMatrix, + ) -> Result { + let is_valid = |num_cons: usize, + num_vars: usize, + num_io: usize, + M: &SparseMatrix| + -> Result, NovaError> { + M.iter() + .map(|(row, col, _val)| { + if row >= num_cons || col > num_io + num_vars { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } + }) + .collect::, NovaError>>() + }; + + is_valid(num_cons, num_vars, num_io, &A)?; + is_valid(num_cons, num_vars, num_io, &B)?; + is_valid(num_cons, num_vars, num_io, &C)?; + + // We require the number of public inputs/outputs to be even + if num_io % 2 != 0 { + return Err(NovaError::InvalidStepCircuitIO); + } + + Ok(Self { num_cons, num_vars, num_io, A, B, C, digest: OnceCell::new() }) + } + + /// Generate a random [`R1CSShape`] with the specified number of + /// constraints, variables, and public inputs/outputs. 
+ pub fn random( + num_cons: usize, + num_vars: usize, + num_io: usize, + num_entries: usize, + mut rng: &mut R, + ) -> Self { + assert!(num_cons.is_power_of_two()); + assert!(num_vars.is_power_of_two()); + assert!(num_entries.is_power_of_two()); + assert!(num_io < num_vars); + + let num_rows = num_cons; + let num_cols = num_vars + 1 + num_io; + + let (NA, NB, NC) = { + let N_div_3 = num_entries / 3; + let NC = num_entries - (2 * N_div_3); + (N_div_3, N_div_3, NC) + }; + + let A = SparseMatrix::random(num_rows, num_cols, NA, &mut rng); + let B = SparseMatrix::random(num_rows, num_cols, NB, &mut rng); + let C = SparseMatrix::random(num_rows, num_cols, NC, &mut rng); + + Self { num_cons, num_vars, num_io, A, B, C, digest: Default::default() } + } + + /// Generate a satisfying [`RelaxedR1CSWitness`] and [`RelaxedR1CSInstance`] + /// for this [`R1CSShape`]. + pub fn random_witness_instance( + &self, + commitment_key: &CommitmentKey, + mut rng: &mut R, + ) -> (RelaxedR1CSWitness, RelaxedR1CSInstance) { + // Sample a random witness and compute the error term + let W = (0..self.num_vars).map(|_| E::Scalar::random(&mut rng)).collect::>(); + let u = E::Scalar::random(&mut rng); + let X = (0..self.num_io).map(|_| E::Scalar::random(&mut rng)).collect::>(); + + let E = self.compute_E(&W, &u, &X).unwrap(); + + let (comm_W, comm_E) = + rayon::join(|| CE::::commit(commitment_key, &W), || CE::::commit(commitment_key, &E)); + + let witness = RelaxedR1CSWitness { W, E }; + let instance = RelaxedR1CSInstance { comm_W, comm_E, u, X }; + + (witness, instance) + } + + /// returned the digest of the `R1CSShape` + pub fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| DigestComputer::new(self).digest()) + .cloned() + .expect("Failure retrieving digest") + } + + // Checks regularity conditions on the R1CSShape, required in Spartan-class + // SNARKs Returns false if num_cons or num_vars are not powers of two, or if + // num_io > num_vars + #[inline] + pub(crate) fn 
is_regular_shape(&self) -> bool { + let cons_valid = self.num_cons.next_power_of_two() == self.num_cons; + let vars_valid = self.num_vars.next_power_of_two() == self.num_vars; + let io_lt_vars = self.num_io < self.num_vars; + cons_valid && vars_valid && io_lt_vars + } + + pub(crate) fn multiply_vec( + &self, + z: &[E::Scalar], + ) -> Result<(Vec, Vec, Vec), NovaError> { + if z.len() != self.num_io + self.num_vars + 1 { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_vec(z), + || rayon::join(|| self.B.multiply_vec(z), || self.C.multiply_vec(z)), + ); + + Ok((Az, Bz, Cz)) + } + + pub(crate) fn multiply_witness( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ) -> Result<(Vec, Vec, Vec), NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_witness(W, u, X), + || rayon::join(|| self.B.multiply_witness(W, u, X), || self.C.multiply_witness(W, u, X)), + ); + + Ok((Az, Bz, Cz)) + } + + pub(crate) fn multiply_witness_into( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ABC_Z: &mut R1CSResult, + ) -> Result<(), NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let R1CSResult { AZ, BZ, CZ } = ABC_Z; + + rayon::join( + || self.A.multiply_witness_into(W, u, X, AZ), + || { + rayon::join( + || self.B.multiply_witness_into(W, u, X, BZ), + || self.C.multiply_witness_into(W, u, X, CZ), + ) + }, + ); + + Ok(()) + } + + /// Computes the error term E = Az * Bz - u*Cz. 
+ fn compute_E( + &self, + W: &[E::Scalar], + u: &E::Scalar, + X: &[E::Scalar], + ) -> Result, NovaError> { + if X.len() != self.num_io || W.len() != self.num_vars { + return Err(NovaError::InvalidWitnessLength); + } + + let (Az, (Bz, Cz)) = rayon::join( + || self.A.multiply_witness(W, u, X), + || rayon::join(|| self.B.multiply_witness(W, u, X), || self.C.multiply_witness(W, u, X)), + ); + + let E = zip_with!((Az.into_par_iter(), Bz.into_par_iter(), Cz.into_par_iter()), |a, b, c| a + * b + - c * u) + .collect::>(); + + Ok(E) + } + + /// Checks if the Relaxed R1CS instance is satisfiable given a witness and + /// its shape + pub fn is_sat_relaxed( + &self, + ck: &CommitmentKey, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result<(), NovaError> { + assert_eq!(W.W.len(), self.num_vars); + assert_eq!(W.E.len(), self.num_cons); + assert_eq!(U.X.len(), self.num_io); + + // verify if Az * Bz - u*Cz = E + let E = self.compute_E(&W.W, &U.u, &U.X)?; + W.E.par_iter().zip_eq(E.into_par_iter()).enumerate().try_for_each(|(i, (we, e))| { + if *we != e { + // constraint failed, retrieve constraint name + Err(NovaError::UnSatIndex(i)) + } else { + Ok(()) + } + })?; + + // verify if comm_E and comm_W are commitments to E and W + let res_comm = { + let (comm_W, comm_E) = + rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); + U.comm_W == comm_W && U.comm_E == comm_E + }; + + if !res_comm { + return Err(NovaError::UnSat); + } + Ok(()) + } + + /// Checks if the R1CS instance is satisfiable given a witness and its shape + pub fn is_sat( + &self, + ck: &CommitmentKey, + U: &R1CSInstance, + W: &R1CSWitness, + ) -> Result<(), NovaError> { + assert_eq!(W.W.len(), self.num_vars); + assert_eq!(U.X.len(), self.num_io); + + // verify if Az * Bz - u*Cz = 0 + let E = self.compute_E(&W.W, &E::Scalar::ONE, &U.X)?; + E.into_par_iter().enumerate().try_for_each(|(i, e)| { + if e != E::Scalar::ZERO { + Err(NovaError::UnSatIndex(i)) + } else { + Ok(()) + } + })?; + + // 
verify if comm_W is a commitment to W + if U.comm_W != CE::::commit(ck, &W.W) { + return Err(NovaError::UnSat); + } + Ok(()) + } + + /// A method to compute a commitment to the cross-term `T` given a + /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair + pub fn commit_T( + &self, + ck: &CommitmentKey, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + ) -> Result<(Vec, Commitment), NovaError> { + let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| self.multiply_witness(&W1.W, &U1.u, &U1.X))?; + + let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| self.multiply_witness(&W2.W, &E::Scalar::ONE, &U2.X))?; + + let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = + tracing::trace_span!("cross terms").in_scope(|| { + let AZ_1_circ_BZ_2 = + (0..AZ_1.len()).into_par_iter().map(|i| AZ_1[i] * BZ_2[i]).collect::>(); + let AZ_2_circ_BZ_1 = + (0..AZ_2.len()).into_par_iter().map(|i| AZ_2[i] * BZ_1[i]).collect::>(); + let u_1_cdot_CZ_2 = + (0..CZ_2.len()).into_par_iter().map(|i| U1.u * CZ_2[i]).collect::>(); + let u_2_cdot_CZ_1 = + (0..CZ_1.len()).into_par_iter().map(|i| CZ_1[i]).collect::>(); + (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) + }); + + let T = tracing::trace_span!("T").in_scope(|| { + AZ_1_circ_BZ_2 + .par_iter() + .zip_eq(&AZ_2_circ_BZ_1) + .zip_eq(&u_1_cdot_CZ_2) + .zip_eq(&u_2_cdot_CZ_1) + .map(|(((a, b), c), d)| *a + *b - *c - *d) + .collect::>() + }); + + let comm_T = CE::::commit(ck, &T); + + Ok((T, comm_T)) + } + + /// A method to compute a commitment to the cross-term `T` given a + /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair + /// + /// This is [`R1CSShape::commit_T`] but into a buffer. 
+ pub fn commit_T_into( + &self, + ck: &CommitmentKey, + U1: &RelaxedR1CSInstance, + W1: &RelaxedR1CSWitness, + U2: &R1CSInstance, + W2: &R1CSWitness, + T: &mut Vec, + ABC_Z_1: &mut R1CSResult, + ABC_Z_2: &mut R1CSResult, + ) -> Result, NovaError> { + tracing::info_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| self.multiply_witness_into(&W1.W, &U1.u, &U1.X, ABC_Z_1))?; + + let R1CSResult { AZ: AZ_1, BZ: BZ_1, CZ: CZ_1 } = ABC_Z_1; + + tracing::info_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| self.multiply_witness_into(&W2.W, &E::Scalar::ONE, &U2.X, ABC_Z_2))?; + + let R1CSResult { AZ: AZ_2, BZ: BZ_2, CZ: CZ_2 } = ABC_Z_2; + + // this doesn't allocate memory but has bad temporal cache locality -- should + // test to see which is faster + T.clear(); + tracing::info_span!("T").in_scope(|| { + (0..AZ_1.len()) + .into_par_iter() + .map(|i| { + let AZ_1_circ_BZ_2 = AZ_1[i] * BZ_2[i]; + let AZ_2_circ_BZ_1 = AZ_2[i] * BZ_1[i]; + let u_1_cdot_Cz_2_plus_Cz_1 = U1.u * CZ_2[i] + CZ_1[i]; + AZ_1_circ_BZ_2 + AZ_2_circ_BZ_1 - u_1_cdot_Cz_2_plus_Cz_1 + }) + .collect_into_vec(T) + }); + + Ok(CE::::commit(ck, T)) + } + + /// Pads the `R1CSShape` so that the shape passes `is_regular_shape` + /// Renumbers variables to accommodate padded variables + pub fn pad(&self) -> Self { + // check if the provided R1CSShape is already as required + if self.is_regular_shape() { + return self.clone(); + } + + // equalize the number of variables, constraints, and public IO + let m = max(max(self.num_vars, self.num_cons), self.num_io).next_power_of_two(); + + // check if the number of variables are as expected, then + // we simply set the number of constraints to the next power of two + if self.num_vars == m { + return Self { + num_cons: m, + num_vars: m, + num_io: self.num_io, + A: self.A.clone(), + B: self.B.clone(), + C: self.C.clone(), + digest: OnceCell::new(), + }; + } + + // otherwise, we need to pad the number of variables and renumber variable + // accesses + let num_vars_padded = m; + let 
num_cons_padded = m; + + let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { + M.indices.par_iter_mut().for_each(|c| { + if *c >= self.num_vars { + *c += num_vars_padded - self.num_vars + } + }); + + M.cols += num_vars_padded - self.num_vars; + + let ex = { + let nnz = M.indptr.last().unwrap(); + vec![*nnz; num_cons_padded - self.num_cons] + }; + M.indptr.extend(ex); + M + }; + + let A_padded = apply_pad(self.A.clone()); + let B_padded = apply_pad(self.B.clone()); + let C_padded = apply_pad(self.C.clone()); + + Self { + num_cons: num_cons_padded, + num_vars: num_vars_padded, + num_io: self.num_io, + A: A_padded, + B: B_padded, + C: C_padded, + digest: OnceCell::new(), + } + } +} + +impl R1CSResult { + /// Produces a default `R1CSResult` given an `R1CSShape` + pub fn default(num_cons: usize) -> Self { + Self { + AZ: vec![E::Scalar::ZERO; num_cons], + BZ: vec![E::Scalar::ZERO; num_cons], + CZ: vec![E::Scalar::ZERO; num_cons], + } + } +} + +impl R1CSWitness { + /// A method to create a witness object using a vector of scalars + pub fn new(S: &R1CSShape, W: Vec) -> Result { + if S.num_vars != W.len() { + Err(NovaError::InvalidWitnessLength) + } else { + Ok(Self { W }) + } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { CE::::commit(ck, &self.W) } +} + +impl R1CSInstance { + /// A method to create an instance object using constituent elements + pub fn new( + S: &R1CSShape, + comm_W: Commitment, + X: Vec, + ) -> Result { + if S.num_io != X.len() { + Err(NovaError::InvalidInputLength) + } else { + Ok(Self { comm_W, X }) + } + } +} + +impl AbsorbInROTrait for R1CSInstance { + fn absorb_in_ro(&self, ro: &mut E::RO) { + self.comm_W.absorb_in_ro(ro); + for x in &self.X { + ro.absorb(scalar_as_base::(*x)); + } + } +} + +impl RelaxedR1CSWitness { + /// Produces a default `RelaxedR1CSWitness` given an `R1CSShape` + pub fn default(S: &R1CSShape) -> Self { + Self { W: vec![E::Scalar::ZERO; S.num_vars], 
E: vec![E::Scalar::ZERO; S.num_cons] } + } + + /// Initializes a new `RelaxedR1CSWitness` from an `R1CSWitness` + pub fn from_r1cs_witness(S: &R1CSShape, witness: R1CSWitness) -> Self { + Self { W: witness.W, E: vec![E::Scalar::ZERO; S.num_cons] } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { + (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) + } + + /// Folds an incoming `R1CSWitness` into the current one + pub fn fold( + &self, + W2: &R1CSWitness, + T: &[E::Scalar], + r: &E::Scalar, + ) -> Result { + let (W1, E1) = (&self.W, &self.E); + let W2 = &W2.W; + + if W1.len() != W2.len() { + return Err(NovaError::InvalidWitnessLength); + } + + let W = zip_with!((W1.par_iter(), W2), |a, b| *a + *r * *b).collect::>(); + let E = zip_with!((E1.par_iter(), T), |a, b| *a + *r * *b).collect::>(); + Ok(Self { W, E }) + } + + /// Mutably folds an incoming `R1CSWitness` into the current one + pub fn fold_mut( + &mut self, + W2: &R1CSWitness, + T: &[E::Scalar], + r: &E::Scalar, + ) -> Result<(), NovaError> { + if self.W.len() != W2.W.len() { + return Err(NovaError::InvalidWitnessLength); + } + + self.W.par_iter_mut().zip_eq(&W2.W).for_each(|(a, b)| *a += *r * *b); + self.E.par_iter_mut().zip_eq(T).for_each(|(a, b)| *a += *r * *b); + + Ok(()) + } + + /// Pads the provided witness to the correct length + pub fn pad(&self, S: &R1CSShape) -> Self { + let mut W = self.W.clone(); + W.extend(vec![E::Scalar::ZERO; S.num_vars - W.len()]); + + let mut E = self.E.clone(); + E.extend(vec![E::Scalar::ZERO; S.num_cons - E.len()]); + + Self { W, E } + } +} + +impl RelaxedR1CSInstance { + /// Produces a default `RelaxedR1CSInstance` given `R1CSGens` and + /// `R1CSShape` + pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> Self { + let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); + Self { comm_W, comm_E, u: E::Scalar::ZERO, X: vec![E::Scalar::ZERO; S.num_io] } + } + + /// 
Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` + pub fn from_r1cs_instance( + _ck: &CommitmentKey, + S: &R1CSShape, + instance: R1CSInstance, + ) -> Self { + assert_eq!(S.num_io, instance.X.len()); + + Self { + comm_W: instance.comm_W, + comm_E: Commitment::::default(), + u: E::Scalar::ONE, + X: instance.X, + } + } + + /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` + pub fn from_r1cs_instance_unchecked(comm_W: &Commitment, X: &[E::Scalar]) -> Self { + Self { + comm_W: *comm_W, + comm_E: Commitment::::default(), + u: E::Scalar::ONE, + X: X.to_vec(), + } + } + + /// Folds an incoming `RelaxedR1CSInstance` into the current one + pub fn fold(&self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) -> Self { + let (X1, u1, comm_W_1, comm_E_1) = + (&self.X, &self.u, &self.comm_W.clone(), &self.comm_E.clone()); + let (X2, comm_W_2) = (&U2.X, &U2.comm_W); + + // weighted sum of X, comm_W, comm_E, and u + let X = zip_with!((X1.par_iter(), X2), |a, b| *a + *r * *b).collect::>(); + let comm_W = *comm_W_1 + *comm_W_2 * *r; + let comm_E = *comm_E_1 + *comm_T * *r; + let u = *u1 + *r; + + Self { comm_W, comm_E, X, u } + } + + /// Mutably folds an incoming `RelaxedR1CSInstance` into the current one + pub fn fold_mut(&mut self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) { + let (X2, comm_W_2) = (&U2.X, &U2.comm_W); + + // weighted sum of X, comm_W, comm_E, and u + self.X.par_iter_mut().zip_eq(X2).for_each(|(a, b)| { + *a += *r * *b; + }); + self.comm_W = self.comm_W + *comm_W_2 * *r; + self.comm_E = self.comm_E + *comm_T * *r; + self.u += *r; + } +} + +impl TranscriptReprTrait for RelaxedR1CSInstance { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_W.to_transcript_bytes(), + self.comm_E.to_transcript_bytes(), + self.u.to_transcript_bytes(), + self.X.as_slice().to_transcript_bytes(), + ] + .concat() + } +} + +impl AbsorbInROTrait for RelaxedR1CSInstance { + fn absorb_in_ro(&self, ro: &mut E::RO) { + 
self.comm_W.absorb_in_ro(ro); + self.comm_E.absorb_in_ro(ro); + ro.absorb(scalar_as_base::(self.u)); + + // absorb each element of self.X in bignum format + for x in &self.X { + let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); + for limb in limbs { + ro.absorb(scalar_as_base::(limb)); + } + } + } +} + +/// Empty buffer for `commit_T_into` +pub fn default_T(num_cons: usize) -> Vec { Vec::with_capacity(num_cons) } + +#[cfg(test)] +pub(crate) mod tests { + use ff::Field; + use rand_chacha::ChaCha20Rng; + use rand_core::SeedableRng; + + use super::*; + use crate::{ + provider::{Bn256EngineIPA, Bn256EngineKZG}, + r1cs::sparse::SparseMatrix, + traits::Engine, + }; + + pub(crate) fn tiny_r1cs(num_vars: usize) -> R1CSShape { + let one = ::ONE; + let (num_cons, num_vars, num_io, A, B, C) = { + let num_cons = 4; + let num_io = 2; + + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are + // respectively the input and output. The R1CS for this problem + // consists of the following constraints: `I0 * I0 - Z0 = 0` + // `Z0 * I0 - Z1 = 0` + // `(Z1 + I0) * 1 - Z2 = 0` + // `(Z2 + 5) * 1 - I1 = 0` + + // Relaxed R1CS is a set of three sparse matrices (A B C), where there is a row + // for every constraint and a column for every entry in z = (vars, + // u, inputs) An R1CS instance is satisfiable iff: + // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) + let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); + let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); + + // constraint 0 entries in (A,B,C) + // `I0 * I0 - Z0 = 0` + A.push((0, num_vars + 1, one)); + B.push((0, num_vars + 1, one)); + C.push((0, 0, one)); + + // constraint 1 entries in (A,B,C) + // `Z0 * I0 - Z1 = 0` + A.push((1, 0, one)); + B.push((1, num_vars + 1, one)); + C.push((1, 1, one)); + + // constraint 2 entries in (A,B,C) + // `(Z1 + I0) * 1 - Z2 = 0` + A.push((2, 1, one)); + A.push((2, 
num_vars + 1, one)); + B.push((2, num_vars, one)); + C.push((2, 2, one)); + + // constraint 3 entries in (A,B,C) + // `(Z2 + 5) * 1 - I1 = 0` + A.push((3, 2, one)); + A.push((3, num_vars, one + one + one + one + one)); + B.push((3, num_vars, one)); + C.push((3, num_vars + 2, one)); + + (num_cons, num_vars, num_io, A, B, C) + }; + + // create a shape object + let rows = num_cons; + let cols = num_vars + num_io + 1; + + R1CSShape::new( + num_cons, + num_vars, + num_io, + SparseMatrix::new(&A, rows, cols), + SparseMatrix::new(&B, rows, cols), + SparseMatrix::new(&C, rows, cols), + ) + .unwrap() + } + + fn test_pad_tiny_r1cs_with() { + let padded_r1cs = tiny_r1cs::(3).pad(); + assert!(padded_r1cs.is_regular_shape()); + + let expected_r1cs = tiny_r1cs::(4); + + assert_eq!(padded_r1cs, expected_r1cs); + } + + #[test] + fn test_pad_tiny_r1cs() { test_pad_tiny_r1cs_with::(); } + + fn test_random_r1cs_with() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + + let ck_size: usize = 16_384; + let ck = E::CE::setup(b"ipa", ck_size); + + let cases = [(16, 16, 2, 16), (16, 32, 12, 8), (256, 256, 2, 1024)]; + + for (num_cons, num_vars, num_io, num_entries) in cases { + let S = R1CSShape::::random(num_cons, num_vars, num_io, num_entries, &mut rng); + let (W, U) = S.random_witness_instance(&ck, &mut rng); + S.is_sat_relaxed(&ck, &U, &W).unwrap(); + } + } + + #[test] + fn test_random_r1cs() { test_random_r1cs_with::(); } +} diff --git a/prover/src/r1cs/sparse.rs b/prover/src/r1cs/sparse.rs new file mode 100644 index 0000000..e52e537 --- /dev/null +++ b/prover/src/r1cs/sparse.rs @@ -0,0 +1,333 @@ +//! # Sparse Matrices +//! +//! This module defines a custom implementation of CSR/CSC sparse matrices. +//! Specifically, we implement sparse matrix / dense vector multiplication +//! to compute the `A z`, `B z`, and `C z` in Nova. 
+ +use std::{cmp::Ordering, collections::BTreeSet}; + +use ff::PrimeField; +use itertools::Itertools as _; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; +use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; + +/// CSR format sparse matrix, We follow the names used by scipy. +/// Detailed explanation here: +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SparseMatrix { + /// all non-zero values in the matrix + pub data: Vec, + /// column indices + pub indices: Vec, + /// row information + pub indptr: Vec, + /// number of columns + pub cols: usize, +} + +/// Wrapper type for encode rows of [`SparseMatrix`] +#[derive(Debug, Clone, RefCast)] +#[repr(transparent)] +pub struct RowData([usize; 2]); + +/// [`SparseMatrix`]s are often large, and this helps with cloning bottlenecks +impl Clone for SparseMatrix { + fn clone(&self) -> Self { + Self { + data: self.data.par_iter().cloned().collect(), + indices: self.indices.par_iter().cloned().collect(), + indptr: self.indptr.par_iter().cloned().collect(), + cols: self.cols, + } + } +} + +impl SparseMatrix { + /// 0x0 empty matrix + pub fn empty() -> Self { Self { data: vec![], indices: vec![], indptr: vec![0], cols: 0 } } + + /// Construct from the COO representation; Vec. + /// We assume that the rows are sorted during construction. 
+ pub fn new(matrix: &[(usize, usize, F)], rows: usize, cols: usize) -> Self { + let mut new_matrix = vec![vec![]; rows]; + for (row, col, val) in matrix { + new_matrix[*row].push((*col, *val)); + } + + for row in new_matrix.iter() { + assert!(row.windows(2).all(|w| w[0].0 < w[1].0)); + } + + let mut indptr = vec![0; rows + 1]; + for (i, col) in new_matrix.iter().enumerate() { + indptr[i + 1] = indptr[i] + col.len(); + } + + let mut indices = vec![]; + let mut data = vec![]; + for col in new_matrix { + let (idx, val): (Vec<_>, Vec<_>) = col.into_iter().unzip(); + indices.extend(idx); + data.extend(val); + } + + Self { data, indices, indptr, cols } + } + + /// Samples a new random matrix of size `rows` x `cols` with `num_entries` + /// non-zero entries. + pub fn random( + rows: usize, + cols: usize, + num_entries: usize, + mut rng: &mut R, + ) -> Self { + assert!(num_entries <= rows * cols); + + let mut indices = BTreeSet::<(usize, usize)>::new(); + while indices.len() < num_entries { + let row = rng.next_u32() as usize % rows; + let col = rng.next_u32() as usize % cols; + indices.insert((row, col)); + } + + let matrix = + indices.into_iter().map(|(row, col)| (row, col, F::random(&mut rng))).collect::>(); + + Self::new(&matrix, rows, cols) + } + + /// Returns an iterator into the rows + pub fn iter_rows(&self) -> impl Iterator { + self.indptr.windows(2).map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) + } + + /// Returns a parallel iterator into the rows + pub fn par_iter_rows(&self) -> impl IndexedParallelIterator { + self.indptr.par_windows(2).map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) + } + + /// Retrieves the data for row slice [i..j] from `row`. + /// [`RowData`] **must** be created from unmodified `self` previously to + /// guarentee safety. 
+ pub fn get_row(&self, row: &RowData) -> impl Iterator { + self.data[row.0[0]..row.0[1]].iter().zip_eq(&self.indices[row.0[0]..row.0[1]]) + } + + /// Retrieves the data for row slice [i..j] from `ptrs`. + /// We assume that `ptrs` is indexed from `indptrs` and do not check if the + /// returned slice is actually a valid row. + pub fn get_row_unchecked(&self, ptrs: &[usize; 2]) -> impl Iterator { + self.data[ptrs[0]..ptrs[1]].iter().zip_eq(&self.indices[ptrs[0]..ptrs[1]]) + } + + /// Multiply by a dense vector; uses rayon to parallelize. + pub fn multiply_vec(&self, vector: &[F]) -> Vec { + assert_eq!(self.cols, vector.len(), "invalid shape"); + + self.multiply_vec_unchecked(vector) + } + + /// Multiply by a dense vector; uses rayon to parallelize. + /// This does not check that the shape of the matrix/vector are compatible. + #[tracing::instrument(skip_all, level = "trace", name = "SparseMatrix::multiply_vec_unchecked")] + fn multiply_vec_unchecked(&self, vector: &[F]) -> Vec { + let mut sink: Vec = Vec::with_capacity(self.indptr.len() - 1); + self.multiply_vec_into_unchecked(vector, &mut sink); + sink + } + + fn multiply_vec_into_unchecked(&self, vector: &[F], sink: &mut Vec) { + self + .indptr + .par_windows(2) + .map(|ptrs| { + self + .get_row_unchecked(ptrs.try_into().unwrap()) + .map(|(val, col_idx)| *val * vector[*col_idx]) + .sum() + }) + .collect_into_vec(sink); + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. + pub fn multiply_witness(&self, W: &[F], u: &F, X: &[F]) -> Vec { + assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); + + self.multiply_witness_unchecked(W, u, X) + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. This does not check that the shape of the matrix/vector + /// are compatible. 
+ #[tracing::instrument(skip_all, level = "trace", name = "SparseMatrix::multiply_vec_unchecked")] + fn multiply_witness_unchecked(&self, W: &[F], u: &F, X: &[F]) -> Vec { + // preallocate the result vector + let mut sink = Vec::with_capacity(self.indptr.len() - 1); + self.multiply_witness_into_unchecked(W, u, X, &mut sink); + sink + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. + pub fn multiply_witness_into(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { + assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); + + self.multiply_witness_into_unchecked(W, u, X, sink); + } + + /// Multiply by a witness representing a dense vector; uses rayon to + /// parallelize. This does not check that the shape of the matrix/vector + /// are compatible. + fn multiply_witness_into_unchecked(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { + let num_vars = W.len(); + self + .indptr + .par_windows(2) + .map(|ptrs| { + self.get_row_unchecked(ptrs.try_into().unwrap()).fold(F::ZERO, |acc, (val, col_idx)| { + let val = match col_idx.cmp(&num_vars) { + Ordering::Less => *val * W[*col_idx], + Ordering::Equal => *val * *u, + Ordering::Greater => *val * X[*col_idx - num_vars - 1], + }; + acc + val + }) + }) + .collect_into_vec(sink); + } + + /// number of non-zero entries + pub fn len(&self) -> usize { *self.indptr.last().unwrap() } + + /// empty matrix + pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// returns a custom iterator + pub fn iter(&self) -> Iter<'_, F> { + let mut row = 0; + while self.indptr[row + 1] == 0 { + row += 1; + } + Iter { matrix: self, row, i: 0, nnz: *self.indptr.last().unwrap() } + } + + pub fn num_rows(&self) -> usize { self.indptr.len() - 1 } + + pub fn num_cols(&self) -> usize { self.cols } +} + +/// Iterator for sparse matrix +#[derive(Debug)] +pub struct Iter<'a, F: PrimeField> { + matrix: &'a SparseMatrix, + row: usize, + i: usize, + nnz: usize, +} + +impl Iterator for Iter<'_, F> { + type 
Item = (usize, usize, F); + + fn next(&mut self) -> Option { + // are we at the end? + if self.i == self.nnz { + return None; + } + + // compute current item + let curr_item = (self.row, self.matrix.indices[self.i], self.matrix.data[self.i]); + + // advance the iterator + self.i += 1; + // edge case at the end + if self.i == self.nnz { + return Some(curr_item); + } + // if `i` has moved to next row + while self.i >= self.matrix.indptr[self.row + 1] { + self.row += 1; + } + + Some(curr_item) + } +} + +// #[cfg(test)] +// mod tests { +// #[cfg(not(target_arch = "wasm32"))] +// use proptest::{ +// prelude::*, +// strategy::{BoxedStrategy, Just, Strategy}, +// }; + +// use super::SparseMatrix; +// #[cfg(not(target_arch = "wasm32"))] +// use crate::r1cs::util::FWrap; +// use crate::{ +// provider::PallasEngine, +// traits::{Engine, Group}, +// }; + +// type G = ::GE; +// type Fr = ::Scalar; + +// #[test] +// fn test_matrix_creation() { +// let matrix_data = vec![ +// (0, 1, Fr::from(2)), +// (1, 2, Fr::from(3)), +// (2, 0, Fr::from(4)), +// ]; +// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); + +// assert_eq!( +// sparse_matrix.data, +// vec![Fr::from(2), Fr::from(3), Fr::from(4)] +// ); +// assert_eq!(sparse_matrix.indices, vec![1, 2, 0]); +// assert_eq!(sparse_matrix.indptr, vec![0, 1, 2, 3]); +// } + +// #[test] +// fn test_matrix_vector_multiplication() { +// let matrix_data = vec![ +// (0, 1, Fr::from(2)), +// (0, 2, Fr::from(7)), +// (1, 2, Fr::from(3)), +// (2, 0, Fr::from(4)), +// ]; +// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); +// let vector = vec![Fr::from(1), Fr::from(2), Fr::from(3)]; + +// let result = sparse_matrix.multiply_vec(&vector); + +// assert_eq!(result, vec![Fr::from(25), Fr::from(9), Fr::from(4)]); +// } + +// #[cfg(not(target_arch = "wasm32"))] +// fn coo_strategy() -> BoxedStrategy)>> { +// let coo_strategy = +// any::>().prop_flat_map(|f| (0usize..100, 0usize..100, +// Just(f))); 
proptest::collection::vec(coo_strategy, 10).boxed() +// } + +// #[cfg(not(target_arch = "wasm32"))] +// proptest! { +// #[test] +// fn test_matrix_iter(mut coo_matrix in coo_strategy()) { +// // process the randomly generated coo matrix +// coo_matrix.sort_by_key(|(row, col, _val)| (*row, *col)); +// coo_matrix.dedup_by_key(|(row, col, _val)| (*row, *col)); +// let coo_matrix = coo_matrix.into_iter().map(|(row, col, val)| { +// (row, col, val.0) }).collect::>(); + +// let matrix = SparseMatrix::new(&coo_matrix, 100, 100); + +// prop_assert_eq!(coo_matrix, matrix.iter().collect::>()); +// } +// } +// } diff --git a/src/r1cs/util.rs b/prover/src/r1cs/util.rs similarity index 51% rename from src/r1cs/util.rs rename to prover/src/r1cs/util.rs index e81c85d..e8438f1 100644 --- a/src/r1cs/util.rs +++ b/prover/src/r1cs/util.rs @@ -12,18 +12,17 @@ impl Copy for FWrap {} #[cfg(not(target_arch = "wasm32"))] /// Trait implementation for generating `FWrap` instances with proptest impl Arbitrary for FWrap { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - use rand::rngs::StdRng; - use rand_core::SeedableRng; - - let strategy = any::<[u8; 32]>() - .prop_map(|seed| Self(F::random(StdRng::from_seed(seed)))) - .no_shrink(); - strategy.boxed() - } + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + use rand::rngs::StdRng; + use rand_core::SeedableRng; + + let strategy = + any::<[u8; 32]>().prop_map(|seed| Self(F::random(StdRng::from_seed(seed)))).no_shrink(); + strategy.boxed() + } } /// Wrapper struct around a Group element that implements additional traits @@ -35,16 +34,15 @@ impl Copy for GWrap {} #[cfg(not(target_arch = "wasm32"))] /// Trait implementation for generating `GWrap` instances with proptest impl Arbitrary for GWrap { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) 
-> Self::Strategy { - use rand::rngs::StdRng; - use rand_core::SeedableRng; - - let strategy = any::<[u8; 32]>() - .prop_map(|seed| Self(G::random(StdRng::from_seed(seed)))) - .no_shrink(); - strategy.boxed() - } + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + use rand::rngs::StdRng; + use rand_core::SeedableRng; + + let strategy = + any::<[u8; 32]>().prop_map(|seed| Self(G::random(StdRng::from_seed(seed)))).no_shrink(); + strategy.boxed() + } } diff --git a/prover/src/spartan/batched.rs b/prover/src/spartan/batched.rs new file mode 100644 index 0000000..90d25ac --- /dev/null +++ b/prover/src/spartan/batched.rs @@ -0,0 +1,580 @@ +//! This module implements `BatchedRelaxedR1CSSNARKTrait` using Spartan that is +//! generic over the polynomial commitment and evaluation argument (i.e., a PCS) +//! This version of Spartan does not use preprocessing so the verifier keeps the +//! entire description of R1CS matrices. This is essentially optimal for the +//! verifier when using an IPA-based polynomial commitment scheme. This batched +//! implementation batches the outer and inner sumchecks of the Spartan SNARK. 
+ +use core::slice; +use std::{iter, sync::Arc}; + +use ff::Field; +use itertools::Itertools; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use super::{ + compute_eval_table_sparse, + math::Math, + polys::{eq::EqPolynomial, multilinear::MultilinearPolynomial}, + powers, + snark::batch_eval_reduce, + sumcheck::SumcheckProof, + PolyEvalInstance, PolyEvalWitness, +}; +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, + spartan::{ + polys::{multilinear::SparsePolynomial, power::PowPolynomial}, + snark::batch_eval_verify, + }, + traits::{ + evaluation::EvaluationEngineTrait, + snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + zip_with, CommitmentKey, +}; + +/// A succinct proof of knowledge of a witness to a batch of relaxed R1CS +/// instances The proof is produced using Spartan's combination of the sum-check +/// and the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct BatchedRelaxedR1CSSNARK> { + sc_proof_outer: SumcheckProof, + // Claims ([Azᵢ(τᵢ)], [Bzᵢ(τᵢ)], [Czᵢ(τᵢ)]) + claims_outer: Vec<(E::Scalar, E::Scalar, E::Scalar)>, + // [Eᵢ(r_x)] + evals_E: Vec, + sc_proof_inner: SumcheckProof, + // [Wᵢ(r_y[1..])] + evals_W: Vec, + sc_proof_batch: SumcheckProof, + // [Wᵢ(r_z), Eᵢ(r_z)] + evals_batch: Vec, + eval_arg: EE::EvaluationArgument, +} + +/// A type that represents the prover's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey> { + pub pk_ee: EE::ProverKey, + pub vk_digest: E::Scalar, // digest of the verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + pub vk_ee: EE::VerifierKey, + S: Vec>, + #[serde(skip, 
default = "OnceCell::new")] + pub digest: OnceCell, +} + +impl> VerifierKey { + fn new(shapes: Vec>, vk_ee: EE::VerifierKey) -> Self { + Self { vk_ee, S: shapes, digest: OnceCell::new() } + } +} + +impl> SimpleDigestible for VerifierKey {} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the verifier's key. + fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +impl> BatchedRelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + // NOTE: We do not use the verifier key in this context + // TODO: This currently samples a `ck_c` element, does this need to + // be truly secret, if so, retrieve from an SRS. + let (pk_ee, _vk) = EE::setup(ck); + + Ok(ProverKey { pk_ee, vk_digest }) + } + + fn setup( + ck: Arc>, + S: Vec<&R1CSShape>, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + let (pk_ee, vk_ee) = EE::setup(ck); + + let S = S.iter().map(|s| s.pad()).collect(); + + let vk = VerifierKey::new(S, vk_ee); + + let pk = ProverKey { pk_ee, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result { + let num_instances = U.len(); + // Pad shapes and ensure their sizes are correct + let S = S.iter().map(|s| s.pad()).collect::>(); + + // Pad (W,E) for each instance + let W = zip_with!(iter, (W, S), |w, s| w.pad(s)).collect::>>(); + + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &pk.vk_digest); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + let 
(polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); + + // Append public inputs to W: Z = [W, u, X] + let polys_Z = zip_with!(iter, (polys_W, U), |w, u| [w.clone(), vec![u.u], u.X.clone()] + .concat()) + .collect::>>(); + + let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = + S.iter().map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)).unzip(); + let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); + let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); + + // Generate tau polynomial corresponding to eq(τ, τ², τ⁴ , …) + // for a random challenge τ + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); + + let polys_tau = num_rounds_x + .iter() + .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) + .map(MultilinearPolynomial::new) + .collect::>(); + + // Compute MLEs of Az, Bz, Cz, uCz + E + let (polys_Az, polys_Bz, polys_Cz): (Vec<_>, Vec<_>, Vec<_>) = + zip_with!(par_iter, (S, polys_Z), |s, poly_Z| { + let (poly_Az, poly_Bz, poly_Cz) = s.multiply_vec(poly_Z)?; + Ok((poly_Az, poly_Bz, poly_Cz)) + }) + .collect::, NovaError>>()? 
+ .into_iter() + .multiunzip(); + + let polys_uCz_E = zip_with!(par_iter, (U, polys_E, polys_Cz), |u, poly_E, poly_Cz| { + zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| u.u * cz + e).collect::>() + }) + .collect::>(); + + let comb_func_outer = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + // Sample challenge for random linear-combination of outer claims + let outer_r = transcript.squeeze(b"out_r")?; + let outer_r_powers = powers(&outer_r, num_instances); + + // Verify outer sumcheck: Az * Bz - uCz_E for each instance + let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term_batch( + &vec![E::Scalar::ZERO; num_instances], + &num_rounds_x, + polys_tau, + polys_Az.into_iter().map(MultilinearPolynomial::new).collect(), + polys_Bz.into_iter().map(MultilinearPolynomial::new).collect(), + polys_uCz_E.into_iter().map(MultilinearPolynomial::new).collect(), + &outer_r_powers, + comb_func_outer, + &mut transcript, + )?; + + let r_x = num_rounds_x + .iter() + .map(|&num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) + .collect::>(); + + // Extract evaluations of Az, Bz from Sumcheck and Cz, E at r_x + let (evals_Az_Bz_Cz, evals_E): (Vec<_>, Vec<_>) = zip_with!( + par_iter, + (claims_outer[1], claims_outer[2], polys_Cz, polys_E, r_x), + |eval_Az, eval_Bz, poly_Cz, poly_E, r_x| { + let (eval_Cz, eval_E) = rayon::join( + || MultilinearPolynomial::evaluate_with(poly_Cz, r_x), + || MultilinearPolynomial::evaluate_with(poly_E, r_x), + ); + ((*eval_Az, *eval_Bz, eval_Cz), eval_E) + } + ) + .unzip(); + + evals_Az_Bz_Cz.iter().zip_eq(evals_E.iter()).for_each( + |(&(eval_Az, eval_Bz, eval_Cz), &eval_E)| { + transcript.absorb(b"claims_outer", &[eval_Az, eval_Bz, eval_Cz, eval_E].as_slice()) + }, + ); + + let inner_r = transcript.squeeze(b"in_r")?; + let inner_r_square = inner_r.square(); + let 
inner_r_cube = inner_r_square * inner_r; + let inner_r_powers = powers(&inner_r_cube, num_instances); + + let claims_inner_joint = evals_Az_Bz_Cz + .iter() + .map(|(eval_Az, eval_Bz, eval_Cz)| *eval_Az + inner_r * eval_Bz + inner_r_square * eval_Cz) + .collect::>(); + + let polys_ABCs = { + let inner = |M_evals_As: Vec, + M_evals_Bs: Vec, + M_evals_Cs: Vec| + -> Vec { + zip_with!(into_par_iter, (M_evals_As, M_evals_Bs, M_evals_Cs), |eval_A, eval_B, eval_C| { + eval_A + inner_r * eval_B + inner_r_square * eval_C + }) + .collect::>() + }; + + zip_with!(par_iter, (S, r_x), |s, r_x| { + let evals_rx = EqPolynomial::evals_from_points(r_x); + let (eval_A, eval_B, eval_C) = compute_eval_table_sparse(s, &evals_rx); + MultilinearPolynomial::new(inner(eval_A, eval_B, eval_C)) + }) + .collect::>() + }; + + let polys_Z = polys_Z + .into_iter() + .zip_eq(num_rounds_y.iter()) + .map(|(mut z, &num_rounds_y)| { + z.resize(1 << num_rounds_y, E::Scalar::ZERO); + MultilinearPolynomial::new(z) + }) + .collect::>(); + + let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { + *poly_A_comp * *poly_B_comp + }; + + let (sc_proof_inner, r_y, _claims_inner): (SumcheckProof, Vec, (Vec<_>, Vec<_>)) = + SumcheckProof::prove_quad_batch( + &claims_inner_joint, + &num_rounds_y, + polys_ABCs, + polys_Z, + &inner_r_powers, + comb_func, + &mut transcript, + )?; + + let r_y = num_rounds_y + .iter() + .map(|num_rounds| { + let (_, r_y_hi) = r_y.split_at(num_rounds_y_max - num_rounds); + r_y_hi + }) + .collect::>(); + + let evals_W = zip_with!(par_iter, (polys_W, r_y), |poly, r_y| { + MultilinearPolynomial::evaluate_with(poly, &r_y[1..]) + }) + .collect::>(); + + // Create evaluation instances for W(r_y[1..]) and E(r_x) + let (w_vec, u_vec) = { + let mut w_vec = Vec::with_capacity(2 * num_instances); + let mut u_vec = Vec::with_capacity(2 * num_instances); + w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!(iter, (evals_W, U, 
r_y), |eval, u, r_y| { + PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } + })); + + w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); + u_vec.extend(zip_with!((evals_E.iter(), U.iter(), r_x), |eval_E, u, r_x| { + PolyEvalInstance { c: u.comm_E, x: r_x, e: *eval_E } + })); + (w_vec, u_vec) + }; + + let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = + batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; + + let eval_arg = EE::prove( + ck, + &pk.pk_ee, + &mut transcript, + &batched_u.c, + &batched_w.p, + &batched_u.x, + &batched_u.e, + )?; + + Ok(Self { + sc_proof_outer, + claims_outer: evals_Az_Bz_Cz, + evals_E, + sc_proof_inner, + evals_W, + sc_proof_batch, + evals_batch: claims_batch_left, + eval_arg, + }) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError> { + let num_instances = U.len(); + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &vk.digest()); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + let num_instances = U.len(); + + let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = + vk.S.iter().map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)).unzip(); + let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); + let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); + + // Define τ polynomials of the appropriate size for each instance + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); + + let polys_tau = num_rounds_x + .iter() + .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) + .map(MultilinearPolynomial::new) + .collect::>(); + + // Sample challenge for random linear-combination of outer claims + let outer_r = transcript.squeeze(b"out_r")?; + let outer_r_powers = powers(&outer_r, 
num_instances); + + let (claim_outer_final, r_x) = self.sc_proof_outer.verify_batch( + &vec![E::Scalar::ZERO; num_instances], + &num_rounds_x, + &outer_r_powers, + 3, + &mut transcript, + )?; + + // Since each instance has a different number of rounds, the Sumcheck + // prover skips the first num_rounds_x_max - num_rounds_x rounds. + // The evaluation point for each instance is therefore r_x[num_rounds_x_max - + // num_rounds_x..] + let r_x = num_rounds_x + .iter() + .map(|num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) + .collect::>(); + + // Extract evaluations into a vector [(Azᵢ, Bzᵢ, Czᵢ, Eᵢ)] + let ABCE_evals = || self.claims_outer.iter().zip_eq(self.evals_E.iter()); + + // Add evaluations of Az, Bz, Cz, E to transcript + for ((claim_Az, claim_Bz, claim_Cz), eval_E) in ABCE_evals() { + transcript.absorb(b"claims_outer", &[*claim_Az, *claim_Bz, *claim_Cz, *eval_E].as_slice()) + } + + let chis_r_x = + r_x.par_iter().map(|r_x| EqPolynomial::evals_from_points(r_x)).collect::>(); + + // Evaluate τ(rₓ) for each instance + let evals_tau = zip_with!(iter, (polys_tau, chis_r_x), |poly_tau, er_x| { + MultilinearPolynomial::evaluate_with_chis(poly_tau.evaluations(), er_x) + }); + + // Compute expected claim for all instances ∑ᵢ rⁱ⋅τ(rₓ)⋅(Azᵢ⋅Bzᵢ − uᵢ⋅Czᵢ − Eᵢ) + let claim_outer_final_expected = zip_with!( + (ABCE_evals(), U.iter(), evals_tau, outer_r_powers.iter()), + |ABCE_eval, u, eval_tau, r| { + let ((claim_Az, claim_Bz, claim_Cz), eval_E) = ABCE_eval; + *r * eval_tau * (*claim_Az * claim_Bz - u.u * claim_Cz - eval_E) + } + ) + .sum::(); + + if claim_outer_final != claim_outer_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + let inner_r = transcript.squeeze(b"in_r")?; + let inner_r_square = inner_r.square(); + let inner_r_cube = inner_r_square * inner_r; + let inner_r_powers = powers(&inner_r_cube, num_instances); + + // Compute inner claims Mzᵢ = (Azᵢ + r⋅Bzᵢ + r²⋅Czᵢ), + // which are batched by Sumcheck into one claim: ∑ᵢ 
r³ⁱ⋅Mzᵢ + let claims_inner = self + .claims_outer + .iter() + .map(|(claim_Az, claim_Bz, claim_Cz)| { + *claim_Az + inner_r * claim_Bz + inner_r_square * claim_Cz + }) + .collect::>(); + + let (claim_inner_final, r_y) = self.sc_proof_inner.verify_batch( + &claims_inner, + &num_rounds_y, + &inner_r_powers, + 2, + &mut transcript, + )?; + let r_y: Vec> = num_rounds_y + .iter() + .map(|num_rounds| r_y[(num_rounds_y_max - num_rounds)..].to_vec()) + .collect(); + + // Compute evaluations of Zᵢ = [Wᵢ, uᵢ, Xᵢ] at r_y + // Zᵢ(r_y) = (1−r_y[0])⋅W(r_y[1..]) + r_y[0]⋅MLE([uᵢ, Xᵢ])(r_y[1..]) + let evals_Z = zip_with!(iter, (self.evals_W, U, r_y), |eval_W, U, r_y| { + let eval_X = { + // constant term + let poly_X = iter::once(U.u).chain(U.X.iter().cloned()).collect(); + SparsePolynomial::new(r_y.len() - 1, poly_X).evaluate(&r_y[1..]) + }; + (E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X + }) + .collect::>(); + + // compute evaluations of R1CS matrices M(r_x, r_y) = eq(r_y)ᵀ⋅M⋅eq(r_x) + let multi_evaluate = |M_vec: &[&SparseMatrix], + chi_r_x: &[E::Scalar], + r_y: &[E::Scalar]| + -> Vec { + let evaluate_with_table = + |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { + M.par_iter_rows() + .enumerate() + .map(|(row_idx, row)| { + M.get_row(row) + .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) + .sum::() + }) + .sum() + }; + + let T_x = chi_r_x; + let T_y = EqPolynomial::evals_from_points(r_y); + + M_vec.par_iter().map(|&M_vec| evaluate_with_table(M_vec, T_x, &T_y)).collect() + }; + + // Compute inner claim ∑ᵢ r³ⁱ⋅(Aᵢ(r_x, r_y) + r⋅Bᵢ(r_x, r_y) + r²⋅Cᵢ(r_x, + // r_y))⋅Zᵢ(r_y) + let claim_inner_final_expected = zip_with!( + iter, + (vk.S, chis_r_x, r_y, evals_Z, inner_r_powers), + |S, r_x, r_y, eval_Z, r_i| { + let evals = multi_evaluate(&[&S.A, &S.B, &S.C], r_x, r_y); + let eval = evals[0] + inner_r * evals[1] + inner_r_square * evals[2]; + eval * r_i * eval_Z + } + ) + .sum::(); + + if claim_inner_final != claim_inner_final_expected { + 
return Err(NovaError::InvalidSumcheckProof); + } + + // Create evaluation instances for W(r_y[1..]) and E(r_x) + let u_vec = { + let mut u_vec = Vec::with_capacity(2 * num_instances); + u_vec.extend(zip_with!(iter, (self.evals_W, U, r_y), |eval, u, r_y| { + PolyEvalInstance { c: u.comm_W, x: r_y[1..].to_vec(), e: *eval } + })); + + u_vec.extend(zip_with!(iter, (self.evals_E, U, r_x), |eval, u, r_x| { + PolyEvalInstance { c: u.comm_E, x: r_x.to_vec(), e: *eval } + })); + u_vec + }; + + let batched_u = + batch_eval_verify(u_vec, &mut transcript, &self.sc_proof_batch, &self.evals_batch)?; + + // verify + EE::verify( + &vk.vk_ee, + &mut transcript, + &batched_u.c, + &batched_u.x, + &batched_u.e, + &self.eval_arg, + )?; + + Ok(()) + } +} + +impl> RelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + >::ck_floor() + } + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + >::initialize_pk(ck, vk_digest) + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + >::setup(ck, vec![S]) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + let slice_U = slice::from_ref(U); + let slice_W = slice::from_ref(W); + >::prove(ck, pk, vec![S], slice_U, slice_W) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let slice = slice::from_ref(U); + >::verify(self, vk, slice) + } +} diff --git a/prover/src/spartan/batched_ppsnark.rs b/prover/src/spartan/batched_ppsnark.rs new file mode 100644 index 0000000..7e963a4 --- /dev/null +++ b/prover/src/spartan/batched_ppsnark.rs @@ -0,0 +1,1261 @@ +//! 
batched pp snark + +use core::slice; +use std::sync::Arc; + +use ff::Field; +use itertools::{chain, Itertools as _}; +use once_cell::sync::*; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, + identity::IdentityPolynomial, + masked_eq::MaskedEqPolynomial, + multilinear::{MultilinearPolynomial, SparsePolynomial}, + power::PowPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + powers, + ppsnark::{R1CSShapeSparkCommitment, R1CSShapeSparkRepr}, + sumcheck::{ + engine::{ + InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, SumcheckEngine, + WitnessBoundSumcheck, + }, + SumcheckProof, + }, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + evaluation::EvaluationEngineTrait, + snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + zip_with, zip_with_for_each, Commitment, CommitmentKey, CompressedCommitment, +}; + +/// A type that represents the prover's key +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct ProverKey> { + pk_ee: EE::ProverKey, + S_repr: Vec>, + S_comm: Vec>, + vk_digest: E::Scalar, // digest of verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Deserialize, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + vk_ee: EE::VerifierKey, + S_comm: Vec>, + num_vars: Vec, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} +impl> VerifierKey { + fn new( + num_vars: Vec, + S_comm: Vec>, + vk_ee: EE::VerifierKey, + ) -> Self { + Self { num_vars, S_comm, vk_ee, digest: Default::default() } + } +} + +impl> SimpleDigestible for VerifierKey {} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of 
the verifier's key + fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct BatchedRelaxedR1CSSNARK> { + // commitment to oracles: the first three are for Az, Bz, Cz, + // and the last two are for memory reads + comms_Az_Bz_Cz: Vec<[CompressedCommitment; 3]>, + comms_L_row_col: Vec<[CompressedCommitment; 2]>, + // commitments to aid the memory checks + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + comms_mem_oracles: Vec<[CompressedCommitment; 4]>, + + // claims about Az, Bz, and Cz polynomials + evals_Az_Bz_Cz_at_tau: Vec<[E::Scalar; 3]>, + + // sum-check + sc: SumcheckProof, + + // claims from the end of sum-check + evals_Az_Bz_Cz_W_E: Vec<[E::Scalar; 5]>, + evals_L_row_col: Vec<[E::Scalar; 2]>, + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + evals_mem_oracle: Vec<[E::Scalar; 4]>, + // [val_A, val_B, val_C, row, col, ts_row, ts_col] + evals_mem_preprocessed: Vec<[E::Scalar; 7]>, + + // a PCS evaluation argument + eval_arg: EE::EvaluationArgument, +} + +impl> BatchedRelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + Box::new(|shape: &R1CSShape| -> usize { + // the commitment key should be large enough to commit to the R1CS matrices + std::cmp::max( + shape.A.len() + shape.B.len() + shape.C.len(), + std::cmp::max(shape.num_cons, 2 * shape.num_vars), + ) + }) + } + + fn initialize_pk( + _ck: Arc>, + _vk_digest: E::Scalar, + ) -> Result { + 
todo!("unimplemented for batched_ppsnark"); + } + + fn setup( + ck: Arc>, + S: Vec<&R1CSShape>, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + for s in S.iter() { + // check the provided commitment key meets minimal requirements + if ck.length() < >::ck_floor()(s) { + // return Err(NovaError::InvalidCommitmentKeyLength); + return Err(NovaError::InternalError); + } + } + let (pk_ee, vk_ee) = EE::setup(ck.clone()); + + let S = S.iter().map(|s| s.pad()).collect::>(); + let S_repr = S.iter().map(R1CSShapeSparkRepr::new).collect::>(); + let S_comm = S_repr.iter().map(|s_repr| s_repr.commit(&*ck)).collect::>(); + let num_vars = S.iter().map(|s| s.num_vars).collect::>(); + let vk = VerifierKey::new(num_vars, S_comm.clone(), vk_ee); + let pk = ProverKey { pk_ee, S_repr, S_comm, vk_digest: vk.digest() }; + Ok((pk, vk)) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result { + // Pad shapes so that num_vars = num_cons = Nᵢ and check the sizes are correct + let S = S.par_iter().map(|s| s.pad()).collect::>(); + + // N[i] = max(|Aᵢ|+|Bᵢ|+|Cᵢ|, 2*num_varsᵢ, num_consᵢ) + let Nis = pk.S_repr.iter().map(|s| s.N).collect::>(); + assert!(Nis.iter().all(|&Ni| Ni.is_power_of_two())); + let N_max = *Nis.iter().max().unwrap(); + + let num_instances = U.len(); + + // Pad [(Wᵢ,Eᵢ)] to the next power of 2 (not to Ni) + let W = zip_with!(par_iter, (W, S), |w, s| w.pad(s)).collect::>>(); + + // number of rounds of sum-check + let num_rounds_sc = N_max.log_2(); + + // Initialize transcript with vk || [Uᵢ] + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + transcript.absorb(b"vk", &pk.vk_digest); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", &num_instances_field); + } + transcript.absorb(b"U", &U); + + // Append public inputs to Wᵢ: Zᵢ = [Wᵢ, uᵢ, Xᵢ] + let polys_Z = zip_with!(par_iter, (W, 
U, Nis), |W, U, Ni| { + // poly_Z will be resized later, so we preallocate the correct capacity + let mut poly_Z = Vec::with_capacity(*Ni); + poly_Z.extend(W.W.iter().chain([&U.u]).chain(U.X.iter())); + poly_Z + }) + .collect::>>(); + + // Move polys_W and polys_E, as well as U.u out of U + let (comms_W_E, us): (Vec<_>, Vec<_>) = U.iter().map(|U| ([U.comm_W, U.comm_E], U.u)).unzip(); + let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); + + // Compute [Az, Bz, Cz] + let mut polys_Az_Bz_Cz = zip_with!(par_iter, (polys_Z, S), |z, s| { + let (Az, Bz, Cz) = s.multiply_vec(z)?; + Ok([Az, Bz, Cz]) + }) + .collect::, NovaError>>()?; + + // Commit to [Az, Bz, Cz] and add to transcript + let comms_Az_Bz_Cz = polys_Az_Bz_Cz + .par_iter() + .map(|[Az, Bz, Cz]| { + let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( + || E::CE::commit(ck, Az), + || rayon::join(|| E::CE::commit(ck, Bz), || E::CE::commit(ck, Cz)), + ); + [comm_Az, comm_Bz, comm_Cz] + }) + .collect::>(); + comms_Az_Bz_Cz.iter().for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); + + // Compute eq(tau) for each instance in log2(Ni) variables + let tau = transcript.squeeze(b"t")?; + let all_taus = PowPolynomial::squares(&tau, N_max.log_2()); + + let (polys_tau, coords_tau): (Vec<_>, Vec<_>) = Nis + .par_iter() + .map(|&N_i| { + let log_Ni = N_i.log_2(); + let eqp: EqPolynomial<_> = all_taus[..log_Ni].iter().cloned().collect(); + let evals = eqp.evals(); + let coords = eqp.r; + (evals, coords) + }) + .unzip(); + + // Pad [Az, Bz, Cz] to Ni + polys_Az_Bz_Cz.par_iter_mut().zip_eq(Nis.par_iter()).for_each(|(az_bz_cz, &Ni)| { + az_bz_cz.par_iter_mut().for_each(|mz| mz.resize(Ni, E::Scalar::ZERO)) + }); + + // Evaluate and commit to [Az(tau), Bz(tau), Cz(tau)] + let evals_Az_Bz_Cz_at_tau = + zip_with!(par_iter, (polys_Az_Bz_Cz, coords_tau), |ABCs, tau_coords| { + let [Az, Bz, Cz] = ABCs; + let (eval_Az, (eval_Bz, eval_Cz)) = rayon::join( + || 
MultilinearPolynomial::evaluate_with(Az, tau_coords), + || { + rayon::join( + || MultilinearPolynomial::evaluate_with(Bz, tau_coords), + || MultilinearPolynomial::evaluate_with(Cz, tau_coords), + ) + }, + ); + [eval_Az, eval_Bz, eval_Cz] + }) + .collect::>(); + + // absorb the claimed evaluations into the transcript + for evals in evals_Az_Bz_Cz_at_tau.iter() { + transcript.absorb(b"e", &evals.as_slice()); + } + + // Pad Zᵢ, E to Nᵢ + let polys_Z = polys_Z + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_Z, &Ni)| { + poly_Z.resize(Ni, E::Scalar::ZERO); + poly_Z + }) + .collect::>(); + + // Pad both W,E to have the same size. This is inefficient for W since the + // second half is empty, but it makes it easier to handle the batching + // at the end. + let polys_E = polys_E + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_E, &Ni)| { + poly_E.resize(Ni, E::Scalar::ZERO); + poly_E + }) + .collect::>(); + + let polys_W = polys_W + .into_par_iter() + .zip_eq(Nis.par_iter()) + .map(|(mut poly_W, &Ni)| { + poly_W.resize(Ni, E::Scalar::ZERO); + poly_W + }) + .collect::>(); + + // (2) send commitments to the following two oracles + // L_row(i) = eq(tau, row(i)) for all i in [0..Nᵢ] + // L_col(i) = z(col(i)) for all i in [0..Nᵢ] + let polys_L_row_col = + zip_with!(par_iter, (S, Nis, polys_Z, polys_tau), |S, Ni, poly_Z, poly_tau| { + let mut L_row = vec![poly_tau[0]; *Ni]; // we place mem_row[0] since resized row is appended with 0s + let mut L_col = vec![poly_Z[Ni - 1]; *Ni]; // we place mem_col[Ni-1] since resized col is appended with Ni-1 + + for (i, (val_r, val_c)) in S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, c, _)| (poly_tau[r], poly_Z[c])) + .enumerate() + { + L_row[i] = val_r; + L_col[i] = val_c; + } + + [L_row, L_col] + }) + .collect::>(); + + let comms_L_row_col = polys_L_row_col + .par_iter() + .map(|[L_row, L_col]| { + let (comm_L_row, comm_L_col) = + rayon::join(|| E::CE::commit(ck, L_row), || 
E::CE::commit(ck, L_col)); + [comm_L_row, comm_L_col] + }) + .collect::>(); + + // absorb commitments to L_row and L_col in the transcript + for comms in comms_L_row_col.iter() { + transcript.absorb(b"e", &comms.as_slice()); + } + + // For each instance, batch Mz = Az + c*Bz + c^2*Cz + let c = transcript.squeeze(b"c")?; + + let polys_Mz: Vec<_> = polys_Az_Bz_Cz + .par_iter() + .map(|polys_Az_Bz_Cz| { + let poly_vec: Vec<&Vec<_>> = polys_Az_Bz_Cz.iter().collect(); + let w = PolyEvalWitness::::batch(&poly_vec[..], &c); + w.p + }) + .collect(); + + let evals_Mz: Vec<_> = zip_with!( + iter, + (comms_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau), + |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { + let u = PolyEvalInstance::::batch( + comm_Az_Bz_Cz.as_slice(), + vec![], // ignored by the function + evals_Az_Bz_Cz_at_tau.as_slice(), + &c, + ); + u.e + } + ) + .collect(); + + // we now need to prove three claims for each instance + // (outer) + // 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)) + // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = + // (Az+c*Bz+c^2*Cz)(tau) (inner) + // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = \sum_y + // L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) * L_col(y) + // (mem) + // L_row(i) = eq(tau, row(i)) + // L_col(i) = z(col(i)) + let outer_sc_inst = zip_with!( + ( + polys_Az_Bz_Cz.par_iter(), + polys_E.par_iter(), + polys_Mz.into_par_iter(), + polys_tau.par_iter(), + evals_Mz.par_iter(), + us.par_iter() + ), + |poly_ABC, poly_E, poly_Mz, poly_tau, eval_Mz, u| { + let [poly_Az, poly_Bz, poly_Cz] = poly_ABC; + let poly_uCz_E = zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| *u * cz + e).collect(); + OuterSumcheckInstance::new( + poly_tau.clone(), + poly_Az.clone(), + poly_Bz.clone(), + poly_uCz_E, + poly_Mz, // Mz = Az + c * Bz + c^2 * Cz + eval_Mz, // eval_Az_at_tau + c * eval_Az_at_tau + c^2 * eval_Cz_at_tau + ) + } + ) + .collect::>(); + + let inner_sc_inst = + zip_with!(par_iter, (pk.S_repr, 
evals_Mz, polys_L_row_col), |s_repr, eval_Mz, poly_L| { + let [poly_L_row, poly_L_col] = poly_L; + let c_square = c.square(); + let val = + zip_with!(par_iter, (s_repr.val_A, s_repr.val_B, s_repr.val_C), |v_a, v_b, v_c| *v_a + + c * *v_b + + c_square * *v_c) + .collect::>(); + + InnerSumcheckInstance::new( + *eval_Mz, + MultilinearPolynomial::new(poly_L_row.clone()), + MultilinearPolynomial::new(poly_L_col.clone()), + MultilinearPolynomial::new(val), + ) + }) + .collect::>(); + + // a third sum-check instance to prove the read-only memory claim + // we now need to prove that L_row and L_col are well-formed + let (mem_sc_inst, comms_mem_oracles, polys_mem_oracles) = { + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + // We start by computing oracles and auxiliary polynomials to help prove the + // claim oracles correspond to [t_plus_r_inv_row, w_plus_r_inv_row, + // t_plus_r_inv_col, w_plus_r_inv_col] + let (comms_mem_oracles, polys_mem_oracles, mem_aux) = pk + .S_repr + .iter() + .zip_eq(polys_tau.iter()) + .zip_eq(polys_Z.iter()) + .zip_eq(polys_L_row_col.iter()) + .try_fold( + (Vec::new(), Vec::new(), Vec::new()), + |(mut comms, mut polys, mut aux), (((s_repr, poly_tau), poly_Z), [L_row, L_col])| { + let (comm, poly, a) = MemorySumcheckInstance::::compute_oracles( + ck, + &r, + &gamma, + poly_tau, + &s_repr.row, + L_row, + &s_repr.ts_row, + poly_Z, + &s_repr.col, + L_col, + &s_repr.ts_col, + )?; + + comms.push(comm); + polys.push(poly); + aux.push(a); + + Ok::<_, NovaError>((comms, polys, aux)) + }, + )?; + + // Commit to oracles + for comms in comms_mem_oracles.iter() { + transcript.absorb(b"l", &comms.as_slice()); + } + + // Sample new random variable for eq polynomial + let rho = transcript.squeeze(b"r")?; + let all_rhos = PowPolynomial::squares(&rho, N_max.log_2()); + + let instances = zip_with!( + ( + pk.S_repr.par_iter(), + Nis.par_iter(), + polys_mem_oracles.par_iter(), + mem_aux.into_par_iter() + ), + |s_repr, Ni, 
polys_mem_oracles, polys_aux| { + MemorySumcheckInstance::::new( + polys_mem_oracles.clone(), + polys_aux, + PowPolynomial::evals_with_powers(&all_rhos, Ni.log_2()), + s_repr.ts_row.clone(), + s_repr.ts_col.clone(), + ) + } + ) + .collect::>(); + (instances, comms_mem_oracles, polys_mem_oracles) + }; + + let witness_sc_inst = zip_with!(par_iter, (polys_W, S), |poly_W, S| { + WitnessBoundSumcheck::new(tau, poly_W.clone(), S.num_vars) + }) + .collect::>(); + + // Run batched Sumcheck for the 3 claims for all instances. + // Note that the polynomials for claims relating to instance i have size Ni. + let (sc, rand_sc, claims_outer, claims_inner, claims_mem, claims_witness) = Self::prove_helper( + num_rounds_sc, + mem_sc_inst, + outer_sc_inst, + inner_sc_inst, + witness_sc_inst, + &mut transcript, + )?; + + let (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) = { + let evals_Az_Bz = + claims_outer.into_iter().map(|claims| [claims[0][0], claims[0][1]]).collect::>(); + + let evals_L_row_col = claims_inner + .into_iter() + .map(|claims| { + // [L_row, L_col] + [claims[0][0], claims[0][1]] + }) + .collect::>(); + + let (evals_mem_oracle, evals_mem_ts): (Vec<_>, Vec<_>) = claims_mem + .into_iter() + .map(|claims| { + ( + // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] + [claims[0][0], claims[0][1], claims[1][0], claims[1][1]], + // [ts_row, ts_col] + [claims[0][2], claims[1][2]], + ) + }) + .unzip(); + + let evals_W = claims_witness.into_iter().map(|claims| claims[0][0]).collect::>(); + + let (evals_Cz_E, evals_mem_val_row_col): (Vec<_>, Vec<_>) = + zip_with!(iter, (polys_Az_Bz_Cz, polys_E, pk.S_repr), |ABCzs, poly_E, s_repr| { + let [_, _, Cz] = ABCzs; + let log_Ni = s_repr.N.log_2(); + let (_, rand_sc) = rand_sc.split_at(num_rounds_sc - log_Ni); + let rand_sc_evals = EqPolynomial::evals_from_points(rand_sc); + let e = + [Cz, poly_E, &s_repr.val_A, &s_repr.val_B, &s_repr.val_C, &s_repr.row, &s_repr.col] + 
.into_iter() + .map(|p| { + // Manually compute evaluation to avoid recomputing rand_sc_evals + zip_with!(par_iter, (p, rand_sc_evals), |p, eq| *p * eq).sum() + }) + .collect::>(); + ([e[0], e[1]], [e[2], e[3], e[4], e[5], e[6]]) + }) + .unzip(); + + let evals_Az_Bz_Cz_W_E = + zip_with!((evals_Az_Bz.into_iter(), evals_Cz_E.into_iter(), evals_W), |Az_Bz, Cz_E, W| { + let [Az, Bz] = Az_Bz; + let [Cz, E] = Cz_E; + [Az, Bz, Cz, W, E] + }) + .collect::>(); + + // [val_A, val_B, val_C, row, col, ts_row, ts_col] + let evals_mem_preprocessed = zip_with!( + (evals_mem_val_row_col.into_iter(), evals_mem_ts), + |eval_mem_val_row_col, eval_mem_ts| { + let [val_A, val_B, val_C, row, col] = eval_mem_val_row_col; + let [ts_row, ts_col] = eval_mem_ts; + [val_A, val_B, val_C, row, col, ts_row, ts_col] + } + ) + .collect::>(); + (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) + }; + + let evals_vec = zip_with!( + iter, + (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed), + |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { + chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed].cloned().collect::>() + } + ) + .collect::>(); + + let comms_vec = zip_with!( + iter, + (comms_Az_Bz_Cz, comms_W_E, comms_L_row_col, comms_mem_oracles, pk.S_comm), + |Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, S_comm| { + chain![Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, [ + &S_comm.comm_val_A, + &S_comm.comm_val_B, + &S_comm.comm_val_C, + &S_comm.comm_row, + &S_comm.comm_col, + &S_comm.comm_ts_row, + &S_comm.comm_ts_col, + ]] + } + ) + .flatten() + .cloned() + .collect::>(); + + let w_vec = zip_with!( + ( + polys_Az_Bz_Cz.into_iter(), + polys_W.into_iter(), + polys_E.into_iter(), + polys_L_row_col.into_iter(), + polys_mem_oracles.into_iter(), + pk.S_repr.iter() + ), + |Az_Bz_Cz, W, E, L_row_col, mem_oracles, S_repr| { + chain![Az_Bz_Cz, [W, E], L_row_col, mem_oracles, [ + S_repr.val_A.clone(), + S_repr.val_B.clone(), + 
S_repr.val_C.clone(), + S_repr.row.clone(), + S_repr.col.clone(), + S_repr.ts_row.clone(), + S_repr.ts_col.clone(), + ]] + } + ) + .flatten() + .map(|p| PolyEvalWitness:: { p }) + .collect::>(); + + for evals in evals_vec.iter() { + transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already + // in the transcript + } + let evals_vec = evals_vec.into_iter().flatten().collect::>(); + + let c = transcript.squeeze(b"c")?; + + // Compute number of variables for each polynomial + let num_vars_u = w_vec.iter().map(|w| w.p.len().log_2()).collect::>(); + let u_batch = + PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars_u, rand_sc, c); + let w_batch = + PolyEvalWitness::::batch_diff_size(&w_vec.iter().by_ref().collect::>(), c); + + let eval_arg = + EE::prove(ck, &pk.pk_ee, &mut transcript, &u_batch.c, &w_batch.p, &u_batch.x, &u_batch.e)?; + + let comms_Az_Bz_Cz = + comms_Az_Bz_Cz.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + let comms_L_row_col = + comms_L_row_col.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + let comms_mem_oracles = + comms_mem_oracles.into_iter().map(|comms| comms.map(|comm| comm.compress())).collect(); + + Ok(Self { + comms_Az_Bz_Cz, + comms_L_row_col, + comms_mem_oracles, + evals_Az_Bz_Cz_at_tau, + sc, + evals_Az_Bz_Cz_W_E, + evals_L_row_col, + evals_mem_oracle, + evals_mem_preprocessed, + eval_arg, + }) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError> { + let num_instances = U.len(); + let num_claims_per_instance = 10; + + // number of rounds of sum-check + let num_rounds = vk.S_comm.iter().map(|s| s.N.log_2()).collect::>(); + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); + + transcript.absorb(b"vk", &vk.digest()); + if num_instances > 1 { + let num_instances_field = E::Scalar::from(num_instances as u64); + transcript.absorb(b"n", 
&num_instances_field); + } + transcript.absorb(b"U", &U); + + // Decompress commitments + let comms_Az_Bz_Cz = self + .comms_Az_Bz_Cz + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + let comms_L_row_col = self + .comms_L_row_col + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + let comms_mem_oracles = self + .comms_mem_oracles + .iter() + .map(|comms| comms.iter().map(Commitment::::decompress).collect::, _>>()) + .collect::, _>>()?; + + // Add commitments [Az, Bz, Cz] to the transcript + comms_Az_Bz_Cz.iter().for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); + + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_max).coordinates(); + + // absorb the claimed evaluations into the transcript + self.evals_Az_Bz_Cz_at_tau.iter().for_each(|evals| { + transcript.absorb(b"e", &evals.as_slice()); + }); + + // absorb commitments to L_row and L_col in the transcript + for comms in comms_L_row_col.iter() { + transcript.absorb(b"e", &comms.as_slice()); + } + + // Batch at tau for each instance + let c = transcript.squeeze(b"c")?; + + // Compute eval_Mz = eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau + let evals_Mz: Vec<_> = zip_with!( + iter, + (comms_Az_Bz_Cz, self.evals_Az_Bz_Cz_at_tau), + |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { + let u = PolyEvalInstance::::batch( + comm_Az_Bz_Cz.as_slice(), + tau_coords.clone(), + evals_Az_Bz_Cz_at_tau.as_slice(), + &c, + ); + u.e + } + ) + .collect(); + + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + for comms in comms_mem_oracles.iter() { + transcript.absorb(b"l", &comms.as_slice()); + } + + let rho = transcript.squeeze(b"r")?; + + let s = transcript.squeeze(b"r")?; + let s_powers = powers(&s, num_instances * num_claims_per_instance); + + let (claim_sc_final, rand_sc) = { + // Gather all claims into a single vector + 
let claims = evals_Mz + .iter() + .flat_map(|&eval_Mz| { + let mut claims = vec![E::Scalar::ZERO; num_claims_per_instance]; + claims[7] = eval_Mz; + claims[8] = eval_Mz; + claims.into_iter() + }) + .collect::>(); + + // Number of rounds for each claim + let num_rounds_by_claim = num_rounds + .iter() + .flat_map(|num_rounds_i| vec![*num_rounds_i; num_claims_per_instance].into_iter()) + .collect::>(); + + self.sc.verify_batch(&claims, &num_rounds_by_claim, &s_powers, 3, &mut transcript)? + }; + + // Truncated sumcheck randomness for each instance + let rand_sc_i = num_rounds + .iter() + .map(|num_rounds| rand_sc[(num_rounds_max - num_rounds)..].to_vec()) + .collect::>(); + + let claim_sc_final_expected = zip_with!( + ( + vk.num_vars.iter(), + rand_sc_i.iter(), + U.iter(), + self.evals_Az_Bz_Cz_W_E.iter().cloned(), + self.evals_L_row_col.iter().cloned(), + self.evals_mem_oracle.iter().cloned(), + self.evals_mem_preprocessed.iter().cloned() + ), + |num_vars, + rand_sc, + U, + evals_Az_Bz_Cz_W_E, + evals_L_row_col, + eval_mem_oracle, + eval_mem_preprocessed| { + let [Az, Bz, Cz, W, E] = evals_Az_Bz_Cz_W_E; + let [L_row, L_col] = evals_L_row_col; + let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = + eval_mem_oracle; + let [val_A, val_B, val_C, row, col, ts_row, ts_col] = eval_mem_preprocessed; + + let num_rounds_i = rand_sc.len(); + let num_vars_log = num_vars.log_2(); + + let eq_rho = PowPolynomial::new(&rho, num_rounds_i).evaluate(rand_sc); + + let (eq_tau, eq_masked_tau) = { + let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_i).into(); + + let eq_tau_at_rand = eq_tau.evaluate(rand_sc); + let eq_masked_tau = MaskedEqPolynomial::new(&eq_tau, num_vars_log).evaluate(rand_sc); + + (eq_tau_at_rand, eq_masked_tau) + }; + + // Evaluate identity polynomial + let id = IdentityPolynomial::new(num_rounds_i).evaluate(rand_sc); + + let Z = { + // rand_sc was padded, so we now remove the padding + let (factor, rand_sc_unpad) = { + let l 
= num_rounds_i - (num_vars_log + 1); + + let (rand_sc_lo, rand_sc_hi) = rand_sc.split_at(l); + + let factor = + rand_sc_lo.iter().fold(E::Scalar::ONE, |acc, r_p| acc * (E::Scalar::ONE - r_p)); + + (factor, rand_sc_hi) + }; + + let X = { + // constant term + let poly_X = std::iter::once(U.u).chain(U.X.iter().cloned()).collect(); + SparsePolynomial::new(num_vars_log, poly_X).evaluate(&rand_sc_unpad[1..]) + }; + + // W was evaluated as if it was padded to logNi variables, + // so we don't multiply it by (1-rand_sc_unpad[0]) + W + factor * rand_sc_unpad[0] * X + }; + + let t_plus_r_row = { + let addr_row = id; + let val_row = eq_tau; + let t = addr_row + gamma * val_row; + t + r + }; + + let w_plus_r_row = { + let addr_row = row; + let val_row = L_row; + let w = addr_row + gamma * val_row; + w + r + }; + + let t_plus_r_col = { + let addr_col = id; + let val_col = Z; + let t = addr_col + gamma * val_col; + t + r + }; + + let w_plus_r_col = { + let addr_col = col; + let val_col = L_col; + let w = addr_col + gamma * val_col; + w + r + }; + + let claims_mem = [ + t_plus_r_inv_row - w_plus_r_inv_row, + t_plus_r_inv_col - w_plus_r_inv_col, + eq_rho * (t_plus_r_inv_row * t_plus_r_row - ts_row), + eq_rho * (w_plus_r_inv_row * w_plus_r_row - E::Scalar::ONE), + eq_rho * (t_plus_r_inv_col * t_plus_r_col - ts_col), + eq_rho * (w_plus_r_inv_col * w_plus_r_col - E::Scalar::ONE), + ]; + + let claims_outer = [eq_tau * (Az * Bz - U.u * Cz - E), eq_tau * (Az + c * Bz + c * c * Cz)]; + let claims_inner = [L_row * L_col * (val_A + c * val_B + c * c * val_C)]; + + let claims_witness = [eq_masked_tau * W]; + + chain![claims_mem, claims_outer, claims_inner, claims_witness] + } + ) + .flatten() + .zip_eq(s_powers) + .fold(E::Scalar::ZERO, |acc, (claim, s)| acc + s * claim); + + if claim_sc_final_expected != claim_sc_final { + return Err(NovaError::InvalidSumcheckProof); + } + + let evals_vec = zip_with!( + iter, + ( + self.evals_Az_Bz_Cz_W_E, + self.evals_L_row_col, + self.evals_mem_oracle, + 
self.evals_mem_preprocessed + ), + |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { + chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed].cloned().collect::>() + } + ) + .collect::>(); + + // Add all Sumcheck evaluations to the transcript + for evals in evals_vec.iter() { + transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already + // in the transcript + } + + let c = transcript.squeeze(b"c")?; + + // Compute batched polynomial evaluation instance at rand_sc + let u = { + let num_evals = evals_vec[0].len(); + + let evals_vec = evals_vec.into_iter().flatten().collect::>(); + + let num_vars = num_rounds + .iter() + .flat_map(|num_rounds| vec![*num_rounds; num_evals].into_iter()) + .collect::>(); + + let comms_vec = zip_with!( + ( + comms_Az_Bz_Cz.into_iter(), + U.iter(), + comms_L_row_col.into_iter(), + comms_mem_oracles.into_iter(), + vk.S_comm.iter() + ), + |Az_Bz_Cz, U, L_row_col, mem_oracles, S_comm| { + chain![Az_Bz_Cz, [U.comm_W, U.comm_E], L_row_col, mem_oracles, [ + S_comm.comm_val_A, + S_comm.comm_val_B, + S_comm.comm_val_C, + S_comm.comm_row, + S_comm.comm_col, + S_comm.comm_ts_row, + S_comm.comm_ts_col, + ]] + } + ) + .flatten() + .collect::>(); + + PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars, rand_sc, c) + }; + + // verify + EE::verify(&vk.vk_ee, &mut transcript, &u.c, &u.x, &u.e, &self.eval_arg)?; + + Ok(()) + } +} + +impl> BatchedRelaxedR1CSSNARK { + /// Runs the batched Sumcheck protocol for the claims of multiple instance + /// of possibly different sizes. + /// + /// # Details + /// + /// In order to avoid padding all polynomials to the same maximum size, we + /// adopt the following strategy. + /// + /// Let n be the number of variables for the largest instance, + /// and let m be the number of variables for a shorter one. 
+ /// Let P(X_{0},...,X_{m-1}) be one of the MLEs of the short instance, which + /// has been committed to by taking the MSM of its evaluations with the + /// first 2^m basis points of the commitment key. + /// + /// This Sumcheck prover will interpret it as the polynomial + /// P'(X_{0},...,X_{n-1}) = P(X_{n-m},...,X_{n-1}), + /// whose MLE evaluations over {0,1}^m is equal to 2^{n-m} repetitions of + /// the evaluations of P. + /// + /// In order to account for these "imagined" repetitions, the initial claims + /// for this short instances are scaled by 2^{n-m}. + /// + /// For the first n-m rounds, the univariate polynomials relating to this + /// shorter claim will be constant, and equal to the initial claims, + /// scaled by 2^{n-m-i}, where i is the round number. By definition, P' + /// does not depend on X_i, so binding P' to r_i has no effect on the + /// evaluations. The Sumcheck prover will then interpret the polynomial + /// P' as having half as many repetitions in the next round. + /// + /// When we get to round n-m, the Sumcheck proceeds as usual since the + /// polynomials are the expected size for the round. + /// + /// Note that at the end of the protocol, the prover returns the evaluation + /// u' = P'(r_{0},...,r_{n-1}) = P(r_{n-m},...,r_{n-1}) + /// However, the polynomial we actually committed to over {0,1}^n is + /// P''(X_{0},...,X_{n-1}) = L_0(X_{0},...,X_{n-m-1}) * + /// P(X_{n-m},...,X_{n-1}) The SNARK prover/verifier will need to + /// rescale the evaluation by the first Lagrange polynomial + /// u'' = L_0(r_{0},...,r_{n-m-1}) * u' + /// in order batch all evaluations with a single PCS call. 
+ fn prove_helper( + num_rounds: usize, + mut mem: Vec, + mut outer: Vec, + mut inner: Vec, + mut witness: Vec, + transcript: &mut E::TE, + ) -> Result< + ( + SumcheckProof, + Vec, + Vec>>, + Vec>>, + Vec>>, + Vec>>, + ), + NovaError, + > + where + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, + T4: SumcheckEngine, + { + // sanity checks + let num_instances = mem.len(); + assert_eq!(outer.len(), num_instances); + assert_eq!(inner.len(), num_instances); + assert_eq!(witness.len(), num_instances); + + for inst in mem.iter_mut() { + assert!(inst.size().is_power_of_two()); + } + for inst in outer.iter() { + assert!(inst.size().is_power_of_two()); + } + for inst in inner.iter() { + assert!(inst.size().is_power_of_two()); + } + for inst in witness.iter() { + assert!(inst.size().is_power_of_two()); + } + + let degree = mem[0].degree(); + assert!(mem.iter().all(|inst| inst.degree() == degree)); + assert!(outer.iter().all(|inst| inst.degree() == degree)); + assert!(inner.iter().all(|inst| inst.degree() == degree)); + assert!(witness.iter().all(|inst| inst.degree() == degree)); + + // Collect all claims from the instances. If the instances is defined over `m` + // variables, which is less that the total number of rounds `n`, + // the individual claims σ are scaled by 2^{n-m}. + let claims = zip_with!(iter, (mem, outer, inner, witness), |mem, outer, inner, witness| { + Self::scaled_claims(mem, num_rounds) + .into_iter() + .chain(Self::scaled_claims(outer, num_rounds)) + .chain(Self::scaled_claims(inner, num_rounds)) + .chain(Self::scaled_claims(witness, num_rounds)) + }) + .flatten() + .collect::>(); + + // Sample a challenge for the random linear combination of all scaled claims + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, claims.len()); + + // At the start of each round, the running claim is equal to the random linear + // combination of the Sumcheck claims, evaluated over the bound + // polynomials. 
Initially, it is equal to the random linear combination + // of the scaled input claims. + let mut running_claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); + + // Keep track of the verifier challenges r, and the univariate polynomials sent + // by the prover in each round + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + + for i in 0..num_rounds { + // At the start of round i, there input polynomials are defined over at most n-i + // variables. + let remaining_variables = num_rounds - i; + + // For each claim j, compute the evaluations of its univariate polynomial + // S_j(X_i) at X = 0, 2, 3. The polynomial is such that + // S_{j-1}(r_{j-1}) = S_j(0) + S_j(1). If the number of variable m + // of the claim is m < n-i, then the polynomial is constants and + // equal to the initial claim σ_j scaled by 2^{n-m-i-1}. + let evals = + zip_with!(par_iter, (mem, outer, inner, witness), |mem, outer, inner, witness| { + let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( + || { + rayon::join( + || Self::get_evals(mem, remaining_variables), + || Self::get_evals(outer, remaining_variables), + ) + }, + || { + rayon::join( + || Self::get_evals(inner, remaining_variables), + || Self::get_evals(witness, remaining_variables), + ) + }, + ); + evals_mem + .into_par_iter() + .chain(evals_outer.into_par_iter()) + .chain(evals_inner.into_par_iter()) + .chain(evals_witness.into_par_iter()) + }) + .flatten() + .collect::>(); + + assert_eq!(evals.len(), claims.len()); + + // Random linear combination of the univariate evaluations at X_i = 0, 2, 3 + let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + running_claim - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + // 
Coefficient representation of S(X_i) + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // Bind the variable X_i of polynomials across all claims to r_i. + // If the claim is defined over m variables and m < n-i, then + // binding has no effect on the polynomial. + zip_with_for_each!( + par_iter_mut, + (mem, outer, inner, witness), + |mem, outer, inner, witness| { + rayon::join( + || { + rayon::join( + || Self::bind(mem, remaining_variables, &r_i), + || Self::bind(outer, remaining_variables, &r_i), + ) + }, + || { + rayon::join( + || Self::bind(inner, remaining_variables, &r_i), + || Self::bind(witness, remaining_variables, &r_i), + ) + }, + ); + } + ); + + running_claim = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + // Collect evaluations at (r_{n-m}, ..., r_{n-1}) of polynomials over all + // claims, where m is the initial number of variables the individual + // claims are defined over. + let claims_outer = outer.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_inner = inner.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_mem = mem.into_iter().map(|inst| inst.final_claims()).collect(); + let claims_witness = witness.into_iter().map(|inst| inst.final_claims()).collect(); + + Ok((SumcheckProof::new(cubic_polys), r, claims_outer, claims_inner, claims_mem, claims_witness)) + } + + /// In round i, computes the evaluations at X_i = 0, 2, 3 of the univariate + /// polynomials S(X_i) for each claim in the instance. + /// Let `n` be the total number of Sumcheck rounds, and assume the instance + /// is defined over `m` variables. We define `remaining_variables` as + /// n-i. 
If m < n-i, then the polynomials in the instance are not + /// defined over X_i, so the univariate polynomial is constant and equal + /// to 2^{n-m-i-1}*σ, where σ is the initial claim. + fn get_evals>(inst: &T, remaining_variables: usize) -> Vec> { + let num_instance_variables = inst.size().log_2(); // m + if num_instance_variables < remaining_variables { + let deg = inst.degree(); + + // The evaluations at X_i = 0, 2, 3 are all equal to the scaled claim + Self::scaled_claims(inst, remaining_variables - 1) + .into_iter() + .map(|scaled_claim| vec![scaled_claim; deg]) + .collect() + } else { + inst.evaluation_points() + } + } + + /// In round i after receiving challenge r_i, we partially evaluate all + /// polynomials in the instance at X_i = r_i. If the instance is defined + /// over m variables m which is less than n-i, then the polynomials do + /// not depend on X_i, so binding them to r_i has no effect. + fn bind>(inst: &mut T, remaining_variables: usize, r: &E::Scalar) { + let num_instance_variables = inst.size().log_2(); // m + if remaining_variables <= num_instance_variables { + inst.bound(r) + } + } + + /// Given an instance defined over m variables, the sum over n = + /// `remaining_variables` is equal to the initial claim scaled by + /// 2^{n-m}, when m ≤ n. 
+ fn scaled_claims>(inst: &T, remaining_variables: usize) -> Vec { + let num_instance_variables = inst.size().log_2(); // m + let num_repetitions = 1 << (remaining_variables - num_instance_variables); + let scaling = E::Scalar::from(num_repetitions as u64); + inst.initial_claims().iter().map(|claim| scaling * claim).collect() + } +} + +impl> RelaxedR1CSSNARKTrait + for BatchedRelaxedR1CSSNARK +{ + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + >::ck_floor() + } + + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result { + >::initialize_pk(ck, vk_digest) + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + >::setup(ck, vec![S]) + } + + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + let slice_U = slice::from_ref(U); + let slice_W = slice::from_ref(W); + + >::prove(ck, pk, vec![S], slice_U, slice_W) + } + + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let slice = slice::from_ref(U); + >::verify(self, vk, slice) + } +} diff --git a/src/spartan/macros.rs b/prover/src/spartan/macros.rs similarity index 99% rename from src/spartan/macros.rs rename to prover/src/spartan/macros.rs index 0f2d943..c0e3cf8 100644 --- a/src/spartan/macros.rs +++ b/prover/src/spartan/macros.rs @@ -14,7 +14,6 @@ /// /// println!("{:?}", res); // Output: [6, 9, 12] /// ``` - #[macro_export] macro_rules! 
zip_with { // no iterator projection specified: the macro assumes the arguments *are* iterators diff --git a/prover/src/spartan/math.rs b/prover/src/spartan/math.rs new file mode 100644 index 0000000..94b35c5 --- /dev/null +++ b/prover/src/spartan/math.rs @@ -0,0 +1,15 @@ +pub trait Math { + fn log_2(self) -> usize; +} + +impl Math for usize { + fn log_2(self) -> usize { + assert_ne!(self, 0); + + if self.is_power_of_two() { + (1usize.leading_zeros() - self.leading_zeros()) as Self + } else { + (0usize.leading_zeros() - self.leading_zeros()) as Self + } + } +} diff --git a/src/spartan/mod.rs b/prover/src/spartan/mod.rs similarity index 55% rename from src/spartan/mod.rs rename to prover/src/spartan/mod.rs index 2e0a078..6514e4d 100644 --- a/src/spartan/mod.rs +++ b/prover/src/spartan/mod.rs @@ -26,196 +26,168 @@ use rayon_scan::ScanParallelIterator as _; use ref_cast::RefCast; use crate::{ - r1cs::{R1CSShape, SparseMatrix}, - traits::Engine, - Commitment, + r1cs::{R1CSShape, SparseMatrix}, + traits::Engine, + Commitment, }; // Creates a vector of the first `n` powers of `s`. /// Creates a vector of the first `n` powers of `s`. pub fn powers(s: &F, n: usize) -> Vec { - assert!(n >= 1); - let mut v = vec![*s; n]; - v[0] = F::ONE; - v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect() + assert!(n >= 1); + let mut v = vec![*s; n]; + v[0] = F::ONE; + v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect() } /// A type that holds a witness to a polynomial evaluation instance #[repr(transparent)] #[derive(Debug, RefCast)] struct PolyEvalWitness { - p: Vec, // polynomial + p: Vec, // polynomial } impl PolyEvalWitness { - /// Given [Pᵢ] and s, compute P = ∑ᵢ sⁱ⋅Pᵢ - /// - /// # Details - /// - /// We allow the input polynomials to have different sizes, and interpret - /// smaller ones as being padded with 0 to the maximum size of all - /// polynomials. 
- fn batch_diff_size(W: &[&Self], s: E::Scalar) -> Self { - let powers = powers(&s, W.len()); - - let size_max = W.iter().map(|w| w.p.len()).max().unwrap(); - let p_vec = W.par_iter().map(|w| &w.p); - // Scale the input polynomials by the power of s - let p = zip_with!((p_vec, powers.par_iter()), |v, weight| { - // compute the weighted sum for each vector - v.iter() - .map(|&x| { - if *weight != E::Scalar::ONE { - x * *weight - } else { - x - } - }) - .collect::>() - }) - .reduce( - || vec![E::Scalar::ZERO; size_max], - |left, right| { - // Sum into the largest polynomial - let (mut big, small) = if left.len() > right.len() { - (left, right) - } else { - (right, left) - }; - - #[allow(clippy::disallowed_methods)] - big.par_iter_mut() - .zip(small.par_iter()) - .for_each(|(b, s)| *b += s); - - big - }, - ); - - Self { p } - } + /// Given [Pᵢ] and s, compute P = ∑ᵢ sⁱ⋅Pᵢ + /// + /// # Details + /// + /// We allow the input polynomials to have different sizes, and interpret + /// smaller ones as being padded with 0 to the maximum size of all + /// polynomials. 
+ fn batch_diff_size(W: &[&Self], s: E::Scalar) -> Self { + let powers = powers(&s, W.len()); + + let size_max = W.iter().map(|w| w.p.len()).max().unwrap(); + let p_vec = W.par_iter().map(|w| &w.p); + // Scale the input polynomials by the power of s + let p = zip_with!((p_vec, powers.par_iter()), |v, weight| { + // compute the weighted sum for each vector + v.iter().map(|&x| if *weight != E::Scalar::ONE { x * *weight } else { x }).collect::>() + }) + .reduce( + || vec![E::Scalar::ZERO; size_max], + |left, right| { + // Sum into the largest polynomial + let (mut big, small) = if left.len() > right.len() { (left, right) } else { (right, left) }; + + #[allow(clippy::disallowed_methods)] + big.par_iter_mut().zip(small.par_iter()).for_each(|(b, s)| *b += s); + + big + }, + ); - /// Given a set of polynomials \[Pᵢ\] and a scalar `s`, this method computes - /// the weighted sum of the polynomials, where each polynomial Pᵢ is - /// scaled by sⁱ. - /// - /// # Panics - /// - /// This method panics if the polynomials in `p_vec` are not all of the same - /// length. - fn batch(p_vec: &[&Vec], s: &E::Scalar) -> Self { - p_vec - .iter() - .skip(1) - .for_each(|p| assert_eq!(p.len(), p_vec[0].len())); - let instances = p_vec.iter().map(|p| Self::ref_cast(p)).collect::>(); - Self::batch_diff_size(&instances, *s) - } + Self { p } + } + + /// Given a set of polynomials \[Pᵢ\] and a scalar `s`, this method computes + /// the weighted sum of the polynomials, where each polynomial Pᵢ is + /// scaled by sⁱ. + /// + /// # Panics + /// + /// This method panics if the polynomials in `p_vec` are not all of the same + /// length. 
+ fn batch(p_vec: &[&Vec], s: &E::Scalar) -> Self { + p_vec.iter().skip(1).for_each(|p| assert_eq!(p.len(), p_vec[0].len())); + let instances = p_vec.iter().map(|p| Self::ref_cast(p)).collect::>(); + Self::batch_diff_size(&instances, *s) + } } /// A type that holds a polynomial evaluation instance #[derive(Debug)] struct PolyEvalInstance { - c: Commitment, // commitment to the polynomial - x: Vec, // evaluation point - e: E::Scalar, // claimed evaluation + c: Commitment, // commitment to the polynomial + x: Vec, // evaluation point + e: E::Scalar, // claimed evaluation } impl PolyEvalInstance { - fn batch_diff_size( - c_vec: &[Commitment], - e_vec: &[E::Scalar], - num_vars: &[usize], - x: Vec, - s: E::Scalar, - ) -> Self { - let num_instances = num_vars.len(); - assert_eq!(c_vec.len(), num_instances); - assert_eq!(e_vec.len(), num_instances); - - let num_vars_max = x.len(); - let powers: Vec = powers(&s, num_instances); - // Rescale evaluations by the first Lagrange polynomial, - // so that we can check its evaluation against x - let evals_scaled = zip_with!(iter, (e_vec, num_vars), |eval, num_rounds| { - // x_lo = [ x[0] , ..., x[n-nᵢ-1] ] - // x_hi = [ x[n-nᵢ], ..., x[n] ] - let (r_lo, _r_hi) = x.split_at(num_vars_max - num_rounds); - // Compute L₀(x_lo) - let lagrange_eval = r_lo - .iter() - .map(|r| E::Scalar::ONE - r) - .product::(); - - // vᵢ = L₀(x_lo)⋅Pᵢ(x_hi) - lagrange_eval * eval - }); - - // C = ∑ᵢ γⁱ⋅Cᵢ - let comm_joint = zip_with!(iter, (c_vec, powers), |c, g_i| *c * *g_i) - .fold(Commitment::::default(), |acc, item| acc + item); - - // v = ∑ᵢ γⁱ⋅vᵢ - let eval_joint = zip_with!((evals_scaled, powers.iter()), |e, g_i| e * g_i).sum(); - - Self { - c: comm_joint, - x, - e: eval_joint, - } - } - - fn batch( - c_vec: &[Commitment], - x: Vec, - e_vec: &[E::Scalar], - s: &E::Scalar, - ) -> Self { - let sizes = vec![x.len(); e_vec.len()]; - Self::batch_diff_size(c_vec, e_vec, &sizes, x, *s) - } + fn batch_diff_size( + c_vec: &[Commitment], + e_vec: 
&[E::Scalar], + num_vars: &[usize], + x: Vec, + s: E::Scalar, + ) -> Self { + let num_instances = num_vars.len(); + assert_eq!(c_vec.len(), num_instances); + assert_eq!(e_vec.len(), num_instances); + + let num_vars_max = x.len(); + let powers: Vec = powers(&s, num_instances); + // Rescale evaluations by the first Lagrange polynomial, + // so that we can check its evaluation against x + let evals_scaled = zip_with!(iter, (e_vec, num_vars), |eval, num_rounds| { + // x_lo = [ x[0] , ..., x[n-nᵢ-1] ] + // x_hi = [ x[n-nᵢ], ..., x[n] ] + let (r_lo, _r_hi) = x.split_at(num_vars_max - num_rounds); + // Compute L₀(x_lo) + let lagrange_eval = r_lo.iter().map(|r| E::Scalar::ONE - r).product::(); + + // vᵢ = L₀(x_lo)⋅Pᵢ(x_hi) + lagrange_eval * eval + }); + + // C = ∑ᵢ γⁱ⋅Cᵢ + let comm_joint = zip_with!(iter, (c_vec, powers), |c, g_i| *c * *g_i) + .fold(Commitment::::default(), |acc, item| acc + item); + + // v = ∑ᵢ γⁱ⋅vᵢ + let eval_joint = zip_with!((evals_scaled, powers.iter()), |e, g_i| e * g_i).sum(); + + Self { c: comm_joint, x, e: eval_joint } + } + + fn batch(c_vec: &[Commitment], x: Vec, e_vec: &[E::Scalar], s: &E::Scalar) -> Self { + let sizes = vec![x.len(); e_vec.len()]; + Self::batch_diff_size(c_vec, e_vec, &sizes, x, *s) + } } /// Binds "row" variables of (A, B, C) matrices viewed as 2d multilinear /// polynomials +#[allow(clippy::type_complexity)] fn compute_eval_table_sparse( - S: &R1CSShape, - rx: &[E::Scalar], + S: &R1CSShape, + rx: &[E::Scalar], ) -> (Vec, Vec, Vec) { - assert_eq!(rx.len(), S.num_cons); - - let inner = |M: &SparseMatrix, M_evals: &mut Vec| { - for (row_idx, row) in M.iter_rows().enumerate() { - for (val, col_idx) in M.get_row(row) { - // TODO(@winston-h-zhang): Parallelize? 
Will need more complicated locking - M_evals[*col_idx] += rx[row_idx] * val; - } - } - }; - - let (A_evals, (B_evals, C_evals)) = rayon::join( + assert_eq!(rx.len(), S.num_cons); + + let inner = |M: &SparseMatrix, M_evals: &mut Vec| { + for (row_idx, row) in M.iter_rows().enumerate() { + for (val, col_idx) in M.get_row(row) { + // TODO(@winston-h-zhang): Parallelize? Will need more complicated locking + M_evals[*col_idx] += rx[row_idx] * val; + } + } + }; + + let (A_evals, (B_evals, C_evals)) = rayon::join( + || { + let mut A_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.A, &mut A_evals); + A_evals + }, + || { + rayon::join( || { - let mut A_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.A, &mut A_evals); - A_evals + let mut B_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.B, &mut B_evals); + B_evals }, || { - rayon::join( - || { - let mut B_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.B, &mut B_evals); - B_evals - }, - || { - let mut C_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; - inner(&S.C, &mut C_evals); - C_evals - }, - ) + let mut C_evals: Vec = vec![E::Scalar::ZERO; 2 * S.num_vars]; + inner(&S.C, &mut C_evals); + C_evals }, - ); + ) + }, + ); - (A_evals, B_evals, C_evals) + (A_evals, B_evals, C_evals) } // #[cfg(all(test, not(target_arch = "wasm32")))] diff --git a/prover/src/spartan/polys/eq.rs b/prover/src/spartan/polys/eq.rs new file mode 100644 index 0000000..70abe99 --- /dev/null +++ b/prover/src/spartan/polys/eq.rs @@ -0,0 +1,115 @@ +//! `EqPolynomial`: Represents multilinear extension of equality polynomials, +//! evaluated based on binary input values. + +use ff::PrimeField; +use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$. 
+/// +/// The polynomial is defined by the formula: +/// $$ +/// \tilde{eq}(x, e) = \prod_{i=1}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) +/// $$ +/// +/// Each element in the vector `r` corresponds to a component $e_i$, +/// representing a bit from the binary representation of an input value $e$. +/// This polynomial evaluates to 1 if every component $x_i$ equals its +/// corresponding $e_i$, and 0 otherwise. +/// +/// For instance, for e = 6 (with a binary representation of 0b110), the vector +/// r would be [1, 1, 0]. +#[derive(Debug)] +pub struct EqPolynomial { + pub(in crate::spartan) r: Vec, +} + +impl EqPolynomial { + /// Creates a new `EqPolynomial` from a vector of Scalars `r`. + /// + /// Each Scalar in `r` corresponds to a bit from the binary representation + /// of an input value `e`. + pub const fn new(r: Vec) -> Self { Self { r } } + + /// Evaluates the `EqPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `r`. + /// + /// Panics if `rx` and `r` have different lengths. + pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { + assert_eq!(self.r.len(), rx.len()); + (0..rx.len()) + .map(|i| self.r[i] * rx[i] + (Scalar::ONE - self.r[i]) * (Scalar::ONE - rx[i])) + .product() + } + + /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + #[must_use = "this returns an expensive vector and leaves self unchanged"] + pub fn evals(&self) -> Vec { Self::evals_from_points(&self.r) } + + /// Evaluates the `EqPolynomial` from the `2^|r|` points in its domain, + /// without creating an intermediate polynomial representation. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. 
+ pub fn evals_from_points(r: &[Scalar]) -> Vec { + let ell = r.len(); + let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; + let mut size = 1; + evals[0] = Scalar::ONE; + + for r in r.iter().rev() { + let (evals_left, evals_right) = evals.split_at_mut(size); + let (evals_right, _) = evals_right.split_at_mut(size); + + evals_left.par_iter_mut().zip_eq(evals_right.par_iter_mut()).for_each(|(x, y)| { + *y = *x * r; + *x -= &*y; + }); + + size *= 2; + } + + evals + } +} + +impl FromIterator for EqPolynomial { + fn from_iter>(iter: I) -> Self { + let r: Vec<_> = iter.into_iter().collect(); + Self { r } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::provider; + + fn test_eq_polynomial_with() { + let eq_poly = EqPolynomial::::new(vec![F::ONE, F::ZERO, F::ONE]); + let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice()); + assert_eq!(y, F::ZERO); + + let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice()); + assert_eq!(y, F::ONE); + + let eval_list = eq_poly.evals(); + for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { + if i == 5 { + assert_eq!(coeff, F::ONE); + } else { + assert_eq!(coeff, F::ZERO); + } + } + } + + #[test] + fn test_eq_polynomial() { test_eq_polynomial_with::(); } +} diff --git a/prover/src/spartan/polys/identity.rs b/prover/src/spartan/polys/identity.rs new file mode 100644 index 0000000..ce7db47 --- /dev/null +++ b/prover/src/spartan/polys/identity.rs @@ -0,0 +1,25 @@ +use core::marker::PhantomData; + +use ff::PrimeField; + +pub struct IdentityPolynomial { + ell: usize, + _p: PhantomData, +} + +impl IdentityPolynomial { + pub fn new(ell: usize) -> Self { Self { ell, _p: PhantomData } } + + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.ell, r.len()); + let mut power_of_two = 1_u64; + (0..self.ell) + .rev() + .map(|i| { + let result = Scalar::from(power_of_two) * r[i]; + power_of_two *= 2; + result + }) + .sum() + } +} diff --git 
a/prover/src/spartan/polys/masked_eq.rs b/prover/src/spartan/polys/masked_eq.rs new file mode 100644 index 0000000..bca43b3 --- /dev/null +++ b/prover/src/spartan/polys/masked_eq.rs @@ -0,0 +1,131 @@ +//! `MaskedEqPolynomial`: Represents the `eq` polynomial over n variables, where +//! the first 2^m entries are 0. + +use ff::PrimeField; +use itertools::zip_eq; + +use crate::spartan::polys::eq::EqPolynomial; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $eqₘ(x,r)$ over n variables, where the first 2^m evaluations are +/// 0. +/// +/// The polynomial is defined by the formula: +/// eqₘ(x,r) = eq(x,r) - ( ∏_{0 ≤ i < n-m} (1−rᵢ)(1−xᵢ) )⋅( ∏_{n-m ≤ i < n} +/// (1−rᵢ)(1−xᵢ) + rᵢ⋅xᵢ ) +#[derive(Debug)] +pub struct MaskedEqPolynomial<'a, Scalar> { + eq: &'a EqPolynomial, + num_masked_vars: usize, +} + +impl<'a, Scalar: PrimeField> MaskedEqPolynomial<'a, Scalar> { + /// Creates a new `MaskedEqPolynomial` from a vector of Scalars `r` of size + /// n, with the number of masked variables m = `num_masked_vars`. + pub const fn new(eq: &'a EqPolynomial, num_masked_vars: usize) -> Self { + MaskedEqPolynomial { eq, num_masked_vars } + } + + /// Evaluates the `MaskedEqPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `r`. + /// + /// Panics if `rx` and `r` have different lengths. 
+ pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { + let r = &self.eq.r; + assert_eq!(r.len(), rx.len()); + let split_idx = r.len() - self.num_masked_vars; + + let (r_lo, r_hi) = r.split_at(split_idx); + let (rx_lo, rx_hi) = rx.split_at(split_idx); + let eq_lo = zip_eq(r_lo, rx_lo) + .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) + .product::(); + let eq_hi = zip_eq(r_hi, rx_hi) + .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) + .product::(); + let mask_lo = + zip_eq(r_lo, rx_lo).map(|(r, rx)| (Scalar::ONE - r) * (Scalar::ONE - rx)).product::(); + + (eq_lo - mask_lo) * eq_hi + } + + /// Evaluates the `MaskedEqPolynomial` at all the `2^|r|` points in its + /// domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + pub fn evals(&self) -> Vec { Self::evals_from_points(&self.eq.r, self.num_masked_vars) } + + /// Evaluates the `MaskedEqPolynomial` from the `2^|r|` points in its + /// domain, without creating an intermediate polynomial representation. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. 
+ fn evals_from_points(r: &[Scalar], num_masked_vars: usize) -> Vec { + let mut evals = EqPolynomial::evals_from_points(r); + + // replace the first 2^m evaluations with 0 + let num_masked_evals = 1 << num_masked_vars; + evals[..num_masked_evals].iter_mut().for_each(|e| *e = Scalar::ZERO); + + evals + } +} + +#[cfg(test)] +mod tests { + use rand_chacha::ChaCha20Rng; + use rand_core::{CryptoRng, RngCore, SeedableRng}; + + use super::*; + use crate::{provider, spartan::polys::eq::EqPolynomial}; + + fn test_masked_eq_polynomial_with( + num_vars: usize, + num_masked_vars: usize, + mut rng: &mut R, + ) { + let num_masked_evals = 1 << num_masked_vars; + + // random point + let r = std::iter::from_fn(|| Some(F::random(&mut rng))).take(num_vars).collect::>(); + // evaluation point + let rx = std::iter::from_fn(|| Some(F::random(&mut rng))).take(num_vars).collect::>(); + + let poly_eq = EqPolynomial::new(r); + let poly_eq_evals = poly_eq.evals(); + + let masked_eq_poly = MaskedEqPolynomial::new(&poly_eq, num_masked_vars); + let masked_eq_poly_evals = masked_eq_poly.evals(); + + // ensure the first 2^m entries are 0 + assert_eq!(masked_eq_poly_evals[..num_masked_evals], vec![F::ZERO; num_masked_evals]); + // ensure the remaining evaluations match eq(r) + assert_eq!(masked_eq_poly_evals[num_masked_evals..], poly_eq_evals[num_masked_evals..]); + + // compute the evaluation at rx succinctly + let masked_eq_eval = masked_eq_poly.evaluate(&rx); + + // compute the evaluation as a MLE + let rx_evals = EqPolynomial::evals_from_points(&rx); + let expected_masked_eq_eval = + zip_eq(rx_evals, masked_eq_poly_evals).map(|(rx, r)| rx * r).sum(); + + assert_eq!(masked_eq_eval, expected_masked_eq_eval); + } + + #[test] + fn test_masked_eq_polynomial() { + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + let num_vars = 5; + let num_masked_vars = 2; + test_masked_eq_polynomial_with::( + num_vars, + num_masked_vars, + &mut rng, + ); + } +} diff --git a/src/spartan/polys/mod.rs 
b/prover/src/spartan/polys/mod.rs similarity index 100% rename from src/spartan/polys/mod.rs rename to prover/src/spartan/polys/mod.rs diff --git a/prover/src/spartan/polys/multilinear.rs b/prover/src/spartan/polys/multilinear.rs new file mode 100644 index 0000000..1eb5176 --- /dev/null +++ b/prover/src/spartan/polys/multilinear.rs @@ -0,0 +1,295 @@ +//! Main components: +//! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, represented by +//! evaluations over all possible binary inputs. +//! - `SparsePolynomial`: Efficient representation of sparse multilinear polynomials, storing only +//! non-zero evaluations. + +use std::ops::{Add, Index}; + +use ff::PrimeField; +use itertools::Itertools as _; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::{ + IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, +}; +use serde::{Deserialize, Serialize}; + +use crate::spartan::{math::Math, polys::eq::EqPolynomial}; + +/// A multilinear extension of a polynomial $Z(\cdot)$, denote it as +/// $\tilde{Z}(x_1, ..., x_m)$ where the degree of each variable is at most one. +/// +/// This is the dense representation of a multilinear poynomial. +/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can +/// be represented uniquely by the list of evaluations of $\mathbb{G}(\cdot)$ +/// over the Boolean hypercube $\{0, 1\}^m$. +/// +/// For example, a 3 variables multilinear polynomial can be represented by +/// evaluation at points $[0, 2^3-1]$. +/// +/// The implementation follows +/// $$ +/// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in {0,1}^m}Z(e) \cdot \prod_{i=1}^m(x_i +/// \cdot e_i + (1-x_i) \cdot (1-e_i)) $$ +/// +/// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct MultilinearPolynomial { + num_vars: usize, // the number of variables in the multilinear polynomial + pub(crate) Z: Vec, /* evaluations of the polynomial in all the 2^num_vars Boolean + * inputs */ +} + +impl MultilinearPolynomial { + /// Creates a new `MultilinearPolynomial` from the given evaluations. + /// + /// # Panics + /// The number of evaluations must be a power of two. + pub fn new(Z: Vec) -> Self { + let num_vars = Z.len().log_2(); + assert_eq!(Z.len(), 1 << num_vars); + Self { num_vars, Z } + } + + /// evaluations of the polynomial in all the 2^num_vars Boolean inputs + pub fn evaluations(&self) -> &[Scalar] { &self.Z[..] } + + /// Returns the number of variables in the multilinear polynomial + pub const fn get_num_vars(&self) -> usize { self.num_vars } + + /// Returns the total number of evaluations. + pub fn len(&self) -> usize { self.Z.len() } + + /// Returns true if no evaluations. + pub fn is_empty(&self) -> bool { self.Z.len() == 0 } + + /// Returns a random polynomial + pub fn random(num_vars: usize, mut rng: &mut R) -> Self { + Self::new(std::iter::from_fn(|| Some(Scalar::random(&mut rng))).take(1 << num_vars).collect()) + } + + /// Binds the polynomial's top variable using the given scalar. + /// + /// This operation modifies the polynomial in-place. + pub fn bind_poly_var_top(&mut self, r: &Scalar) { + assert!(self.num_vars > 0); + + let n = self.len() / 2; + + let (left, right) = self.Z.split_at_mut(n); + + left.par_iter_mut().zip_eq(right.par_iter()).for_each(|(a, b)| { + *a += *r * (*b - *a); + }); + + self.Z.resize(n, Scalar::ZERO); + self.num_vars -= 1; + } + + /// Evaluates the polynomial at the given point. + /// Returns Z(r) in O(n) time. + /// + /// The point must have a value for each variable. 
+ pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + // r must have a value for each variable + assert_eq!(r.len(), self.get_num_vars()); + Self::evaluate_with(&self.Z, r) + } + + /// Evaluates the polynomial with the given evaluations and point. + pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { + let chis = EqPolynomial::evals_from_points(r); + Self::evaluate_with_chis(Z, &chis) + } + + /// Evaluates the polynomial with the given evaluations and chi coefficients + pub fn evaluate_with_chis(Z: &[Scalar], chis: &[Scalar]) -> Scalar { + zip_with!(par_iter, (chis, Z), |a, b| *a * b).sum() + } +} + +impl Index for MultilinearPolynomial { + type Output = Scalar; + + #[inline(always)] + fn index(&self, _index: usize) -> &Scalar { &(self.Z[_index]) } +} + +/// Sparse multilinear polynomial, which means the $Z(\cdot)$ is zero at most +/// points. In our context, sparse polynomials are non-zeros over the hypercube +/// at locations that map to "small" integers We exploit this property to +/// implement a time-optimal algorithm +pub(crate) struct SparsePolynomial { + num_vars: usize, + Z: Vec, +} + +impl SparsePolynomial { + pub fn new(num_vars: usize, Z: Vec) -> Self { Self { num_vars, Z } } + + // a time-optimal algorithm to evaluate sparse polynomials + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.num_vars, r.len()); + + let num_vars_z = self.Z.len().next_power_of_two().log_2(); + let chis = EqPolynomial::evals_from_points(&r[self.num_vars - 1 - num_vars_z..]); + #[allow(clippy::disallowed_methods)] + let eval_partial: Scalar = self.Z.iter().zip(chis.iter()).map(|(z, chi)| *z * *chi).sum(); + + let common = + (0..self.num_vars - 1 - num_vars_z).map(|i| (Scalar::ONE - r[i])).product::(); + + common * eval_partial + } +} + +/// Adds another multilinear polynomial to `self`. +/// Assumes the two polynomials have the same number of variables. 
+impl Add for MultilinearPolynomial { + type Output = Result; + + fn add(self, other: Self) -> Self::Output { + if self.get_num_vars() != other.get_num_vars() { + return Err("The two polynomials must have the same number of variables"); + } + + let sum: Vec = zip_with!(into_iter, (self.Z, other.Z), |a, b| a + b).collect(); + + Ok(Self::new(sum)) + } +} + +#[cfg(test)] +mod tests { + use rand_chacha::ChaCha20Rng; + use rand_core::SeedableRng; + + use super::*; + use crate::provider::bn256_grumpkin::bn256; + + fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { + MultilinearPolynomial { num_vars: len.count_ones() as usize, Z: vec![value; len] } + } + + // fn test_multilinear_polynomial_with() { + // // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * + // x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, + // 0, 1, 0, 2]. + + // let TWO = F::from(2); + + // let Z = vec![ + // F::ZERO, + // F::ZERO, + // F::ZERO, + // F::ONE, + // F::ZERO, + // F::ONE, + // F::ZERO, + // TWO, + // ]; + // let m_poly = MultilinearPolynomial::::new(Z.clone()); + // assert_eq!(m_poly.get_num_vars(), 3); + + // let x = vec![F::ONE, F::ONE, F::ONE]; + // assert_eq!(m_poly.evaluate(x.as_slice()), TWO); + + // let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), + // x.as_slice()); assert_eq!(y, TWO); + // } + + // fn test_sparse_polynomial_with() { + // // Let the polynomial have 4 variables, but is non-zero at only 3 + // locations (out // of 2^4 = 16) over the hypercube + // let mut Z = vec![F::ONE, F::ONE, F::from(2)]; + // let m_poly = SparsePolynomial::::new(4, Z.clone()); + + // Z.resize(16, F::ZERO); // append with zeros to make it a dense polynomial + // let m_poly_dense = MultilinearPolynomial::new(Z); + + // // evaluation point + // let x = vec![F::from(5), F::from(8), F::from(5), F::from(3)]; + + // // check evaluations + // assert_eq!( + // m_poly.evaluate(x.as_slice()), + // m_poly_dense.evaluate(x.as_slice()) + // ); + 
// } + + fn test_mlp_add_with() { + let mlp1 = make_mlp(4, F::from(3)); + let mlp2 = make_mlp(4, F::from(7)); + + let mlp3 = mlp1.add(mlp2).unwrap(); + + assert_eq!(mlp3.Z, vec![F::from(10); 4]); + } + + #[test] + fn test_mlp_add() { test_mlp_add_with::(); } + + fn test_evaluation_with() { + let num_evals = 4; + let mut evals: Vec = Vec::with_capacity(num_evals); + for _ in 0..num_evals { + evals.push(F::from(8)); + } + let dense_poly: MultilinearPolynomial = MultilinearPolynomial::new(evals.clone()); + + // Evaluate at 3: + // (0, 0) = 1 + // (0, 1) = 1 + // (1, 0) = 1 + // (1, 1) = 1 + // g(x_0,x_1) => c_0*(1 - x_0)(1 - x_1) + c_1*(1-x_0)(x_1) + c_2*(x_0)(1-x_1) + + // c_3*(x_0)(x_1) g(3, 4) = 8*(1 - 3)(1 - 4) + 8*(1-3)(4) + 8*(3)(1-4) + + // 8*(3)(4) = 48 + -64 + -72 + 96 = 8 g(5, 10) = 8*(1 - 5)(1 - 10) + + // 8*(1 - 5)(10) + 8*(5)(1-10) + 8*(5)(10) = 96 + -16 + -72 + 96 = 8 + assert_eq!(dense_poly.evaluate(vec![F::from(3), F::from(4)].as_slice()), F::from(8)); + assert_eq!(dense_poly.evaluate(vec![F::from(5), F::from(10)].as_slice()), F::from(8)); + } + + #[test] + fn test_evaluation() { test_evaluation_with::(); } + + /// This binds the variables of a multilinear polynomial to a provided + /// sequence of values. + /// + /// Assuming `bind_poly_var_top` defines the "top" variable of the + /// polynomial, this aims to test whether variables should be provided + /// to the `evaluate` function in topmost-first (big endian) of + /// topmost-last (lower endian) order. 
+ fn bind_sequence( + poly: &MultilinearPolynomial, + values: &[F], + ) -> MultilinearPolynomial { + // Assert that the size of the polynomial being evaluated is a power of 2 + // greater than (1 << values.len()) + assert!(poly.Z.len().is_power_of_two()); + assert!(poly.Z.len() >= 1 << values.len()); + + let mut tmp = poly.clone(); + for v in values.iter() { + tmp.bind_poly_var_top(v); + } + tmp + } + + fn bind_and_evaluate_with() { + for i in 0..50 { + // Initialize a random polynomial + let n = 7; + let mut rng = ChaCha20Rng::from_seed([i as u8; 32]); + let poly = MultilinearPolynomial::random(n, &mut rng); + + // draw a random point + let pt: Vec<_> = std::iter::from_fn(|| Some(F::random(&mut rng))).take(n).collect(); + // this shows the order in which coordinates are evaluated + assert_eq!(poly.evaluate(&pt), bind_sequence(&poly, &pt).Z[0]) + } + } + + #[test] + fn test_bind_and_evaluate() { bind_and_evaluate_with::(); } +} diff --git a/prover/src/spartan/polys/power.rs b/prover/src/spartan/polys/power.rs new file mode 100644 index 0000000..6e79700 --- /dev/null +++ b/prover/src/spartan/polys/power.rs @@ -0,0 +1,63 @@ +//! `PowPolynomial`: Represents multilinear extension of power polynomials + +use std::iter::successors; + +use ff::PrimeField; + +use crate::spartan::polys::eq::EqPolynomial; + +/// Represents the multilinear extension polynomial (MLE) of the equality +/// polynomial $pow(x,t)$, denoted as $\tilde{pow}(x, t)$. +/// +/// The polynomial is defined by the formula: +/// $$ +/// \tilde{power}(x, t) = \prod_{i=1}^m(1 + (t^{2^i} - 1) * x_i) +/// $$ +pub struct PowPolynomial { + eq: EqPolynomial, +} + +impl PowPolynomial { + /// Creates a new `PowPolynomial` from a Scalars `t`. 
+ pub fn new(t: &Scalar, ell: usize) -> Self { + // t_pow = [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] + let t_pow = Self::squares(t, ell); + + Self { eq: EqPolynomial::new(t_pow) } + } + + /// Create powers the following powers of `t`: + /// [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] + pub fn squares(t: &Scalar, ell: usize) -> Vec { + successors(Some(*t), |p: &Scalar| Some(p.square())).take(ell).collect::>() + } + + /// Creates the evals corresponding to a `PowPolynomial` from an + /// already-existing vector of powers. `t_pow.len() > ell` must be true. + pub(crate) fn evals_with_powers(powers: &[Scalar], ell: usize) -> Vec { + let t_pow = powers[..ell].to_vec(); + EqPolynomial::evals_from_points(&t_pow) + } + + /// Evaluates the `PowPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point + /// specified by `rx`. It expects `rx` to have the same length as the + /// internal vector `t_pow`. + /// + /// Panics if `rx` and `t_pow` have different lengths. + pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { self.eq.evaluate(rx) } + + pub fn coordinates(self) -> Vec { self.eq.r } + + /// Evaluates the `PowPolynomial` at all the `2^|t_pow|` points in its + /// domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial + /// evaluation at a specific point. + pub fn evals(&self) -> Vec { self.eq.evals() } +} + +impl From> for EqPolynomial { + fn from(polynomial: PowPolynomial) -> Self { polynomial.eq } +} diff --git a/prover/src/spartan/polys/univariate.rs b/prover/src/spartan/polys/univariate.rs new file mode 100644 index 0000000..a4b1666 --- /dev/null +++ b/prover/src/spartan/polys/univariate.rs @@ -0,0 +1,373 @@ +//! Main components: +//! - `UniPoly`: an univariate dense polynomial in coefficient form (big endian), +//! - `CompressedUniPoly`: a univariate dense polynomial, compressed (omitted linear term), in +//! 
coefficient form (little endian), +use std::{ + cmp::Ordering, + ops::{AddAssign, Index, IndexMut, MulAssign, SubAssign}, +}; + +use ff::PrimeField; +use rayon::prelude::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; +use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; + +use crate::{ + provider::util::iterators::DoubleEndedIteratorExt as _, + traits::{Group, TranscriptReprTrait}, +}; + +// ax^2 + bx + c stored as vec![c, b, a] +// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a] +#[derive(Debug, Clone, PartialEq, Eq, RefCast)] +#[repr(transparent)] +pub struct UniPoly { + pub coeffs: Vec, +} + +// ax^2 + bx + c stored as vec![c, a] +// ax^3 + bx^2 + cx + d stored as vec![d, c, a] +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CompressedUniPoly { + coeffs_except_linear_term: Vec, +} + +impl UniPoly { + pub fn new(coeffs: Vec) -> Self { + let mut res = Self { coeffs }; + res.truncate_leading_zeros(); + res + } + + fn zero() -> Self { Self::new(Vec::new()) } + + /// Divide self by another polynomial, and returns the + /// quotient and remainder. + pub fn divide_with_q_and_r(&self, divisor: &Self) -> Option<(Self, Self)> { + if self.is_zero() { + Some((Self::zero(), Self::zero())) + } else if divisor.is_zero() { + None + } else if self.degree() < divisor.degree() { + Some((Self::zero(), self.clone())) + } else { + // Now we know that self.degree() >= divisor.degree(); + let mut quotient = vec![Scalar::ZERO; self.degree() - divisor.degree() + 1]; + let mut remainder: Self = self.clone(); + // Can unwrap here because we know self is not zero. 
+ let divisor_leading_inv = divisor.leading_coefficient().unwrap().invert().unwrap(); + while !remainder.is_zero() && remainder.degree() >= divisor.degree() { + let cur_q_coeff = *remainder.leading_coefficient().unwrap() * divisor_leading_inv; + let cur_q_degree = remainder.degree() - divisor.degree(); + quotient[cur_q_degree] = cur_q_coeff; + + for (i, div_coeff) in divisor.coeffs.iter().enumerate() { + remainder.coeffs[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); + } + while let Some(true) = remainder.coeffs.last().map(|c| c == &Scalar::ZERO) { + remainder.coeffs.pop(); + } + } + Some((Self::new(quotient), remainder)) + } + } + + /// Divides f(x) by x-a and returns quotient polynomial with no reminder + /// This is a common use case for polynomial divisions in KZG-based PCS. + pub fn divide_minus_u(&self, u: Scalar) -> Self { + if self.is_zero() { + Self::zero() + } else { + // On input f(x) and u compute the witness polynomial used to prove + // that f(u) = v. The main part of this is to compute the + // division (f(x) - f(u)) / (x - u), but we don't use a general + // division algorithm, we make use of the fact that the division + // never has a remainder, and that the denominator is always a linear + // polynomial. The cost is (d-1) mults + (d-1) adds in E::Scalar, where + // d is the degree of f. + // + // We use the fact that if we compute the quotient of f(x)/(x-u), + // there will be a remainder, but it'll be v = f(u). Put another way + // the quotient of f(x)/(x-u) and (f(x) - f(v))/(x-u) is the + // same. One advantage is that computing f(u) could be decoupled + // from kzg_open, it could be done later or separate from computing W. 
+ + let d = self.coeffs.len(); + + // Compute h(x) = f(x)/(x - u) + let mut h = vec![Scalar::ZERO; d]; + for i in (1..d).rev() { + h[i - 1] = self.coeffs[i] + h[i] * u; + } + Self::new(h) + } + } + + fn is_zero(&self) -> bool { + self.coeffs.is_empty() || self.coeffs.iter().all(|c| c == &Scalar::ZERO) + } + + fn truncate_leading_zeros(&mut self) { + while self.coeffs.last() == Some(&Scalar::ZERO) { + self.coeffs.pop(); + } + } + + fn leading_coefficient(&self) -> Option<&Scalar> { self.coeffs.last() } + + pub fn from_evals(evals: &[Scalar]) -> Self { + // we only support degree-2 or degree-3 univariate polynomials + assert!(evals.len() == 3 || evals.len() == 4); + let two_inv = Scalar::from(2).invert().unwrap(); + let coeffs = if evals.len() == 3 { + // ax^2 + bx + c + let c = evals[0]; + let a = two_inv * (evals[2] - evals[1] - evals[1] + c); + let b = evals[1] - c - a; + vec![c, b, a] + } else { + // ax^3 + bx^2 + cx + d + let six_inv = Scalar::from(6).invert().unwrap(); + + let d = evals[0]; + let a = six_inv + * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]); + let b = two_inv + * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1] + + evals[2] + + evals[2] + + evals[2] + + evals[2] + - evals[3]); + let c = evals[1] - d - a - b; + vec![d, c, b, a] + }; + + Self { coeffs } + } + + pub fn degree(&self) -> usize { self.coeffs.len() - 1 } + + pub fn eval_at_zero(&self) -> Scalar { self.coeffs[0] } + + pub fn eval_at_one(&self) -> Scalar { + (0..self.coeffs.len()).into_par_iter().map(|i| self.coeffs[i]).sum() + } + + pub fn evaluate(&self, r: &Scalar) -> Scalar { self.coeffs.iter().rlc(r) } + + pub fn compress(&self) -> CompressedUniPoly { + let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat(); + assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len()); + CompressedUniPoly { coeffs_except_linear_term } + } + + #[cfg(test)] + /// Returns a random polynomial + pub fn 
random(num_vars: usize, mut rng: &mut R) -> Self { + Self::new(std::iter::from_fn(|| Some(Scalar::random(&mut rng))).take(num_vars).collect()) + } +} + +impl CompressedUniPoly { + // we require eval(0) + eval(1) = hint, so we can solve for the linear term as: + // linear_term = hint - 2 * constant_term - deg2 term - deg3 term + pub fn decompress(&self, hint: &Scalar) -> UniPoly { + let mut linear_term = + *hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0]; + for i in 1..self.coeffs_except_linear_term.len() { + linear_term -= self.coeffs_except_linear_term[i]; + } + + let mut coeffs: Vec = Vec::new(); + coeffs.push(self.coeffs_except_linear_term[0]); + coeffs.push(linear_term); + coeffs.extend(&self.coeffs_except_linear_term[1..]); + assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len()); + UniPoly { coeffs } + } +} + +impl TranscriptReprTrait for UniPoly { + fn to_transcript_bytes(&self) -> Vec { + let coeffs = self.compress().coeffs_except_linear_term; + coeffs.iter().flat_map(|&t| t.to_repr().as_ref().to_vec()).collect::>() + } +} + +impl Index for UniPoly { + type Output = Scalar; + + fn index(&self, index: usize) -> &Self::Output { &self.coeffs[index] } +} + +impl IndexMut for UniPoly { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.coeffs[index] } +} + +impl AddAssign<&Scalar> for UniPoly { + fn add_assign(&mut self, rhs: &Scalar) { self.coeffs.par_iter_mut().for_each(|c| *c += rhs); } +} + +impl MulAssign<&Scalar> for UniPoly { + fn mul_assign(&mut self, rhs: &Scalar) { self.coeffs.par_iter_mut().for_each(|c| *c *= rhs); } +} + +impl AddAssign<&Self> for UniPoly { + fn add_assign(&mut self, rhs: &Self) { + let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); + #[allow(clippy::disallowed_methods)] + for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *lhs += rhs; + } + if matches!(ordering, Ordering::Less) { + self.coeffs.extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); + } 
+ if matches!(ordering, Ordering::Equal) { + self.truncate_leading_zeros(); + } + } +} + +impl SubAssign<&Self> for UniPoly { + fn sub_assign(&mut self, rhs: &Self) { + let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); + #[allow(clippy::disallowed_methods)] + for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *lhs -= rhs; + } + if matches!(ordering, Ordering::Less) { + self.coeffs.extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); + } + if matches!(ordering, Ordering::Equal) { + self.truncate_leading_zeros(); + } + } +} + +impl AsRef> for UniPoly { + fn as_ref(&self) -> &Vec { &self.coeffs } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + use rand_chacha::ChaCha20Rng; + + use super::*; + use crate::provider::bn256_grumpkin; + + fn test_from_evals_quad_with() { + // polynomial is 2x^2 + 3x + 1 + let e0 = F::ONE; + let e1 = F::from(6); + let e2 = F::from(15); + let evals = vec![e0, e1, e2]; + let poly = UniPoly::from_evals(&evals); + + assert_eq!(poly.eval_at_zero(), e0); + assert_eq!(poly.eval_at_one(), e1); + assert_eq!(poly.coeffs.len(), 3); + assert_eq!(poly.coeffs[0], F::ONE); + assert_eq!(poly.coeffs[1], F::from(3)); + assert_eq!(poly.coeffs[2], F::from(2)); + + let hint = e0 + e1; + let compressed_poly = poly.compress(); + let decompressed_poly = compressed_poly.decompress(&hint); + for i in 0..decompressed_poly.coeffs.len() { + assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); + } + + let e3 = F::from(28); + assert_eq!(poly.evaluate(&F::from(3)), e3); + } + + #[test] + fn test_from_evals_quad() { test_from_evals_quad_with::(); } + + fn test_from_evals_cubic_with() { + // polynomial is x^3 + 2x^2 + 3x + 1 + let e0 = F::ONE; + let e1 = F::from(7); + let e2 = F::from(23); + let e3 = F::from(55); + let evals = vec![e0, e1, e2, e3]; + let poly = UniPoly::from_evals(&evals); + + assert_eq!(poly.eval_at_zero(), e0); + assert_eq!(poly.eval_at_one(), e1); + assert_eq!(poly.coeffs.len(), 4); + + assert_eq!(poly.coeffs[1], 
F::from(3)); + assert_eq!(poly.coeffs[2], F::from(2)); + assert_eq!(poly.coeffs[3], F::from(1)); + + let hint = e0 + e1; + let compressed_poly = poly.compress(); + let decompressed_poly = compressed_poly.decompress(&hint); + for i in 0..decompressed_poly.coeffs.len() { + assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); + } + + let e4 = F::from(109); + assert_eq!(poly.evaluate(&F::from(4)), e4); + } + + #[test] + fn test_from_evals_cubic() { test_from_evals_cubic_with::(); } + + /// Perform a naive n^2 multiplication of `self` by `other`. + pub fn naive_mul(ours: &UniPoly, other: &UniPoly) -> UniPoly { + if ours.is_zero() || other.is_zero() { + UniPoly::zero() + } else { + let mut result = vec![F::ZERO; ours.degree() + other.degree() + 1]; + for (i, self_coeff) in ours.coeffs.iter().enumerate() { + for (j, other_coeff) in other.coeffs.iter().enumerate() { + result[i + j] += &(*self_coeff * other_coeff); + } + } + UniPoly::new(result) + } + } + + fn divide_polynomials_random() { + let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); + + for a_degree in 0..50 { + for b_degree in 0..50 { + let dividend = UniPoly::::random(a_degree, rng); + let divisor = UniPoly::::random(b_degree, rng); + + if let Some((quotient, remainder)) = UniPoly::divide_with_q_and_r(÷nd, &divisor) { + let mut prod = naive_mul(&divisor, "ient); + prod += &remainder; + assert_eq!(dividend, prod) + } + } + } + } + + #[test] + fn test_divide_minus_u() { + fn test_inner() { + let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); + let dividend = UniPoly::::random(50, rng); + let u = Fr::random(rng); + let divisor = UniPoly::new(vec![-u, Fr::ONE]); + + let (q1, _) = dividend.divide_with_q_and_r(&divisor).unwrap(); + let q2 = dividend.divide_minus_u(u); + + assert_eq!(q1, q2); + } + + test_inner::(); + } + + #[test] + fn test_divide_polynomials_random() { + divide_polynomials_random::(); + } +} diff --git a/prover/src/spartan/ppsnark.rs b/prover/src/spartan/ppsnark.rs new file mode 100644 index 
0000000..485011d --- /dev/null +++ b/prover/src/spartan/ppsnark.rs @@ -0,0 +1,1036 @@ +//! This module implements `RelaxedR1CSSNARK` traits using a spark-based +//! approach to prove evaluations of sparse multilinear polynomials involved in +//! Spartan's sum-check protocol, thereby providing a preprocessing SNARK +//! The verifier in this preprocessing SNARK maintains a commitment to R1CS +//! matrices. This is beneficial when using a polynomial commitment scheme in +//! which the verifier's costs is succinct. This code includes experimental +//! optimizations to reduce runtimes and proof sizes. +use core::cmp::max; +use std::sync::Arc; + +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use super::polys::{masked_eq::MaskedEqPolynomial, multilinear::SparsePolynomial}; +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, + identity::IdentityPolynomial, + multilinear::MultilinearPolynomial, + power::PowPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + powers, + sumcheck::{ + engine::{ + InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, SumcheckEngine, + WitnessBoundSumcheck, + }, + SumcheckProof, + }, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + evaluation::EvaluationEngineTrait, + snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, TranscriptReprTrait, + }, + zip_with, Commitment, CommitmentKey, CompressedCommitment, +}; + +fn padded(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec { + let mut v_padded = vec![*e; n]; + v_padded[..v.len()].copy_from_slice(v); + v_padded +} + +/// A type that holds `R1CSShape` in a form amenable to memory checking +#[derive(Debug, Clone, Serialize, 
Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkRepr { + pub(in crate::spartan) N: usize, // size of the vectors + + // dense representation + pub(in crate::spartan) row: Vec, + pub(in crate::spartan) col: Vec, + pub(in crate::spartan) val_A: Vec, + pub(in crate::spartan) val_B: Vec, + pub(in crate::spartan) val_C: Vec, + + // timestamp polynomials + pub(in crate::spartan) ts_row: Vec, + pub(in crate::spartan) ts_col: Vec, +} + +/// A type that holds a commitment to a sparse polynomial +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct R1CSShapeSparkCommitment { + pub(in crate::spartan) N: usize, // size of each vector + + // commitments to the dense representation + pub(in crate::spartan) comm_row: Commitment, + pub(in crate::spartan) comm_col: Commitment, + pub(in crate::spartan) comm_val_A: Commitment, + pub(in crate::spartan) comm_val_B: Commitment, + pub(in crate::spartan) comm_val_C: Commitment, + + // commitments to the timestamp polynomials + pub(in crate::spartan) comm_ts_row: Commitment, + pub(in crate::spartan) comm_ts_col: Commitment, +} + +impl TranscriptReprTrait for R1CSShapeSparkCommitment { + fn to_transcript_bytes(&self) -> Vec { + [ + self.comm_row, + self.comm_col, + self.comm_val_A, + self.comm_val_B, + self.comm_val_C, + self.comm_ts_row, + self.comm_ts_col, + ] + .as_slice() + .to_transcript_bytes() + } +} + +impl R1CSShapeSparkRepr { + /// represents `R1CSShape` in a Spark-friendly format amenable to memory + /// checking + pub fn new(S: &R1CSShape) -> Self { + let N = { + let total_nz = S.A.len() + S.B.len() + S.C.len(); + max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() + }; + + // we make col lookup into the last entry of z, so we commit to zeros + let (mut row, mut col, mut val_A, mut val_B, mut val_C) = ( + vec![0; N], + vec![N - 1; N], + vec![E::Scalar::ZERO; N], + vec![E::Scalar::ZERO; N], + vec![E::Scalar::ZERO; N], + ); + + for (i, entry) in S.A.iter().enumerate() { + 
let (r, c, v) = entry; + row[i] = r; + col[i] = c; + val_A[i] = v; + } + + let b_offset = S.A.len(); + for (i, entry) in S.B.iter().enumerate() { + let (r, c, v) = entry; + row[b_offset + i] = r; + col[b_offset + i] = c; + val_B[b_offset + i] = v; + } + + let c_offset = S.A.len() + S.B.len(); + for (i, entry) in S.C.iter().enumerate() { + let (r, c, v) = entry; + row[c_offset + i] = r; + col[c_offset + i] = c; + val_C[c_offset + i] = v; + } + + // timestamp calculation routine + let timestamp_calc = |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> Vec { + let mut ts = vec![0usize; num_cells]; + + assert!(num_ops >= addr_trace.len()); + for addr in addr_trace { + assert!(*addr < num_cells); + ts[*addr] += 1; + } + ts + }; + + // timestamp polynomials for row + let (ts_row, ts_col) = + rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col)); + + // a routine to turn a vector of usize into a vector scalars + let to_vec_scalar = |v: &[usize]| -> Vec { + v.iter().map(|x| E::Scalar::from(*x as u64)).collect::>() + }; + + Self { + N, + + // dense representation + row: to_vec_scalar(&row), + col: to_vec_scalar(&col), + val_A, + val_B, + val_C, + + // timestamp polynomials + ts_row: to_vec_scalar(&ts_row), + ts_col: to_vec_scalar(&ts_col), + } + } + + pub(in crate::spartan) fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { + let comm_vec: Vec> = + [&self.row, &self.col, &self.val_A, &self.val_B, &self.val_C, &self.ts_row, &self.ts_col] + .par_iter() + .map(|v| E::CE::commit(ck, v)) + .collect(); + + R1CSShapeSparkCommitment { + N: self.row.len(), + comm_row: comm_vec[0], + comm_col: comm_vec[1], + comm_val_A: comm_vec[2], + comm_val_B: comm_vec[3], + comm_val_C: comm_vec[4], + comm_ts_row: comm_vec[5], + comm_ts_col: comm_vec[6], + } + } + + // computes evaluation oracles + fn evaluation_oracles( + &self, + S: &R1CSShape, + r_x: &E::Scalar, + z: &[E::Scalar], + ) -> (Vec, Vec, Vec, Vec) { + let mem_row = PowPolynomial::new(r_x, 
self.N.log_2()).evals(); + let mem_col = padded::(z, self.N, &E::Scalar::ZERO); + + let (L_row, L_col) = { + let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s + let mut L_col = vec![mem_col[self.N - 1]; self.N]; // we place mem_col[N-1] since resized col is appended with N-1 + + for (i, (val_r, val_c)) in S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, c, _)| (mem_row[r], mem_col[c])) + .enumerate() + { + L_row[i] = val_r; + L_col[i] = val_c; + } + (L_row, L_col) + }; + + (mem_row, mem_col, L_row, L_col) + } +} + +/// A type that represents the prover's key +#[derive(Debug, Clone)] +pub struct ProverKey> { + pk_ee: EE::ProverKey, + S_repr: R1CSShapeSparkRepr, + S_comm: R1CSShapeSparkCommitment, + vk_digest: E::Scalar, // digest of verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "EE::VerifierKey: Serialize")] +pub struct VerifierKey> { + num_cons: usize, + num_vars: usize, + vk_ee: EE::VerifierKey, + S_comm: R1CSShapeSparkCommitment, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl> SimpleDigestible for VerifierKey where EE::VerifierKey: Serialize {} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSSNARK> { + // commitment to oracles: the first three are for Az, Bz, Cz, + // and the last two are for memory reads + comm_Az: CompressedCommitment, + comm_Bz: CompressedCommitment, + comm_Cz: CompressedCommitment, + comm_L_row: CompressedCommitment, + comm_L_col: CompressedCommitment, + + // commitments to aid the memory checks + comm_t_plus_r_inv_row: CompressedCommitment, + comm_w_plus_r_inv_row: CompressedCommitment, + 
comm_t_plus_r_inv_col: CompressedCommitment, + comm_w_plus_r_inv_col: CompressedCommitment, + + // claims about Az, Bz, and Cz polynomials + eval_Az_at_tau: E::Scalar, + eval_Bz_at_tau: E::Scalar, + eval_Cz_at_tau: E::Scalar, + + // sum-check + sc: SumcheckProof, + + // claims from the end of sum-check + eval_Az: E::Scalar, + eval_Bz: E::Scalar, + eval_Cz: E::Scalar, + eval_E: E::Scalar, + eval_L_row: E::Scalar, + eval_L_col: E::Scalar, + eval_val_A: E::Scalar, + eval_val_B: E::Scalar, + eval_val_C: E::Scalar, + + eval_W: E::Scalar, + + eval_t_plus_r_inv_row: E::Scalar, + eval_row: E::Scalar, // address + eval_w_plus_r_inv_row: E::Scalar, + eval_ts_row: E::Scalar, + + eval_t_plus_r_inv_col: E::Scalar, + eval_col: E::Scalar, // address + eval_w_plus_r_inv_col: E::Scalar, + eval_ts_col: E::Scalar, + + // a PCS evaluation argument + eval_arg: EE::EvaluationArgument, +} + +impl> RelaxedR1CSSNARK { + fn prove_helper( + mem: &mut T1, + outer: &mut T2, + inner: &mut T3, + witness: &mut T4, + transcript: &mut E::TE, + ) -> Result< + ( + SumcheckProof, + Vec, + Vec>, + Vec>, + Vec>, + Vec>, + ), + NovaError, + > + where + T1: SumcheckEngine, + T2: SumcheckEngine, + T3: SumcheckEngine, + T4: SumcheckEngine, + { + // sanity checks + assert_eq!(mem.size(), outer.size()); + assert_eq!(mem.size(), inner.size()); + assert_eq!(mem.size(), witness.size()); + assert_eq!(mem.degree(), outer.degree()); + assert_eq!(mem.degree(), inner.degree()); + assert_eq!(mem.degree(), witness.degree()); + + // these claims are already added to the transcript, so we do not need to add + let claims = mem + .initial_claims() + .into_iter() + .chain(outer.initial_claims()) + .chain(inner.initial_claims()) + .chain(witness.initial_claims()) + .collect::>(); + + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, claims.len()); + + // compute the joint claim + let claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); + + let mut e = claim; + let mut r: Vec = Vec::new(); + let 
mut cubic_polys: Vec> = Vec::new(); + let num_rounds = mem.size().log_2(); + for _ in 0..num_rounds { + let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( + || rayon::join(|| mem.evaluation_points(), || outer.evaluation_points()), + || rayon::join(|| inner.evaluation_points(), || witness.evaluation_points()), + ); + + let evals: Vec> = evals_mem + .into_iter() + .chain(evals_outer.into_iter()) + .chain(evals_inner.into_iter()) + .chain(evals_witness.into_iter()) + .collect::>>(); + assert_eq!(evals.len(), claims.len()); + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); + + let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2, evals_combined_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + let _ = rayon::join( + || rayon::join(|| mem.bound(&r_i), || outer.bound(&r_i)), + || rayon::join(|| inner.bound(&r_i), || witness.bound(&r_i)), + ); + + e = poly.evaluate(&r_i); + cubic_polys.push(poly.compress()); + } + + let mem_claims = mem.final_claims(); + let outer_claims = outer.final_claims(); + let inner_claims = inner.final_claims(); + let witness_claims = witness.final_claims(); + + Ok((SumcheckProof::new(cubic_polys), r, mem_claims, outer_claims, inner_claims, witness_claims)) + } +} + +impl> VerifierKey { + fn new( + num_cons: usize, + num_vars: usize, + S_comm: R1CSShapeSparkCommitment, + vk_ee: EE::VerifierKey, + ) -> Self { + Self { num_cons, num_vars, S_comm, vk_ee, digest: Default::default() } + } +} +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the verifier's key + fn digest(&self) -> E::Scalar { + 
self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + Box::new(|shape: &R1CSShape| -> usize { + // the commitment key should be large enough to commit to the R1CS matrices + shape.A.len() + shape.B.len() + shape.C.len() + }) + } + + fn initialize_pk( + _ck: Arc>, + _vk_digest: ::Scalar, + ) -> Result { + todo!("not implemented for nova snarks"); + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + // check the provided commitment key meets minimal requirements + if ck.length() < Self::ck_floor()(S) { + return Err(NovaError::InvalidCommitmentKeyLength); + } + let (pk_ee, vk_ee) = EE::setup(ck.clone()); + + // pad the R1CS matrices + let S = S.pad(); + + let S_repr = R1CSShapeSparkRepr::new(&S); + let S_comm = S_repr.commit(&*ck); + + let vk = VerifierKey::new(S.num_cons, S.num_vars, S_comm.clone(), vk_ee); + + let pk = ProverKey { pk_ee, S_repr, S_comm, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance + #[tracing::instrument(skip_all, name = "PPSNARK::prove")] + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + // pad the R1CSShape + let S = S.pad(); + // sanity check that R1CSShape has all required size characteristics + assert!(S.is_regular_shape()); + + let W = W.pad(&S); // pad the witness + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the verifier key (which includes commitment to R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &pk.vk_digest); + transcript.absorb(b"U", U); + + // compute the 
full satisfying assignment by concatenating W.W, U.u, and U.X + let z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); + + // compute Az, Bz, Cz + let (mut Az, mut Bz, mut Cz) = S.multiply_vec(&z)?; + + // commit to Az, Bz, Cz + let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( + || E::CE::commit(ck, &Az), + || rayon::join(|| E::CE::commit(ck, &Bz), || E::CE::commit(ck, &Cz)), + ); + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + // number of rounds of sum-check + let num_rounds_sc = pk.S_repr.N.log_2(); + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); + + // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau + let (Az, Bz, Cz, W, E) = { + Az.resize(pk.S_repr.N, E::Scalar::ZERO); + Bz.resize(pk.S_repr.N, E::Scalar::ZERO); + Cz.resize(pk.S_repr.N, E::Scalar::ZERO); + let E = padded::(&W.E, pk.S_repr.N, &E::Scalar::ZERO); + let W = padded::(&W.W, pk.S_repr.N, &E::Scalar::ZERO); + + (Az, Bz, Cz, W, E) + }; + let chis_taus = EqPolynomial::evals_from_points(&tau_coords); + let (eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau) = { + let evals_at_tau = [&Az, &Bz, &Cz] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with_chis(p, &chis_taus)) + .collect::>(); + (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) + }; + + // (2) send commitments to the following two oracles + // L_row(i) = eq(tau, row(i)) for all i + // L_col(i) = z(col(i)) for all i + let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &tau, &z); + let (comm_L_row, comm_L_col) = + rayon::join(|| E::CE::commit(ck, &L_row), || E::CE::commit(ck, &L_col)); + + // since all the three polynomials are opened at tau, + // we can combine them into a single polynomial opened at tau + let eval_vec = vec![eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau]; + + // absorb the claimed evaluations into the transcript + transcript.absorb(b"e", &eval_vec.as_slice()); + // absorb 
commitments to L_row and L_col in the transcript + transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + let poly_vec = vec![&Az, &Bz, &Cz]; + let c = transcript.squeeze(b"c")?; + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = + PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); + + // we now need to prove four claims + // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)), and + // eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = + // (Az+r*Bz+r^2*Cz)(tau) (2) eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * + // eval_Cz_at_tau = \sum_y L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) + // * L_col(y) (3) L_row(i) = eq(tau, row(i)) and L_col(i) = z(col(i)) + // (4) Check that the witness polynomial W is well-formed e.g., it is padded + // with only zeros + let gamma = transcript.squeeze(b"g")?; + let r = transcript.squeeze(b"r")?; + + let ((mut outer_sc_inst, mut inner_sc_inst), mem_res) = rayon::join( + || { + // a sum-check instance to prove the first claim + let outer_sc_inst = OuterSumcheckInstance::new( + PowPolynomial::new(&tau, num_rounds_sc).evals(), + Az.clone(), + Bz.clone(), + (0..Cz.len()).map(|i| U.u * Cz[i] + E[i]).collect::>(), + w.p.clone(), // Mz = Az + r * Bz + r^2 * Cz + &u.e, // eval_Az_at_tau + r * eval_Az_at_tau + r^2 * eval_Cz_at_tau + ); + + // a sum-check instance to prove the second claim + let val = zip_with!( + par_iter, + (pk.S_repr.val_A, pk.S_repr.val_B, pk.S_repr.val_C), + |v_a, v_b, v_c| *v_a + c * *v_b + c * c * *v_c + ) + .collect::>(); + let inner_sc_inst = InnerSumcheckInstance { + claim: eval_Az_at_tau + c * eval_Bz_at_tau + c * c * eval_Cz_at_tau, + poly_L_row: MultilinearPolynomial::new(L_row.clone()), + poly_L_col: MultilinearPolynomial::new(L_col.clone()), + poly_val: MultilinearPolynomial::new(val), + }; + + (outer_sc_inst, inner_sc_inst) + }, + || { + // a third 
sum-check instance to prove the read-only memory claim + // we now need to prove that L_row and L_col are well-formed + + // hash the tuples of (addr,val) memory contents and read responses into a + // single field element using `hash_func` + + let (comm_mem_oracles, mem_oracles, mem_aux) = + MemorySumcheckInstance::::compute_oracles( + ck, + &r, + &gamma, + &mem_row, + &pk.S_repr.row, + &L_row, + &pk.S_repr.ts_row, + &mem_col, + &pk.S_repr.col, + &L_col, + &pk.S_repr.ts_col, + )?; + // absorb the commitments + transcript.absorb(b"l", &comm_mem_oracles.as_slice()); + + let rho = transcript.squeeze(b"r")?; + let poly_eq = MultilinearPolynomial::new(PowPolynomial::new(&rho, num_rounds_sc).evals()); + + Ok::<_, NovaError>(( + MemorySumcheckInstance::new( + mem_oracles.clone(), + mem_aux, + poly_eq.Z, + pk.S_repr.ts_row.clone(), + pk.S_repr.ts_col.clone(), + ), + comm_mem_oracles, + mem_oracles, + )) + }, + ); + + let (mut mem_sc_inst, comm_mem_oracles, mem_oracles) = mem_res?; + + let mut witness_sc_inst = WitnessBoundSumcheck::new(tau, W.clone(), S.num_vars); + + let (sc, rand_sc, claims_mem, claims_outer, claims_inner, claims_witness) = Self::prove_helper( + &mut mem_sc_inst, + &mut outer_sc_inst, + &mut inner_sc_inst, + &mut witness_sc_inst, + &mut transcript, + )?; + + // claims from the end of the sum-check + let eval_Az = claims_outer[0][0]; + let eval_Bz = claims_outer[0][1]; + + let eval_L_row = claims_inner[0][0]; + let eval_L_col = claims_inner[0][1]; + + let eval_t_plus_r_inv_row = claims_mem[0][0]; + let eval_w_plus_r_inv_row = claims_mem[0][1]; + let eval_ts_row = claims_mem[0][2]; + + let eval_t_plus_r_inv_col = claims_mem[1][0]; + let eval_w_plus_r_inv_col = claims_mem[1][1]; + let eval_ts_col = claims_mem[1][2]; + let eval_W = claims_witness[0][0]; + + // compute the remaining claims that did not come for free from the sum-check + // prover + let (eval_Cz, eval_E, eval_val_A, eval_val_B, eval_val_C, eval_row, eval_col) = { + let e = [ + &Cz, + &E, + 
&pk.S_repr.val_A, + &pk.S_repr.val_B, + &pk.S_repr.val_C, + &pk.S_repr.row, + &pk.S_repr.col, + ] + .into_par_iter() + .map(|p| MultilinearPolynomial::evaluate_with(p, &rand_sc)) + .collect::>(); + (e[0], e[1], e[2], e[3], e[4], e[5], e[6]) + }; + + // all the evaluations are at rand_sc, we can fold them into one claim + let eval_vec = vec![ + eval_W, + eval_Az, + eval_Bz, + eval_Cz, + eval_E, + eval_L_row, + eval_L_col, + eval_val_A, + eval_val_B, + eval_val_C, + eval_t_plus_r_inv_row, + eval_row, + eval_w_plus_r_inv_row, + eval_ts_row, + eval_t_plus_r_inv_col, + eval_col, + eval_w_plus_r_inv_col, + eval_ts_col, + ]; + + let comm_vec = [ + U.comm_W, + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_L_row, + comm_L_col, + pk.S_comm.comm_val_A, + pk.S_comm.comm_val_B, + pk.S_comm.comm_val_C, + comm_mem_oracles[0], + pk.S_comm.comm_row, + comm_mem_oracles[1], + pk.S_comm.comm_ts_row, + comm_mem_oracles[2], + pk.S_comm.comm_col, + comm_mem_oracles[3], + pk.S_comm.comm_ts_col, + ]; + let poly_vec = [ + &W, + &Az, + &Bz, + &Cz, + &E, + &L_row, + &L_col, + &pk.S_repr.val_A, + &pk.S_repr.val_B, + &pk.S_repr.val_C, + mem_oracles[0].as_ref(), + &pk.S_repr.row, + mem_oracles[1].as_ref(), + &pk.S_repr.ts_row, + mem_oracles[2].as_ref(), + &pk.S_repr.col, + mem_oracles[3].as_ref(), + &pk.S_repr.ts_col, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); + + let eval_arg = EE::prove(ck, &pk.pk_ee, &mut transcript, &u.c, &w.p, &rand_sc, &u.e)?; + + Ok(Self { + comm_Az: comm_Az.compress(), + comm_Bz: comm_Bz.compress(), + comm_Cz: comm_Cz.compress(), + comm_L_row: comm_L_row.compress(), + comm_L_col: comm_L_col.compress(), + + comm_t_plus_r_inv_row: comm_mem_oracles[0].compress(), + comm_w_plus_r_inv_row: comm_mem_oracles[1].compress(), + 
comm_t_plus_r_inv_col: comm_mem_oracles[2].compress(), + comm_w_plus_r_inv_col: comm_mem_oracles[3].compress(), + + eval_Az_at_tau, + eval_Bz_at_tau, + eval_Cz_at_tau, + + sc, + + eval_Az, + eval_Bz, + eval_Cz, + eval_E, + eval_L_row, + eval_L_col, + eval_val_A, + eval_val_B, + eval_val_C, + + eval_W, + + eval_t_plus_r_inv_row, + eval_row, + eval_w_plus_r_inv_row, + eval_ts_row, + + eval_col, + eval_t_plus_r_inv_col, + eval_w_plus_r_inv_col, + eval_ts_col, + + eval_arg, + }) + } + + /// verifies a proof of satisfiability of a `RelaxedR1CS` instance + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the verifier key (including commitment to R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &vk.digest()); + transcript.absorb(b"U", U); + + let comm_Az = Commitment::::decompress(&self.comm_Az)?; + let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; + let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; + let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; + let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; + let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; + let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; + let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; + let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; + + transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); + + let num_rounds_sc = vk.S_comm.N.log_2(); + let tau = transcript.squeeze(b"t")?; + let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); + + // add claims about Az, Bz, and Cz to be checked later + // since all the three polynomials are opened at tau, + // we can combine them into a single polynomial opened at tau + let eval_vec = vec![self.eval_Az_at_tau, 
self.eval_Bz_at_tau, self.eval_Cz_at_tau]; + + transcript.absorb(b"e", &eval_vec.as_slice()); + + transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); + let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; + let c = transcript.squeeze(b"c")?; + let u: PolyEvalInstance = + PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); + let claim = u.e; + + let gamma = transcript.squeeze(b"g")?; + + let r = transcript.squeeze(b"r")?; + + transcript.absorb( + b"l", + &vec![ + comm_t_plus_r_inv_row, + comm_w_plus_r_inv_row, + comm_t_plus_r_inv_col, + comm_w_plus_r_inv_col, + ] + .as_slice(), + ); + + let rho = transcript.squeeze(b"r")?; + + let num_claims = 10; + let s = transcript.squeeze(b"r")?; + let coeffs = powers(&s, num_claims); + let claim = (coeffs[7] + coeffs[8]) * claim; // rest are zeros + + // verify sc + let (claim_sc_final, rand_sc) = self.sc.verify(claim, num_rounds_sc, 3, &mut transcript)?; + + // verify claim_sc_final + let claim_sc_final_expected = { + let rand_eq_bound_rand_sc = PowPolynomial::new(&rho, num_rounds_sc).evaluate(&rand_sc); + let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_sc).into(); + + let taus_bound_rand_sc = eq_tau.evaluate(&rand_sc); + let taus_masked_bound_rand_sc = + MaskedEqPolynomial::new(&eq_tau, vk.num_vars.log_2()).evaluate(&rand_sc); + + let eval_t_plus_r_row = { + let eval_addr_row = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); + let eval_val_row = taus_bound_rand_sc; + let eval_t = eval_addr_row + gamma * eval_val_row; + eval_t + r + }; + + let eval_w_plus_r_row = { + let eval_addr_row = self.eval_row; + let eval_val_row = self.eval_L_row; + let eval_w = eval_addr_row + gamma * eval_val_row; + eval_w + r + }; + + let eval_t_plus_r_col = { + let eval_addr_col = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); + + // memory contents is z, so we compute eval_Z from eval_W and eval_X + let eval_val_col = { + // rand_sc was padded, so we now remove the padding + let 
(factor, rand_sc_unpad) = { + let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); + + let mut factor = E::Scalar::ONE; + for r_p in rand_sc.iter().take(l) { + factor *= E::Scalar::ONE - r_p + } + + let rand_sc_unpad = rand_sc[l..].to_vec(); + + (factor, rand_sc_unpad) + }; + + let eval_X = { + // public IO is (u, X) + let X = vec![U.u].into_iter().chain(U.X.iter().cloned()).collect::>(); + + // evaluate the sparse polynomial at rand_sc_unpad[1..] + let poly_X = SparsePolynomial::new(rand_sc_unpad.len() - 1, X); + poly_X.evaluate(&rand_sc_unpad[1..]) + }; + + self.eval_W + factor * rand_sc_unpad[0] * eval_X + }; + let eval_t = eval_addr_col + gamma * eval_val_col; + eval_t + r + }; + + let eval_w_plus_r_col = { + let eval_addr_col = self.eval_col; + let eval_val_col = self.eval_L_col; + let eval_w = eval_addr_col + gamma * eval_val_col; + eval_w + r + }; + + let claim_mem_final_expected: E::Scalar = coeffs[0] + * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row) + + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col) + + coeffs[2] + * (rand_eq_bound_rand_sc + * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row)) + + coeffs[3] + * (rand_eq_bound_rand_sc + * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE)) + + coeffs[4] + * (rand_eq_bound_rand_sc + * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col)) + + coeffs[5] + * (rand_eq_bound_rand_sc + * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE)); + + let claim_outer_final_expected = coeffs[6] + * taus_bound_rand_sc + * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E) + + coeffs[7] * taus_bound_rand_sc * (self.eval_Az + c * self.eval_Bz + c * c * self.eval_Cz); + let claim_inner_final_expected = coeffs[8] + * self.eval_L_row + * self.eval_L_col + * (self.eval_val_A + c * self.eval_val_B + c * c * self.eval_val_C); + + let claim_witness_final_expected = coeffs[9] * taus_masked_bound_rand_sc * self.eval_W; + 
+ claim_mem_final_expected + + claim_outer_final_expected + + claim_inner_final_expected + + claim_witness_final_expected + }; + + if claim_sc_final_expected != claim_sc_final { + return Err(NovaError::InvalidSumcheckProof); + } + + let eval_vec = vec![ + self.eval_W, + self.eval_Az, + self.eval_Bz, + self.eval_Cz, + self.eval_E, + self.eval_L_row, + self.eval_L_col, + self.eval_val_A, + self.eval_val_B, + self.eval_val_C, + self.eval_t_plus_r_inv_row, + self.eval_row, + self.eval_w_plus_r_inv_row, + self.eval_ts_row, + self.eval_t_plus_r_inv_col, + self.eval_col, + self.eval_w_plus_r_inv_col, + self.eval_ts_col, + ]; + + let comm_vec = [ + U.comm_W, + comm_Az, + comm_Bz, + comm_Cz, + U.comm_E, + comm_L_row, + comm_L_col, + vk.S_comm.comm_val_A, + vk.S_comm.comm_val_B, + vk.S_comm.comm_val_C, + comm_t_plus_r_inv_row, + vk.S_comm.comm_row, + comm_w_plus_r_inv_row, + vk.S_comm.comm_ts_row, + comm_t_plus_r_inv_col, + vk.S_comm.comm_col, + comm_w_plus_r_inv_col, + vk.S_comm.comm_ts_col, + ]; + transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript + let c = transcript.squeeze(b"c")?; + let u: PolyEvalInstance = PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); + + // verify + EE::verify(&vk.vk_ee, &mut transcript, &u.c, &rand_sc, &u.e, &self.eval_arg)?; + + Ok(()) + } +} + +// #[cfg(test)] +// mod tests { +// use ff::Field; +// use pasta_curves::Fq as Scalar; + +// use super::*; +// use crate::provider::PallasEngine; + +// #[test] +// fn test_padded() { +// let mut rng = rand::thread_rng(); +// let e = Scalar::random(&mut rng); +// let v: Vec = (0..10).map(|_| Scalar::random(&mut +// rng)).collect(); let n = 20; + +// let result = padded::(&v, n, &e); + +// assert_eq!(result.len(), n); +// assert_eq!(&result[..10], &v[..]); +// assert!(result[10..].iter().all(|&i| i == e)); +// } +// } diff --git a/prover/src/spartan/snark.rs b/prover/src/spartan/snark.rs new file mode 100644 index 0000000..e903cbd --- /dev/null +++ 
b/prover/src/spartan/snark.rs @@ -0,0 +1,502 @@ +//! This module implements `RelaxedR1CSSNARKTrait` using Spartan that is generic +//! over the polynomial commitment and evaluation argument (i.e., a PCS) +//! This version of Spartan does not use preprocessing so the verifier keeps the +//! entire description of R1CS matrices. This is essentially optimal for the +//! verifier when using an IPA-based polynomial commitment scheme. + +use std::sync::Arc; + +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, + spartan::{ + compute_eval_table_sparse, + polys::{ + eq::EqPolynomial, + multilinear::{MultilinearPolynomial, SparsePolynomial}, + power::PowPolynomial, + }, + powers, + sumcheck::SumcheckProof, + PolyEvalInstance, PolyEvalWitness, + }, + traits::{ + evaluation::EvaluationEngineTrait, + snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, + Engine, TranscriptEngineTrait, + }, + CommitmentKey, +}; + +/// A type that represents the prover's key +#[derive(Debug, Clone)] +pub struct ProverKey> { + pub pk_ee: EE::ProverKey, + pub vk_digest: E::Scalar, // digest of the verifier's key +} + +/// A type that represents the verifier's key +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "")] +pub struct VerifierKey> { + vk_ee: EE::VerifierKey, + S: R1CSShape, + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +impl> SimpleDigestible for VerifierKey {} + +impl> VerifierKey { + fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { + Self { vk_ee, S: shape, digest: OnceCell::new() } + } +} + +impl> DigestHelperTrait for VerifierKey { + /// Returns the digest of the verifier's key. 
+ fn digest(&self) -> E::Scalar { + self + .digest + .get_or_try_init(|| { + let dc = DigestComputer::::new(self); + dc.digest() + }) + .cloned() + .expect("Failure to retrieve digest!") + } +} + +/// A succinct proof of knowledge of a witness to a relaxed R1CS instance +/// The proof is produced using Spartan's combination of the sum-check and +/// the commitment to a vector viewed as a polynomial commitment +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RelaxedR1CSSNARK> { + sc_proof_outer: SumcheckProof, + claims_outer: (E::Scalar, E::Scalar, E::Scalar), + eval_E: E::Scalar, + sc_proof_inner: SumcheckProof, + eval_W: E::Scalar, + sc_proof_batch: SumcheckProof, + evals_batch: Vec, + eval_arg: EE::EvaluationArgument, +} + +impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn initialize_pk( + _ck: Arc>, + _vk_digest: ::Scalar, + ) -> Result { + todo!("not implemented for nova snarks"); + } + + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { + let (pk_ee, vk_ee) = EE::setup(ck); + + let S = S.pad(); + + let vk: VerifierKey = VerifierKey::new(S, vk_ee); + + let pk = ProverKey { pk_ee, vk_digest: vk.digest() }; + + Ok((pk, vk)) + } + + /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance + #[tracing::instrument(skip_all, name = "SNARK::prove")] + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result { + // pad the R1CSShape + let S = S.pad(); + // sanity check that R1CSShape has all required size characteristics + assert!(S.is_regular_shape()); + + let W = W.pad(&S); // pad the witness + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the digest of vk (which includes R1CS matrices) and the + // RelaxedR1CSInstance to the transcript + transcript.absorb(b"vk", &pk.vk_digest); + 
transcript.absorb(b"U", U); + + // compute the full satisfying assignment by concatenating W.W, U.u, and U.X + let mut z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); + + let (num_rounds_x, num_rounds_y) = ( + usize::try_from(S.num_cons.ilog2()).unwrap(), + (usize::try_from(S.num_vars.ilog2()).unwrap() + 1), + ); + + // outer sum-check + let tau: EqPolynomial<_> = PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); + + let mut poly_tau = MultilinearPolynomial::new(tau.evals()); + let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { + let (poly_Az, poly_Bz, poly_Cz) = S.multiply_vec(&z)?; + let poly_uCz_E = (0..S.num_cons) + .into_par_iter() + .map(|i| U.u * poly_Cz[i] + W.E[i]) + .collect::>(); + ( + MultilinearPolynomial::new(poly_Az), + MultilinearPolynomial::new(poly_Bz), + MultilinearPolynomial::new(poly_Cz), + MultilinearPolynomial::new(poly_uCz_E), + ) + }; + + let comb_func_outer = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( + &E::Scalar::ZERO, // claim is zero + num_rounds_x, + &mut poly_tau, + &mut poly_Az, + &mut poly_Bz, + &mut poly_uCz_E, + comb_func_outer, + &mut transcript, + )?; + + // claims from the end of sum-check + let (claim_Az, claim_Bz): (E::Scalar, E::Scalar) = (claims_outer[1], claims_outer[2]); + let chis_r_x = EqPolynomial::evals_from_points(&r_x); + + let claim_Cz = MultilinearPolynomial::evaluate_with_chis(poly_Cz.evaluations(), &chis_r_x); + let eval_E = MultilinearPolynomial::evaluate_with_chis(&W.E, &chis_r_x); + transcript.absorb(b"claims_outer", &[claim_Az, claim_Bz, claim_Cz, eval_E].as_slice()); + + // inner sum-check + let r = transcript.squeeze(b"r")?; + let claim_inner_joint = claim_Az + r * claim_Bz + r * r * claim_Cz; + + let poly_ABC = { + // compute the initial 
evaluation table for R(\tau, x) + let evals_rx = EqPolynomial::evals_from_points(&r_x.clone()); + + let (evals_A, evals_B, evals_C) = compute_eval_table_sparse(&S, &evals_rx); + + assert_eq!(evals_A.len(), evals_B.len()); + assert_eq!(evals_A.len(), evals_C.len()); + (0..evals_A.len()) + .into_par_iter() + .map(|i| evals_A[i] + r * evals_B[i] + r * r * evals_C[i]) + .collect::>() + }; + + let poly_z = { + z.resize(S.num_vars * 2, E::Scalar::ZERO); + z + }; + + let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { + *poly_A_comp * *poly_B_comp + }; + let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad( + &claim_inner_joint, + num_rounds_y, + &mut MultilinearPolynomial::new(poly_ABC), + &mut MultilinearPolynomial::new(poly_z), + comb_func, + &mut transcript, + )?; + + // Add additional claims about W and E polynomials to the list from CC + // We will reduce a vector of claims of evaluations at different points into + // claims about them at the same point. For example, eval_W =? + // W(r_y[1..]) and eval_E =? E(r_x) into two claims: eval_W_prime =? + // W(rz) and eval_E_prime =? E(rz) We can them combine the two into one: + // eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), where gamma + // is a public challenge Since commitments to W and E are homomorphic, + // the verifier can compute a commitment to the batched polynomial. 
+ let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_y[1..]); + + let w_vec = vec![PolyEvalWitness { p: W.W }, PolyEvalWitness { p: W.E }]; + let u_vec = + vec![PolyEvalInstance { c: U.comm_W, x: r_y[1..].to_vec(), e: eval_W }, PolyEvalInstance { + c: U.comm_E, + x: r_x, + e: eval_E, + }]; + + let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = + batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; + + let eval_arg = EE::prove( + ck, + &pk.pk_ee, + &mut transcript, + &batched_u.c, + &batched_w.p, + &batched_u.x, + &batched_u.e, + )?; + + Ok(Self { + sc_proof_outer, + claims_outer: (claim_Az, claim_Bz, claim_Cz), + eval_E, + sc_proof_inner, + eval_W, + sc_proof_batch, + evals_batch: claims_batch_left, + eval_arg, + }) + } + + /// verifies a proof of satisfiability of a `RelaxedR1CS` instance + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { + let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); + + // append the digest of R1CS matrices and the RelaxedR1CSInstance to the + // transcript + transcript.absorb(b"vk", &vk.digest()); + transcript.absorb(b"U", U); + + let (num_rounds_x, num_rounds_y) = ( + usize::try_from(vk.S.num_cons.ilog2()).unwrap(), + (usize::try_from(vk.S.num_vars.ilog2()).unwrap() + 1), + ); + + // outer sum-check + let tau: EqPolynomial<_> = PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); + + let (claim_outer_final, r_x) = + self.sc_proof_outer.verify(E::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; + + // verify claim_outer_final + let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer; + let taus_bound_rx = tau.evaluate(&r_x); + let claim_outer_final_expected = + taus_bound_rx * (claim_Az * claim_Bz - U.u * claim_Cz - self.eval_E); + if claim_outer_final != claim_outer_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb( + b"claims_outer", + &[self.claims_outer.0, self.claims_outer.1, self.claims_outer.2, 
self.eval_E].as_slice(), + ); + + // inner sum-check + let r = transcript.squeeze(b"r")?; + let claim_inner_joint = + self.claims_outer.0 + r * self.claims_outer.1 + r * r * self.claims_outer.2; + + let (claim_inner_final, r_y) = + self.sc_proof_inner.verify(claim_inner_joint, num_rounds_y, 2, &mut transcript)?; + + // verify claim_inner_final + let eval_Z = { + let eval_X = { + // public IO is (u, X) + let X = vec![U.u].into_iter().chain(U.X.iter().cloned()).collect::>(); + SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), X) + .evaluate(&r_y[1..]) + }; + (E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X + }; + + // compute evaluations of R1CS matrices + let multi_evaluate = |M_vec: &[&SparseMatrix], + r_x: &[E::Scalar], + r_y: &[E::Scalar]| + -> Vec { + let evaluate_with_table = + |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { + M.par_iter_rows() + .enumerate() + .map(|(row_idx, row)| { + M.get_row(row) + .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) + .sum::() + }) + .sum() + }; + + let (T_x, T_y) = rayon::join( + || EqPolynomial::evals_from_points(r_x), + || EqPolynomial::evals_from_points(r_y), + ); + + (0..M_vec.len()).into_par_iter().map(|i| evaluate_with_table(M_vec[i], &T_x, &T_y)).collect() + }; + + let evals = multi_evaluate(&[&vk.S.A, &vk.S.B, &vk.S.C], &r_x, &r_y); + + let claim_inner_final_expected = (evals[0] + r * evals[1] + r * r * evals[2]) * eval_Z; + if claim_inner_final != claim_inner_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + // add claims about W and E polynomials + let u_vec: Vec> = vec![ + PolyEvalInstance { c: U.comm_W, x: r_y[1..].to_vec(), e: self.eval_W }, + PolyEvalInstance { c: U.comm_E, x: r_x, e: self.eval_E }, + ]; + + let batched_u = + batch_eval_verify(u_vec, &mut transcript, &self.sc_proof_batch, &self.evals_batch)?; + + // verify + EE::verify( + &vk.vk_ee, + &mut transcript, + &batched_u.c, + &batched_u.x, + &batched_u.e, + 
&self.eval_arg, + )?; + + Ok(()) + } +} + +/// Reduces a batch of polynomial evaluation claims using Sumcheck +/// to a single claim at the same point. +/// +/// # Details +/// +/// We are given as input a list of instance/witness pairs +/// u = [(Cᵢ, xᵢ, eᵢ)], w = [Pᵢ], such that +/// - nᵢ = |xᵢ| +/// - Cᵢ = Commit(Pᵢ) +/// - eᵢ = Pᵢ(xᵢ) +/// - |Pᵢ| = 2^nᵢ +/// +/// We allow the polynomial Pᵢ to have different sizes, by appropriately scaling +/// the claims and resulting evaluations from Sumcheck. +pub(in crate::spartan) fn batch_eval_reduce( + u_vec: Vec>, + w_vec: &[PolyEvalWitness], + transcript: &mut E::TE, +) -> Result<(PolyEvalInstance, PolyEvalWitness, SumcheckProof, Vec), NovaError> +{ + let num_claims = u_vec.len(); + assert_eq!(w_vec.len(), num_claims); + + // Compute nᵢ and n = maxᵢ{nᵢ} + let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); + + // Check polynomials match number of variables, i.e. |Pᵢ| = 2^nᵢ + zip_with_for_each!(iter, (w_vec, num_rounds), |w, num_vars| assert_eq!(w.p.len(), 1 << num_vars)); + + // generate a challenge, and powers of it for random linear combination + let rho = transcript.squeeze(b"r")?; + let powers_of_rho = powers(&rho, num_claims); + + let (claims, u_xs, comms): (Vec<_>, Vec<_>, Vec<_>) = + u_vec.into_iter().map(|u| (u.e, u.x, u.c)).multiunzip(); + + // Create clones of polynomials to be given to Sumcheck + // Pᵢ(X) + let polys_P: Vec> = + w_vec.iter().map(|w| MultilinearPolynomial::new(w.p.clone())).collect(); + // eq(xᵢ, X) + let polys_eq: Vec> = u_xs + .into_iter() + .map(|ux| MultilinearPolynomial::new(EqPolynomial::evals_from_points(&ux))) + .collect(); + + // For each i, check eᵢ = ∑ₓ Pᵢ(x)eq(xᵢ,x), where x ∈ {0,1}^nᵢ + let comb_func = |poly_P: &E::Scalar, poly_eq: &E::Scalar| -> E::Scalar { *poly_P * *poly_eq }; + let (sc_proof_batch, r, claims_batch) = SumcheckProof::prove_quad_batch( + &claims, + &num_rounds, + polys_P, + polys_eq, + &powers_of_rho, + comb_func, + transcript, + )?; + + let 
(claims_batch_left, _): (Vec, Vec) = claims_batch; + + transcript.absorb(b"l", &claims_batch_left.as_slice()); + + // we now combine evaluation claims at the same point r into one + let gamma = transcript.squeeze(b"g")?; + + let u_joint = + PolyEvalInstance::batch_diff_size(&comms, &claims_batch_left, &num_rounds, r, gamma); + + // P = ∑ᵢ γⁱ⋅Pᵢ + let w_joint = PolyEvalWitness::batch_diff_size(&w_vec.iter().by_ref().collect::>(), gamma); + + Ok((u_joint, w_joint, sc_proof_batch, claims_batch_left)) +} + +/// Verifies a batch of polynomial evaluation claims using Sumcheck +/// reducing them to a single claim at the same point. +pub(in crate::spartan) fn batch_eval_verify( + u_vec: Vec>, + transcript: &mut E::TE, + sc_proof_batch: &SumcheckProof, + evals_batch: &[E::Scalar], +) -> Result, NovaError> { + let num_claims = u_vec.len(); + assert_eq!(evals_batch.len(), num_claims); + + // generate a challenge + let rho = transcript.squeeze(b"r")?; + let powers_of_rho = powers(&rho, num_claims); + + // Compute nᵢ and n = maxᵢ{nᵢ} + let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + let claims = u_vec.iter().map(|u| u.e).collect::>(); + + let (claim_batch_final, r) = + sc_proof_batch.verify_batch(&claims, &num_rounds, &powers_of_rho, 2, transcript)?; + + let claim_batch_final_expected = { + let evals_r = u_vec.iter().map(|u| { + let (_, r_hi) = r.split_at(num_rounds_max - u.x.len()); + EqPolynomial::new(r_hi.to_vec()).evaluate(&u.x) + }); + + zip_with!((evals_r, evals_batch.iter(), powers_of_rho.iter()), |e_i, p_i, rho_i| e_i + * *p_i + * rho_i) + .sum() + }; + + if claim_batch_final != claim_batch_final_expected { + return Err(NovaError::InvalidSumcheckProof); + } + + transcript.absorb(b"l", &evals_batch); + + // we now combine evaluation claims at the same point r into one + let gamma = transcript.squeeze(b"g")?; + + let comms = u_vec.into_iter().map(|u| u.c).collect::>(); + + let u_joint = 
PolyEvalInstance::batch_diff_size(&comms, evals_batch, &num_rounds, r, gamma); + + Ok(u_joint) +} diff --git a/prover/src/spartan/sumcheck/engine.rs b/prover/src/spartan/sumcheck/engine.rs new file mode 100644 index 0000000..bb1c4b1 --- /dev/null +++ b/prover/src/spartan/sumcheck/engine.rs @@ -0,0 +1,571 @@ +use ff::Field; +use rayon::prelude::*; + +use crate::{ + provider::util::field::batch_invert, + spartan::{ + math::Math, + polys::{ + eq::EqPolynomial, masked_eq::MaskedEqPolynomial, multilinear::MultilinearPolynomial, + power::PowPolynomial, + }, + sumcheck::SumcheckProof, + }, + traits::commitment::CommitmentEngineTrait, + Commitment, CommitmentKey, Engine, NovaError, +}; + +/// Defines a trait for implementing sum-check in a generic manner +pub trait SumcheckEngine: Send + Sync { + /// returns the initial claims + fn initial_claims(&self) -> Vec; + + /// degree of the sum-check polynomial + fn degree(&self) -> usize; + + /// the size of the polynomials + fn size(&self) -> usize; + + /// returns evaluation points at 0, 2, d-1 (where d is the degree of the + /// sum-check polynomial) + fn evaluation_points(&self) -> Vec>; + + /// bounds a variable in the constituent polynomials + fn bound(&mut self, r: &E::Scalar); + + /// returns the final claims + fn final_claims(&self) -> Vec>; +} + +/// The [`WitnessBoundSumcheck`] ensures that the witness polynomial W defined +/// over n = log(N) variables, is zero outside of the first `num_vars = 2^m` +/// entries. +/// +/// # Details +/// +/// The `W` polynomial is padded with zeros to size N = 2^n. +/// The `masked_eq` polynomials is defined as with regards to a random challenge +/// `tau` as the eq(tau) polynomial, where the first 2^m evaluations to 0. +/// +/// The instance is given by +/// `0 = ∑_{0≤i<2^n} masked_eq[i] * W[i]`. +/// It is equivalent to the expression +/// `0 = ∑_{2^m≤i<2^n} eq[i] * W[i]` +/// Since `eq` is random, the instance is only satisfied if `W[2^{m}..] = 0`. 
+pub(in crate::spartan) struct WitnessBoundSumcheck { + poly_W: MultilinearPolynomial, + poly_masked_eq: MultilinearPolynomial, +} + +impl WitnessBoundSumcheck { + pub fn new(tau: E::Scalar, poly_W_padded: Vec, num_vars: usize) -> Self { + let num_vars_log = num_vars.log_2(); + // When num_vars = num_rounds, we shouldn't have to prove anything + // but we still want this instance to compute the evaluation of W + let num_rounds = poly_W_padded.len().log_2(); + assert!(num_vars_log < num_rounds); + + let tau_coords = PowPolynomial::new(&tau, num_rounds).coordinates(); + let poly_masked_eq_evals = + MaskedEqPolynomial::new(&EqPolynomial::new(tau_coords), num_vars_log).evals(); + + Self { + poly_W: MultilinearPolynomial::new(poly_W_padded), + poly_masked_eq: MultilinearPolynomial::new(poly_masked_eq_evals), + } + } +} +impl SumcheckEngine for WitnessBoundSumcheck { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + assert_eq!(self.poly_W.len(), self.poly_masked_eq.len()); + self.poly_W.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp }; + + let (eval_point_0, eval_point_2, eval_point_3) = SumcheckProof::::compute_eval_points_cubic( + &self.poly_masked_eq, + &self.poly_W, + &self.poly_W, // unused + &comb_func, + ); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &E::Scalar) { + [&mut self.poly_W, &mut self.poly_masked_eq] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + vec![vec![self.poly_W[0], self.poly_masked_eq[0]]] + } +} + +pub(in crate::spartan) struct MemorySumcheckInstance { + // row + w_plus_r_row: MultilinearPolynomial, + t_plus_r_row: MultilinearPolynomial, + t_plus_r_inv_row: MultilinearPolynomial, + w_plus_r_inv_row: MultilinearPolynomial, + ts_row: 
MultilinearPolynomial, + + // col + w_plus_r_col: MultilinearPolynomial, + t_plus_r_col: MultilinearPolynomial, + t_plus_r_inv_col: MultilinearPolynomial, + w_plus_r_inv_col: MultilinearPolynomial, + ts_col: MultilinearPolynomial, + + // eq + poly_eq: MultilinearPolynomial, + + // zero polynomial + poly_zero: MultilinearPolynomial, +} + +impl MemorySumcheckInstance { + /// Computes witnesses for `MemoryInstanceSumcheck` + /// + /// # Description + /// We use the logUp protocol to prove that + /// ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) = 0 + /// where + /// T_row[i] = mem_row[i] * gamma + i + /// = eq(tau)[i] * gamma + i + /// W_row[i] = L_row[i] * gamma + addr_row[i] + /// = eq(tau)[row[i]] * gamma + addr_row[i] + /// T_col[i] = mem_col[i] * gamma + i + /// = z[i] * gamma + i + /// W_col[i] = addr_col[i] * gamma + addr_col[i] + /// = z[col[i]] * gamma + addr_col[i] + /// and + /// `TS_row`, `TS_col` are integer-valued vectors representing the number + /// of reads to each memory cell of `L_row`, `L_col` + /// + /// The function returns oracles for the polynomials TS[i]/(T[i] + r), + /// 1/(W[i] + r), as well as auxiliary polynomials T[i] + r, W[i] + r + pub fn compute_oracles( + ck: &CommitmentKey, + r: &E::Scalar, + gamma: &E::Scalar, + mem_row: &[E::Scalar], + addr_row: &[E::Scalar], + L_row: &[E::Scalar], + ts_row: &[E::Scalar], + mem_col: &[E::Scalar], + addr_col: &[E::Scalar], + L_col: &[E::Scalar], + ts_col: &[E::Scalar], + ) -> Result<([Commitment; 4], [Vec; 4], [Vec; 4]), NovaError> { + // hash the tuples of (addr,val) memory contents and read responses into a + // single field element using `hash_func` + let hash_func_vec = |mem: &[E::Scalar], + addr: &[E::Scalar], + lookups: &[E::Scalar]| + -> (Vec, Vec) { + let hash_func = |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr }; + assert_eq!(addr.len(), lookups.len()); + rayon::join( + || { + (0..mem.len()) + .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i])) + .collect::>() + }, + || 
(0..addr.len()).map(|i| hash_func(&addr[i], &lookups[i])).collect::>(), + ) + }; + + let ((T_row, W_row), (T_col, W_col)) = rayon::join( + || hash_func_vec(mem_row, addr_row, L_row), + || hash_func_vec(mem_col, addr_col, L_col), + ); + + // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r) + let helper = |T: &[E::Scalar], + W: &[E::Scalar], + TS: &[E::Scalar], + r: &E::Scalar| + -> ( + (Result, NovaError>, Result, NovaError>), + (Vec, Vec), + ) { + rayon::join( + || { + rayon::join( + || { + let inv = batch_invert(T.par_iter().map(|e| *e + *r).collect::>())?; + + // compute inv[i] * TS[i] in parallel + Ok( + zip_with!((inv.into_par_iter(), TS.par_iter()), |e1, e2| e1 * *e2) + .collect::>(), + ) + }, + || batch_invert(W.par_iter().map(|e| *e + *r).collect::>()), + ) + }, + || { + rayon::join( + || T.par_iter().map(|e| *e + *r).collect(), + || W.par_iter().map(|e| *e + *r).collect(), + ) + }, + ) + }; + + let ( + ((t_plus_r_inv_row, w_plus_r_inv_row), (t_plus_r_row, w_plus_r_row)), + ((t_plus_r_inv_col, w_plus_r_inv_col), (t_plus_r_col, w_plus_r_col)), + ) = rayon::join(|| helper(&T_row, &W_row, ts_row, r), || helper(&T_col, &W_col, ts_col, r)); + + let t_plus_r_inv_row = t_plus_r_inv_row?; + let w_plus_r_inv_row = w_plus_r_inv_row?; + let t_plus_r_inv_col = t_plus_r_inv_col?; + let w_plus_r_inv_col = w_plus_r_inv_col?; + + let ( + (comm_t_plus_r_inv_row, comm_w_plus_r_inv_row), + (comm_t_plus_r_inv_col, comm_w_plus_r_inv_col), + ) = rayon::join( + || { + rayon::join( + || E::CE::commit(ck, &t_plus_r_inv_row), + || E::CE::commit(ck, &w_plus_r_inv_row), + ) + }, + || { + rayon::join( + || E::CE::commit(ck, &t_plus_r_inv_col), + || E::CE::commit(ck, &w_plus_r_inv_col), + ) + }, + ); + + let comm_vec = + [comm_t_plus_r_inv_row, comm_w_plus_r_inv_row, comm_t_plus_r_inv_col, comm_w_plus_r_inv_col]; + + let poly_vec = [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col]; + + let aux_poly_vec = [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col]; + 
+ Ok((comm_vec, poly_vec, aux_poly_vec)) + } + + pub fn new( + polys_oracle: [Vec; 4], + polys_aux: [Vec; 4], + poly_eq: Vec, + ts_row: Vec, + ts_col: Vec, + ) -> Self { + let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = polys_oracle; + let [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col] = polys_aux; + + let zero = vec![E::Scalar::ZERO; poly_eq.len()]; + + Self { + w_plus_r_row: MultilinearPolynomial::new(w_plus_r_row), + t_plus_r_row: MultilinearPolynomial::new(t_plus_r_row), + t_plus_r_inv_row: MultilinearPolynomial::new(t_plus_r_inv_row), + w_plus_r_inv_row: MultilinearPolynomial::new(w_plus_r_inv_row), + ts_row: MultilinearPolynomial::new(ts_row), + w_plus_r_col: MultilinearPolynomial::new(w_plus_r_col), + t_plus_r_col: MultilinearPolynomial::new(t_plus_r_col), + t_plus_r_inv_col: MultilinearPolynomial::new(t_plus_r_inv_col), + w_plus_r_inv_col: MultilinearPolynomial::new(w_plus_r_inv_col), + ts_col: MultilinearPolynomial::new(ts_col), + poly_eq: MultilinearPolynomial::new(poly_eq), + poly_zero: MultilinearPolynomial::new(zero), + } + } +} + +impl SumcheckEngine for MemorySumcheckInstance { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO; 6] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + // sanity checks + assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_row.len()); + assert_eq!(self.w_plus_r_row.len(), self.ts_row.len()); + assert_eq!(self.w_plus_r_row.len(), self.w_plus_r_col.len()); + assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_col.len()); + assert_eq!(self.w_plus_r_row.len(), self.ts_col.len()); + + self.w_plus_r_row.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp - *poly_B_comp }; + + let comb_func2 = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + _poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * 
(*poly_B_comp * *poly_C_comp - E::Scalar::ONE) }; + + let comb_func3 = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + // inv related evaluation points + // 0 = ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) + let (eval_inv_0_row, eval_inv_2_row, eval_inv_3_row) = + SumcheckProof::::compute_eval_points_cubic( + &self.t_plus_r_inv_row, + &self.w_plus_r_inv_row, + &self.poly_zero, + &comb_func, + ); + + let (eval_inv_0_col, eval_inv_2_col, eval_inv_3_col) = + SumcheckProof::::compute_eval_points_cubic( + &self.t_plus_r_inv_col, + &self.w_plus_r_inv_col, + &self.poly_zero, + &comb_func, + ); + + // row related evaluation points + // 0 = ∑ eq[i] * (inv_T[i] * (T[i] + r) - TS[i])) + let (eval_T_0_row, eval_T_2_row, eval_T_3_row) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.t_plus_r_inv_row, + &self.t_plus_r_row, + &self.ts_row, + &comb_func3, + ); + // 0 = ∑ eq[i] * (inv_W[i] * (T[i] + r) - 1)) + let (eval_W_0_row, eval_W_2_row, eval_W_3_row) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.w_plus_r_inv_row, + &self.w_plus_r_row, + &self.poly_zero, + &comb_func2, + ); + + // column related evaluation points + let (eval_T_0_col, eval_T_2_col, eval_T_3_col) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.t_plus_r_inv_col, + &self.t_plus_r_col, + &self.ts_col, + &comb_func3, + ); + let (eval_W_0_col, eval_W_2_col, eval_W_3_col) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_eq, + &self.w_plus_r_inv_col, + &self.w_plus_r_col, + &self.poly_zero, + &comb_func2, + ); + + vec![ + vec![eval_inv_0_row, eval_inv_2_row, eval_inv_3_row], + vec![eval_inv_0_col, eval_inv_2_col, eval_inv_3_col], + vec![eval_T_0_row, eval_T_2_row, eval_T_3_row], + vec![eval_W_0_row, eval_W_2_row, 
eval_W_3_row], + vec![eval_T_0_col, eval_T_2_col, eval_T_3_col], + vec![eval_W_0_col, eval_W_2_col, eval_W_3_col], + ] + } + + fn bound(&mut self, r: &E::Scalar) { + [ + &mut self.t_plus_r_row, + &mut self.t_plus_r_inv_row, + &mut self.w_plus_r_row, + &mut self.w_plus_r_inv_row, + &mut self.ts_row, + &mut self.t_plus_r_col, + &mut self.t_plus_r_inv_col, + &mut self.w_plus_r_col, + &mut self.w_plus_r_inv_col, + &mut self.ts_col, + &mut self.poly_eq, + ] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + let poly_row_final = vec![self.t_plus_r_inv_row[0], self.w_plus_r_inv_row[0], self.ts_row[0]]; + + let poly_col_final = vec![self.t_plus_r_inv_col[0], self.w_plus_r_inv_col[0], self.ts_col[0]]; + + vec![poly_row_final, poly_col_final] + } +} + +pub(in crate::spartan) struct OuterSumcheckInstance { + poly_tau: MultilinearPolynomial, + poly_Az: MultilinearPolynomial, + poly_Bz: MultilinearPolynomial, + poly_uCz_E: MultilinearPolynomial, + + poly_Mz: MultilinearPolynomial, + eval_Mz_at_tau: E::Scalar, + + poly_zero: MultilinearPolynomial, +} + +impl OuterSumcheckInstance { + pub fn new( + tau: Vec, + Az: Vec, + Bz: Vec, + uCz_E: Vec, + Mz: Vec, + eval_Mz_at_tau: &E::Scalar, + ) -> Self { + let zero = vec![E::Scalar::ZERO; tau.len()]; + Self { + poly_tau: MultilinearPolynomial::new(tau), + poly_Az: MultilinearPolynomial::new(Az), + poly_Bz: MultilinearPolynomial::new(Bz), + poly_uCz_E: MultilinearPolynomial::new(uCz_E), + poly_Mz: MultilinearPolynomial::new(Mz), + eval_Mz_at_tau: *eval_Mz_at_tau, + poly_zero: MultilinearPolynomial::new(zero), + } + } +} + +impl SumcheckEngine for OuterSumcheckInstance { + fn initial_claims(&self) -> Vec { vec![E::Scalar::ZERO, self.eval_Mz_at_tau] } + + fn degree(&self) -> usize { 3 } + + fn size(&self) -> usize { + assert_eq!(self.poly_tau.len(), self.poly_Az.len()); + assert_eq!(self.poly_tau.len(), self.poly_Bz.len()); + assert_eq!(self.poly_tau.len(), self.poly_uCz_E.len()); + 
assert_eq!(self.poly_tau.len(), self.poly_Mz.len()); + self.poly_tau.len() + } + + fn evaluation_points(&self) -> Vec> { + let comb_func = + |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar, + poly_D_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; + + let (eval_point_h_0, eval_point_h_2, eval_point_h_3) = + SumcheckProof::::compute_eval_points_cubic_with_additive_term( + &self.poly_tau, + &self.poly_Az, + &self.poly_Bz, + &self.poly_uCz_E, + &comb_func, + ); + + let comb_func2 = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + _poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp }; + + let (eval_point_e_0, eval_point_e_2, eval_point_e_3) = + SumcheckProof::::compute_eval_points_cubic( + &self.poly_tau, + &self.poly_Mz, + &self.poly_zero, + &comb_func2, + ); + + vec![vec![eval_point_h_0, eval_point_h_2, eval_point_h_3], vec![ + eval_point_e_0, + eval_point_e_2, + eval_point_e_3, + ]] + } + + fn bound(&mut self, r: &E::Scalar) { + [ + &mut self.poly_tau, + &mut self.poly_Az, + &mut self.poly_Bz, + &mut self.poly_uCz_E, + &mut self.poly_Mz, + ] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { vec![vec![self.poly_Az[0], self.poly_Bz[0]]] } +} + +pub(in crate::spartan) struct InnerSumcheckInstance { + pub(in crate::spartan) claim: E::Scalar, + pub(in crate::spartan) poly_L_row: MultilinearPolynomial, + pub(in crate::spartan) poly_L_col: MultilinearPolynomial, + pub(in crate::spartan) poly_val: MultilinearPolynomial, +} +impl InnerSumcheckInstance { + pub fn new( + claim: E::Scalar, + poly_L_row: MultilinearPolynomial, + poly_L_col: MultilinearPolynomial, + poly_val: MultilinearPolynomial, + ) -> Self { + Self { claim, poly_L_row, poly_L_col, poly_val } + } +} +impl SumcheckEngine for InnerSumcheckInstance { + fn initial_claims(&self) -> Vec { vec![self.claim] } + + fn degree(&self) -> usize { 3 } + + fn 
size(&self) -> usize { + assert_eq!(self.poly_L_row.len(), self.poly_val.len()); + assert_eq!(self.poly_L_row.len(), self.poly_L_col.len()); + self.poly_L_row.len() + } + + fn evaluation_points(&self) -> Vec> { + let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val); + let comb_func = |poly_A_comp: &E::Scalar, + poly_B_comp: &E::Scalar, + poly_C_comp: &E::Scalar| + -> E::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; + + let (eval_point_0, eval_point_2, eval_point_3) = + SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); + + vec![vec![eval_point_0, eval_point_2, eval_point_3]] + } + + fn bound(&mut self, r: &E::Scalar) { + [&mut self.poly_L_row, &mut self.poly_L_col, &mut self.poly_val] + .par_iter_mut() + .for_each(|poly| poly.bind_poly_var_top(r)); + } + + fn final_claims(&self) -> Vec> { + vec![vec![self.poly_L_row[0], self.poly_L_col[0]]] + } +} diff --git a/prover/src/spartan/sumcheck/mod.rs b/prover/src/spartan/sumcheck/mod.rs new file mode 100644 index 0000000..49d4bc0 --- /dev/null +++ b/prover/src/spartan/sumcheck/mod.rs @@ -0,0 +1,544 @@ +use ff::Field; +use itertools::Itertools as _; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + spartan::polys::{ + multilinear::MultilinearPolynomial, + univariate::{CompressedUniPoly, UniPoly}, + }, + traits::{Engine, TranscriptEngineTrait}, +}; + +pub(in crate::spartan) mod engine; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub(crate) struct SumcheckProof { + compressed_polys: Vec>, +} + +impl SumcheckProof { + pub fn new(compressed_polys: Vec>) -> Self { + Self { compressed_polys } + } + + pub fn verify( + &self, + claim: E::Scalar, + num_rounds: usize, + degree_bound: usize, + transcript: &mut E::TE, + ) -> Result<(E::Scalar, Vec), NovaError> { + let mut e = claim; + let mut r: Vec = Vec::new(); + + // verify that there is a univariate polynomial for each round + if 
self.compressed_polys.len() != num_rounds { + return Err(NovaError::InvalidSumcheckProof); + } + + for i in 0..self.compressed_polys.len() { + let poly = self.compressed_polys[i].decompress(&e); + + // verify degree bound + if poly.degree() != degree_bound { + return Err(NovaError::InvalidSumcheckProof); + } + + // we do not need to check if poly(0) + poly(1) = e, as + // decompress() call above already ensures that holds + debug_assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + + r.push(r_i); + + // evaluate the claimed degree-ell polynomial at r_i + e = poly.evaluate(&r_i); + } + + Ok((e, r)) + } + + pub fn verify_batch( + &self, + claims: &[E::Scalar], + num_rounds: &[usize], + coeffs: &[E::Scalar], + degree_bound: usize, + transcript: &mut E::TE, + ) -> Result<(E::Scalar, Vec), NovaError> { + let num_instances = claims.len(); + assert_eq!(num_rounds.len(), num_instances); + assert_eq!(coeffs.len(), num_instances); + + // n = maxᵢ{nᵢ} + let num_rounds_max = *num_rounds.iter().max().unwrap(); + + // Random linear combination of claims, + // where each claim is scaled by 2^{n-nᵢ} to account for the padding. 
+ // + // claim = ∑ᵢ coeffᵢ⋅2^{n-nᵢ}⋅cᵢ + let claim = zip_with!( + ( + zip_with!(iter, (claims, num_rounds), |claim, num_rounds| { + let scaling_factor = 1 << (num_rounds_max - num_rounds); + E::Scalar::from(scaling_factor as u64) * claim + }), + coeffs.iter() + ), + |scaled_claim, coeff| scaled_claim * coeff + ) + .sum(); + + self.verify(claim, num_rounds_max, degree_bound, transcript) + } + + #[inline] + fn compute_eval_points_quad( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point); + (eval_point_0, eval_point_2) + }) + .reduce(|| (E::Scalar::ZERO, E::Scalar::ZERO), |a, b| (a.0 + b.0, a.1 + b.1)) + } + + pub fn prove_quad( + claim: &E::Scalar, + num_rounds: usize, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = *claim; + for _ in 0..num_rounds { + let poly = { + let (eval_point_0, eval_point_2) = + Self::compute_eval_points_quad(poly_A, poly_B, &comb_func); + + let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; + UniPoly::from_evals(&evals) + }; + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = 
transcript.squeeze(b"c")?; + r.push(r_i); + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bind all tables to the verifier's challenge + rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)); + } + + Ok((Self { compressed_polys: polys }, r, vec![poly_A[0], poly_B[0]])) + } + + pub fn prove_quad_batch( + claims: &[E::Scalar], + num_rounds: &[usize], + mut poly_A_vec: Vec>, + mut poly_B_vec: Vec>, + coeffs: &[E::Scalar], + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let num_claims = claims.len(); + + assert_eq!(num_rounds.len(), num_claims); + assert_eq!(poly_A_vec.len(), num_claims); + assert_eq!(poly_B_vec.len(), num_claims); + assert_eq!(coeffs.len(), num_claims); + + for (i, &num_rounds) in num_rounds.iter().enumerate() { + let expected_size = 1 << num_rounds; + + // Direct indexing with the assumption that the index will always be in bounds + let a = &poly_A_vec[i]; + let b = &poly_B_vec[i]; + + for (l, polyname) in [(a.len(), "poly_A_vec"), (b.len(), "poly_B_vec")].iter() { + assert_eq!(*l, expected_size, "Mismatch in size for {} at index {}", polyname, i); + } + } + + let num_rounds_max = *num_rounds.iter().max().unwrap(); + let mut e = zip_with!(iter, (claims, num_rounds, coeffs), |claim, num_rounds, coeff| { + let scaled_claim = E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; + scaled_claim * coeff + }) + .sum(); + let mut r: Vec = Vec::new(); + let mut quad_polys: Vec> = Vec::new(); + + for current_round in 0..num_rounds_max { + let remaining_rounds = num_rounds_max - current_round; + let evals: Vec<(E::Scalar, E::Scalar)> = zip_with!( + par_iter, + (num_rounds, claims, poly_A_vec, poly_B_vec), + |num_rounds, claim, poly_A, poly_B| { + if remaining_rounds <= *num_rounds { + Self::compute_eval_points_quad(poly_A, poly_B, &comb_func) + } else { 
+ let remaining_variables = remaining_rounds - num_rounds - 1; + let scaled_claim = E::Scalar::from((1 << remaining_variables) as u64) * claim; + (scaled_claim, scaled_claim) + } + } + ) + .collect(); + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); + + let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + // bound all tables to the verifier's challenge + zip_with_for_each!( + (num_rounds.par_iter(), poly_A_vec.par_iter_mut(), poly_B_vec.par_iter_mut()), + |num_rounds, poly_A, poly_B| { + if remaining_rounds <= *num_rounds { + let _ = + rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)); + } + } + ); + + e = poly.evaluate(&r_i); + quad_polys.push(poly.compress()); + } + poly_A_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); + poly_B_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); + + let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect::>(); + let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect::>(); + + let eval_expected = + zip_with!(iter, (poly_A_final, poly_B_final, coeffs), |eA, eB, coeff| comb_func(eA, eB) + * coeff) + .sum::(); + assert_eq!(e, eval_expected); + + let claims_prod = (poly_A_final, poly_B_final); + + Ok((Self::new(quad_polys), r, claims_prod)) + } + + #[inline] + fn compute_eval_points_cubic( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // 
eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + let poly_A_right_term = poly_A[len + i] - poly_A[i]; + let poly_B_right_term = poly_B[len + i] - poly_B[i]; + let poly_C_right_term = poly_C[len + i] - poly_C[i]; + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; + let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; + let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; + let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point, &poly_C_bound_point); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with + // bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; + let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; + let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; + let eval_point_3 = comb_func(&poly_A_bound_point, &poly_B_bound_point, &poly_C_bound_point); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ) + } + + #[inline] + fn compute_eval_points_cubic_with_additive_term( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + poly_C: &MultilinearPolynomial, + poly_D: &MultilinearPolynomial, + comb_func: &F, + ) -> (E::Scalar, E::Scalar, E::Scalar) + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); + + let poly_A_right_term = poly_A[len + i] - poly_A[i]; + let poly_B_right_term = poly_B[len + i] - poly_B[i]; + let poly_C_right_term = poly_C[len + i] - poly_C[i]; + let poly_D_right_term = poly_D[len + i] - poly_D[i]; + + // eval 2: bound_func is -A(low) + 2*A(high) + let 
poly_A_bound_point = poly_A[len + i] + poly_A_right_term; + let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; + let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; + let poly_D_bound_point = poly_D[len + i] + poly_D_right_term; + let eval_point_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with + // bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; + let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; + let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; + let poly_D_bound_point = poly_D_bound_point + poly_D_right_term; + let eval_point_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + (eval_point_0, eval_point_2, eval_point_3) + }) + .reduce( + || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), + |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), + ) + } + + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn prove_cubic_with_additive_term( + claim: &E::Scalar, + num_rounds: usize, + poly_A: &mut MultilinearPolynomial, + poly_B: &mut MultilinearPolynomial, + poly_C: &mut MultilinearPolynomial, + poly_D: &mut MultilinearPolynomial, + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = *claim; + + for _ in 0..num_rounds { + let poly = { + // Make an iterator returning the contributions to the evaluations + let (eval_point_0, eval_point_2, eval_point_3) = + Self::compute_eval_points_cubic_with_additive_term( + poly_A, poly_B, poly_C, poly_D, &comb_func, + ); + + let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2, 
eval_point_3]; + UniPoly::from_evals(&evals) + }; + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bound all tables to the verifier's challenge + rayon::join( + || rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)), + || rayon::join(|| poly_C.bind_poly_var_top(&r_i), || poly_D.bind_poly_var_top(&r_i)), + ); + } + + Ok((Self { compressed_polys: polys }, r, vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]])) + } + + #[allow(clippy::too_many_arguments, clippy::type_complexity)] + pub fn prove_cubic_with_additive_term_batch( + claims: &[E::Scalar], + num_rounds: &[usize], + mut poly_A_vec: Vec>, + mut poly_B_vec: Vec>, + mut poly_C_vec: Vec>, + mut poly_D_vec: Vec>, + coeffs: &[E::Scalar], + comb_func: F, + transcript: &mut E::TE, + ) -> Result<(Self, Vec, Vec>), NovaError> + where + F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, + { + let num_instances = claims.len(); + assert_eq!(num_rounds.len(), num_instances); + assert_eq!(coeffs.len(), num_instances); + assert_eq!(poly_A_vec.len(), num_instances); + assert_eq!(poly_B_vec.len(), num_instances); + assert_eq!(poly_C_vec.len(), num_instances); + assert_eq!(poly_D_vec.len(), num_instances); + + for (i, &num_rounds) in num_rounds.iter().enumerate() { + let expected_size = 1 << num_rounds; + + // Direct indexing with the assumption that the index will always be in bounds + let a = &poly_A_vec[i]; + let b = &poly_B_vec[i]; + let c = &poly_C_vec[i]; + let d = &poly_D_vec[i]; + + for (l, polyname) in + [(a.len(), "poly_A"), (b.len(), "poly_B"), (c.len(), "poly_C"), (d.len(), "poly_D")].iter() + { + assert_eq!(*l, expected_size, "Mismatch in size for {} at index {}", polyname, i); + } + } + + let num_rounds_max = 
*num_rounds.iter().max().unwrap(); + + let mut r: Vec = Vec::new(); + let mut polys: Vec> = Vec::new(); + let mut claim_per_round = + zip_with!(iter, (claims, num_rounds, coeffs), |claim, num_rounds, coeff| { + let scaled_claim = E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; + scaled_claim * *coeff + }) + .sum(); + + for current_round in 0..num_rounds_max { + let remaining_rounds = num_rounds_max - current_round; + let evals: Vec<(E::Scalar, E::Scalar, E::Scalar)> = zip_with!( + par_iter, + (num_rounds, claims, poly_A_vec, poly_B_vec, poly_C_vec, poly_D_vec), + |num_rounds, claim, poly_A, poly_B, poly_C, poly_D| { + if remaining_rounds <= *num_rounds { + Self::compute_eval_points_cubic_with_additive_term( + poly_A, poly_B, poly_C, poly_D, &comb_func, + ) + } else { + let remaining_variables = remaining_rounds - num_rounds - 1; + let scaled_claim = E::Scalar::from((1 << remaining_variables) as u64) * claim; + (scaled_claim, scaled_claim, scaled_claim) + } + } + ) + .collect(); + + let evals_combined_0 = (0..num_instances).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..num_instances).map(|i| evals[i].1 * coeffs[i]).sum(); + let evals_combined_3 = (0..num_instances).map(|i| evals[i].2 * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + claim_per_round - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + transcript.absorb(b"p", &poly); + + // derive the verifier's challenge for the next round + let r_i = transcript.squeeze(b"c")?; + r.push(r_i); + + polys.push(poly.compress()); + + // Set up next round + claim_per_round = poly.evaluate(&r_i); + + // bound all the tables to the verifier's challenge + + zip_with_for_each!( + ( + num_rounds.par_iter(), + poly_A_vec.par_iter_mut(), + poly_B_vec.par_iter_mut(), + poly_C_vec.par_iter_mut(), + poly_D_vec.par_iter_mut() + ), + |num_rounds, poly_A, poly_B, poly_C, 
poly_D| { + if remaining_rounds <= *num_rounds { + let _ = rayon::join( + || rayon::join(|| poly_A.bind_poly_var_top(&r_i), || poly_B.bind_poly_var_top(&r_i)), + || rayon::join(|| poly_C.bind_poly_var_top(&r_i), || poly_D.bind_poly_var_top(&r_i)), + ); + } + } + ); + } + + let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_C_final = poly_C_vec.into_iter().map(|poly| poly[0]).collect(); + let poly_D_final = poly_D_vec.into_iter().map(|poly| poly[0]).collect(); + + Ok((Self { compressed_polys: polys }, r, vec![ + poly_A_final, + poly_B_final, + poly_C_final, + poly_D_final, + ])) + } +} diff --git a/src/supernova/Readme.md b/prover/src/supernova/Readme.md similarity index 98% rename from src/supernova/Readme.md rename to prover/src/supernova/Readme.md index 82c69f1..8d78ae5 100644 --- a/src/supernova/Readme.md +++ b/prover/src/supernova/Readme.md @@ -8,9 +8,10 @@ We aim to provide a mathematical description of the protocol, as it is implement Before delving into the specifics of the implementation, it's crucial to define and clarify some key terms and concepts used throughout this document: - **Recursive SNARK**: A Recursive SNARK is a type of succinct non-interactive argument of knowledge for a circuit $F$ which can be composed with itself as $z\_{i+1} \gets F(z_i)$. -Each iteration proves the verification of a proof for $z_i$ and the correctness of $z\_{i+1}$, ensuring the proving of each step remains constant. + Each iteration proves the verification of a proof for $z_i$ and the correctness of $z\_{i+1}$, ensuring the proving of each step remains constant. - **Augmentation Circuit**: In the context of the SuperNova protocol, an augmentation circuit refers to a circuit $F'$ composing $F$ with a circuit which partially verifies the validity of the previous output $z_i$ before running $F(z_i)$. 
- **NIFS Folding Verifier**: A non-interactive folding scheme is a protocol for efficiently updating a proof $\pi_i$ about an iterated function $z\_{i+1} \gets F(z_i)$ into a new proof $\pi\_{i+1}$, through a process referred to as "folding". + By splitting the proof into an instance/witness pair $(u,w) = \pi$, the folding verifier describes an algorithm for verifying that the $u$ component was properly updated. ## SuperNova vs. Nova @@ -44,7 +45,7 @@ While the original Nova implementation allows computation to be done on both cur The prover needs to store data about the previous function iteration. It is defined by the `supernova::RecursiveSNARK` struct. It contains: - $i$: the number of iterations performed. -Note that the `new` constructor actually performs the first iteration, and the first call to `prove_step` simply sets the counter to 1. + Note that the `new` constructor actually performs the first iteration, and the first call to `prove_step` simply sets the counter to 1. - Primary curve: - $(\mathsf{pc}_i, z_0, z_i)$: current program counter and inputs for the primary circuit - $U[\ ],W[\ ]$: List of relaxed instance/witness pairs for all the circuits on the primary curve. diff --git a/prover/src/supernova/circuit.rs b/prover/src/supernova/circuit.rs new file mode 100644 index 0000000..1846cff --- /dev/null +++ b/prover/src/supernova/circuit.rs @@ -0,0 +1,775 @@ +//! Supernova implementation support arbitrary argumented circuits and running +//! instances. There are two Verification Circuits for each argumented circuit: +//! The primary and the secondary. Each of them is over a cycle curve but +//! only the primary executes the next step of the computation. +//! Each circuit takes as input 2 hashes. +//! Each circuit folds the last invocation of the other into the respective +//! running instance, specified by `augmented_circuit_index` +//! +//! The augmented circuit F' for `SuperNova` that includes everything from Nova +//! and additionally checks: +//! 
1. Ui[] are contained in X[0] hash pre-image. +//! 2. R1CS Instance u is folded into Ui[augmented_circuit_index] correctly; just like Nova IVC. +//! 3. (optional by F logic) F circuit might check `program_counter_{i}` invoked current F +//! circuit is legal or not. +//! 3. F circuit produce `program_counter_{i+1}` and sent to next round to optionally constraint +//! the next F' argumented circuit. +use std::marker::PhantomData; + +use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; +use bellpepper_core::{ + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, SynthesisError, +}; +use ff::{Field, PrimeField}; +use itertools::Itertools as _; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{NIO_NOVA_FOLD, NUM_HASH_BITS}, + gadgets::{ + alloc_num_equals, alloc_scalar_as_base, alloc_zero, conditionally_select_alloc_relaxed_r1cs, + conditionally_select_vec_allocated_relaxed_r1cs_instance, le_bits_to_num, AllocatedPoint, + AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, + }, + r1cs::{R1CSInstance, RelaxedR1CSInstance}, + supernova::{ + num_ro_inputs, + utils::{get_from_vec_alloc_relaxed_r1cs, get_selector_vec_from_index}, + }, + traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, + zip_with, Commitment, +}; + +// NOTE: This trait below is actually useful outside of this if you want to +// implement a step circuit on your own type. We use it in our prover code. +// However, there is a conflicting "StepCircuit" in +// `crate::traits::circuit::StepCircuit` which I deleted. We should likely have +// a supertrait here for NIVC that provides the circuit index because we only +// want that when we are using NIVC. Program counter should be able to be put to +// `None` otherwise, or we could handle that slightly differently too + +/// A helper trait for a step of the incremental computation for `SuperNova` +/// (i.e., circuit for F) -- to be implemented by applications. 
+pub trait StepCircuit: Send + Sync + Clone { + /// Return the the number of inputs or outputs of each step + /// (this method is called only at circuit synthesis time) + /// `synthesize` and `output` methods are expected to take as + /// input a vector of size equal to arity and output a vector of size equal + /// to arity + fn arity(&self) -> usize; + + /// Return this `StepCircuit`'s assigned index, for use when enforcing the + /// program counter. + fn circuit_index(&self) -> usize; + + /// Synthesize the circuit for a computation step and return variable + /// that corresponds to the output of the step `pc_{i+1}` and `z_{i+1}` + #[allow(clippy::type_complexity)] + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError>; +} + +// NOTES: This seems to just enforce that when we call a circuit at a given +// step, it matches the set program counter. + +/// A helper trait for a step of the incremental computation for `SuperNova` +/// (i.e., circuit for F) -- automatically implemented for `StepCircuit` and +/// used internally to enforce that the circuit selected by the program counter +/// is used at each step. +pub trait EnforcingStepCircuit: Send + Sync + Clone + StepCircuit { + /// Delegate synthesis to `StepCircuit::synthesize`, and additionally, + /// enforce the constraint that program counter `pc`, if supplied, is + /// equal to the circuit's assigned index. 
+ #[allow(clippy::type_complexity)] + fn enforcing_synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + if let Some(pc) = pc { + let circuit_index = F::from(self.circuit_index() as u64); + + // pc * 1 = circuit_index + cs.enforce( + || "pc matches circuit index", + |lc| lc + pc.get_variable(), + |lc| lc + CS::one(), + |lc| lc + (circuit_index, CS::one()), + ); + } + self.synthesize(cs, pc, z) + } +} + +impl> EnforcingStepCircuit for S {} + +/// A trivial step circuit that simply returns the input +/// NOTE: Should only be used as secondary circuit!!! +#[derive(Clone, Debug, Default)] +pub struct TrivialCircuit { + _p: PhantomData, +} + +impl StepCircuit for TrivialCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + /// This will not interfere with other circuit indices in the primary + /// circuit. + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + _cs: &mut CS, + program_counter: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + Ok((program_counter.cloned(), z.to_vec())) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SuperNovaAugmentedCircuitParams { + limb_width: usize, + n_limbs: usize, + is_primary_circuit: bool, // A boolean indicating if this is the primary circuit +} + +impl SuperNovaAugmentedCircuitParams { + pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { + Self { limb_width, n_limbs, is_primary_circuit } + } + + pub fn get_n_limbs(&self) -> usize { self.n_limbs } +} + +#[derive(Debug)] +pub struct SuperNovaAugmentedCircuitInputs<'a, E: Engine> { + pp_digest: E::Scalar, + i: E::Base, + /// Input to the circuit for the base case + z0: &'a [E::Base], + /// Input to the circuit for the non-base case + zi: Option<&'a [E::Base]>, + /// List of `RelaxedR1CSInstance`. + /// `None` if this is the base case. 
+ /// Elements are `None` if the circuit at that index was not yet executed. + U: Option<&'a [Option>]>, + /// R1CS proof to be folded into U + u: Option<&'a R1CSInstance>, + /// Nova folding proof for accumulating u into U[j] + T: Option<&'a Commitment>, + /// Index of the current circuit + program_counter: Option, + /// Index j of circuit being folded into U[j] + last_augmented_circuit_index: E::Base, +} + +impl<'a, E: Engine> SuperNovaAugmentedCircuitInputs<'a, E> { + /// Create new inputs/witness for the verification circuit + #[allow(clippy::too_many_arguments)] + pub fn new( + pp_digest: E::Scalar, + i: E::Base, + z0: &'a [E::Base], + zi: Option<&'a [E::Base]>, + U: Option<&'a [Option>]>, + u: Option<&'a R1CSInstance>, + T: Option<&'a Commitment>, + program_counter: Option, + last_augmented_circuit_index: E::Base, + ) -> Self { + Self { pp_digest, i, z0, zi, U, u, T, program_counter, last_augmented_circuit_index } + } +} + +/// The augmented circuit F' in `SuperNova` that includes a step circuit F +/// and the circuit for the verifier in `SuperNova`'s non-interactive folding +/// scheme, `SuperNova` NIFS will fold strictly r1cs instance u with respective +/// relaxed r1cs instance `U[last_augmented_circuit_index]` +pub struct SuperNovaAugmentedCircuit<'a, E: Engine, SC: EnforcingStepCircuit> { + params: &'a SuperNovaAugmentedCircuitParams, + ro_consts: ROConstantsCircuit, + inputs: Option>, + step_circuit: &'a SC, // The function that is applied for each step + num_augmented_circuits: usize, // number of overall augmented circuits +} + +impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit<'a, E, SC> { + /// Create a new verification circuit for the input relaxed r1cs instances + pub const fn new( + params: &'a SuperNovaAugmentedCircuitParams, + inputs: Option>, + step_circuit: &'a SC, + ro_consts: ROConstantsCircuit, + num_augmented_circuits: usize, + ) -> Self { + Self { params, inputs, step_circuit, ro_consts, num_augmented_circuits } + } 
+ + /// Allocate all witnesses from the augmented function's non-deterministic + /// inputs. Optional entries are allocated as their default values. + #[allow(clippy::type_complexity)] + fn alloc_witness::Base>>( + &self, + mut cs: CS, + arity: usize, + num_augmented_circuits: usize, + ) -> Result< + ( + AllocatedNum, + AllocatedNum, + Vec>, + Vec>, + Vec>, + AllocatedR1CSInstance, + AllocatedPoint, + Option>, + Vec, + ), + SynthesisError, + > { + let last_augmented_circuit_index = + AllocatedNum::alloc(cs.namespace(|| "last_augmented_circuit_index"), || { + Ok(self.inputs.get()?.last_augmented_circuit_index) + })?; + + // Allocate the params + let params = alloc_scalar_as_base::( + cs.namespace(|| "params"), + self.inputs.as_ref().map(|inputs| inputs.pp_digest), + )?; + + // Allocate i + let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; + + // Allocate program_counter only on primary circuit + let program_counter = if self.params.is_primary_circuit { + Some(AllocatedNum::alloc(cs.namespace(|| "program_counter"), || { + Ok(self.inputs.get()?.program_counter.expect("program_counter missing")) + })?) + } else { + None + }; + + // Allocate z0 + let z_0 = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || Ok(self.inputs.get()?.z0[i])) + }) + .collect::>, _>>()?; + + // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value + // 0 + let zero = vec![E::Base::ZERO; arity]; + let z_i = (0..arity) + .map(|i| { + AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { + Ok(self.inputs.get()?.zi.unwrap_or(&zero)[i]) + }) + }) + .collect::>, _>>()?; + + // Allocate the running instances + let U = (0..num_augmented_circuits) + .map(|i| { + AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| format!("Allocate U {:?}", i)), + self.inputs.as_ref().and_then(|inputs| inputs.U.and_then(|U| U[i].as_ref())), + self.params.limb_width, + self.params.n_limbs, + ) + }) + .collect::>, _>>()?; + + // Allocate the r1cs instance to be folded in + let u = AllocatedR1CSInstance::alloc( + cs.namespace(|| "allocate instance u to fold"), + self.inputs.as_ref().and_then(|inputs| inputs.u), + )?; + + // Allocate T + let T = AllocatedPoint::alloc( + cs.namespace(|| "allocate T"), + self.inputs.as_ref().and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), + )?; + T.check_on_curve(cs.namespace(|| "check T on curve"))?; + + // Compute instance selector + let last_augmented_circuit_selector = get_selector_vec_from_index( + cs.namespace(|| "instance selector"), + &last_augmented_circuit_index, + num_augmented_circuits, + )?; + + Ok((params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector)) + } + + /// Synthesizes base case and returns the new relaxed `R1CSInstance` + fn synthesize_base_case::Base>>( + &self, + mut cs: CS, + u: AllocatedR1CSInstance, + last_augmented_circuit_selector: &[Boolean], + ) -> Result>, SynthesisError> { + let mut cs = cs.namespace(|| "alloc U_i default"); + + // Allocate a default relaxed r1cs instance + let default = AllocatedRelaxedR1CSInstance::default( + cs.namespace(|| "Allocate primary U_default".to_string()), + self.params.limb_width, + self.params.n_limbs, + )?; + + // The primary circuit just initialize single AllocatedRelaxedR1CSInstance + let U_default = if self.params.is_primary_circuit 
{ + vec![default] + } else { + // The secondary circuit convert the incoming R1CS instance on index which match + // last_augmented_circuit_index + let incoming_r1cs = AllocatedRelaxedR1CSInstance::from_r1cs_instance( + cs.namespace(|| "Allocate incoming_r1cs"), + u, + self.params.limb_width, + self.params.n_limbs, + )?; + + last_augmented_circuit_selector + .iter() + .enumerate() + .map(|(i, equal_bit)| { + // If index match last_augmented_circuit_index, then return incoming_r1cs, + // otherwise return the default one + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| format!("select on index namespace {:?}", i)), + &incoming_r1cs, + &default, + equal_bit, + ) + }) + .collect::>, _>>()? + }; + Ok(U_default) + } + + /// Synthesizes non base case and returns the new relaxed `R1CSInstance` + /// And a boolean indicating if all checks pass + #[allow(clippy::too_many_arguments)] + fn synthesize_non_base_case::Base>>( + &self, + mut cs: CS, + params: &AllocatedNum, + i: &AllocatedNum, + z_0: &[AllocatedNum], + z_i: &[AllocatedNum], + U: &[AllocatedRelaxedR1CSInstance], + u: &AllocatedR1CSInstance, + T: &AllocatedPoint, + arity: usize, + last_augmented_circuit_selector: &[Boolean], + program_counter: &Option>, + ) -> Result<(Vec>, AllocatedBit), SynthesisError> + { + // Check that u.x[0] = Hash(params, i, program_counter, z0, zi, U[]) + let mut ro = E::ROCircuit::new( + self.ro_consts.clone(), + num_ro_inputs( + self.num_augmented_circuits, + self.params.get_n_limbs(), + arity, + self.params.is_primary_circuit, + ), + ); + ro.absorb(params); + ro.absorb(i); + + if self.params.is_primary_circuit { + let Some(program_counter) = program_counter.as_ref() else { + return Err(SynthesisError::AssignmentMissing); + }; + ro.absorb(program_counter) + } + + for e in z_0 { + ro.absorb(e); + } + for e in z_i { + ro.absorb(e); + } + + U.iter().enumerate().try_for_each(|(i, U)| { + U.absorb_in_ro(cs.namespace(|| format!("absorb U {:?}", i)), &mut ro) + })?; + + let hash_bits 
= ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; + let check_pass: AllocatedBit = alloc_num_equals( + cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), + &u.X[0], + &hash, + )?; + + // Run NIFS Verifier + let U_to_fold = get_from_vec_alloc_relaxed_r1cs( + cs.namespace(|| "U to fold"), + U, + last_augmented_circuit_selector, + )?; + let U_fold = U_to_fold.fold_with_r1cs( + cs.namespace(|| "compute fold of U and u"), + params, + u, + T, + self.ro_consts.clone(), + self.params.limb_width, + self.params.n_limbs, + )?; + + // update AllocatedRelaxedR1CSInstance on index match augmented circuit index + let U_next: Vec> = + zip_with!((U.iter(), last_augmented_circuit_selector.iter()), |U, equal_bit| { + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| "select on index namespace"), + &U_fold, + U, + equal_bit, + ) + }) + .collect::>, _>>()?; + + Ok((U_next, check_pass)) + } + + #[allow(clippy::type_complexity)] + pub fn synthesize::Base>>( + self, + cs: &mut CS, + ) -> Result<(Option>, Vec>), SynthesisError> { + let arity = self.step_circuit.arity(); + let num_augmented_circuits = if self.params.is_primary_circuit { + // primary circuit only fold single running instance with secondary output + // strict r1cs instance + 1 + } else { + // secondary circuit contains the logic to choose one of multiple augments + // running instance to fold + self.num_augmented_circuits + }; + + if self.inputs.is_some() { + // Check arity of z0 + let z0_len = self.inputs.as_ref().map_or(0, |inputs| inputs.z0.len()); + if self.step_circuit.arity() != z0_len { + return Err(SynthesisError::IncompatibleLengthVector(format!( + "z0_len {:?} != arity length {:?}", + z0_len, + self.step_circuit.arity() + ))); + } + + // The primary curve should always fold the circuit with index 0 + let last_augmented_circuit_index = + self.inputs.get().map_or(E::Base::ZERO, |inputs| 
inputs.last_augmented_circuit_index); + if self.params.is_primary_circuit && last_augmented_circuit_index != E::Base::ZERO { + return Err(SynthesisError::IncompatibleLengthVector( + "primary circuit running instance only valid on index 0".to_string(), + )); + } + } + + // Allocate witnesses + let (params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector) = self + .alloc_witness( + cs.namespace(|| "allocate the circuit witness"), + arity, + num_augmented_circuits, + )?; + + // Compute variable indicating if this is the base case + let zero = alloc_zero(cs.namespace(|| "zero")); + let is_base_case = alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; + + // Synthesize the circuit for the non-base case and get the new running + // instances along with a boolean indicating if all checks have passed + // must use return `last_augmented_circuit_index_checked` since it got range + // checked + let (U_next_non_base, check_non_base_pass) = self.synthesize_non_base_case( + cs.namespace(|| "synthesize non base case"), + ¶ms, + &i, + &z_0, + &z_i, + &U, + &u, + &T, + arity, + &last_augmented_circuit_selector, + &program_counter, + )?; + + // Synthesize the circuit for the base case and get the new running instances + let U_next_base = self.synthesize_base_case( + cs.namespace(|| "base case"), + u.clone(), + &last_augmented_circuit_selector, + )?; + + // Either check_non_base_pass=true or we are in the base case + let should_be_false = AllocatedBit::nor( + cs.namespace(|| "check_non_base_pass nor base_case"), + &check_non_base_pass, + &is_base_case, + )?; + cs.enforce( + || "check_non_base_pass nor base_case = false", + |lc| lc + should_be_false.get_variable(), + |lc| lc + CS::one(), + |lc| lc, + ); + + // Compute the U_next + let U_next = conditionally_select_vec_allocated_relaxed_r1cs_instance( + cs.namespace(|| "U_next"), + &U_next_base[..], + &U_next_non_base[..], + &Boolean::from(is_base_case.clone()), + )?; + + // Compute i + 
1 + let i_next = + AllocatedNum::alloc(cs.namespace(|| "i + 1"), || Ok(*i.get_value().get()? + E::Base::ONE))?; + cs.enforce( + || "check i + 1", + |lc| lc + i.get_variable() + CS::one(), + |lc| lc + CS::one(), + |lc| lc + i_next.get_variable(), + ); + + // Compute z_{i+1} + let z_input = conditionally_select_slice( + cs.namespace(|| "select input to F"), + &z_0, + &z_i, + &Boolean::from(is_base_case), + )?; + + let (program_counter_new, z_next) = self.step_circuit.enforcing_synthesize( + &mut cs.namespace(|| "F"), + program_counter.as_ref(), + &z_input, + )?; + + if z_next.len() != arity { + return Err(SynthesisError::IncompatibleLengthVector("z_next".to_string())); + } + + // To check correct folding sequencing we are just going to make a hash. + // The next RunningInstance folding can take the pre-image of this hash as + // witness and check. + + // "Finally, there is a subtle sizing issue in the above description: in each + // step, because Ui+1 is produced as the public IO of F0 + // program_counter+1, it must be contained in the public IO of instance + // ui+1. In the next iteration, because ui+1 is folded + // into Ui+1[program_counter+1], this means that Ui+1[program_counter+1] is at + // least as large as Ui by the properties of the folding scheme. This + // means that the list of running instances grows in each step. To + // alleviate this issue, we have each F0j only produce a hash + // of its outputs as public output. In the subsequent step, the next augmented + // function takes as non-deterministic input a preimage to this hash." 
pg.16 + + // https://eprint.iacr.org/2022/1758.pdf + + // Compute the new hash H(params, i+1, program_counter, z0, z_{i+1}, U_next) + let mut ro = E::ROCircuit::new( + self.ro_consts.clone(), + num_ro_inputs( + self.num_augmented_circuits, + self.params.get_n_limbs(), + self.step_circuit.arity(), + self.params.is_primary_circuit, + ), + ); + ro.absorb(¶ms); + ro.absorb(&i_next); + // optionally absorb program counter if exist + if program_counter.is_some() { + ro.absorb(program_counter_new.as_ref().expect("new program counter missing")) + } + for e in &z_0 { + ro.absorb(e); + } + for e in &z_next { + ro.absorb(e); + } + U_next.iter().enumerate().try_for_each(|(i, U)| { + U.absorb_in_ro(cs.namespace(|| format!("absorb U_new {:?}", i)), &mut ro) + })?; + + let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; + let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; + + // We are cycling of curve implementation, so primary/secondary will rotate hash + // in IO for the others to check bypass unmodified hash of other circuit + // as next X[0] and output the computed the computed hash as next X[1] + u.X[1].inputize(cs.namespace(|| "bypass unmodified hash of the other circuit"))?; + hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; + + Ok((program_counter_new, z_next)) + } +} + +#[cfg(test)] +mod tests { + use expect_test::{expect, Expect}; + + use super::*; + use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + solver::SatisfyingAssignment, + test_shape_cs::TestShapeCS, + }, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, + gadgets::scalar_as_base, + provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, + supernova::circuit::TrivialCircuit, + traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, + }; + + // In the following we use 1 to refer to the primary, and 2 to refer to the + // secondary circuit + fn test_supernova_recursive_circuit_with( + primary_params: 
&SuperNovaAugmentedCircuitParams, + secondary_params: &SuperNovaAugmentedCircuitParams, + ro_consts1: ROConstantsCircuit>, + ro_consts2: ROConstantsCircuit, + num_constraints_primary: &Expect, + num_constraints_secondary: &Expect, + num_augmented_circuits: usize, + ) where + E1: CurveCycleEquipped, + { + let tc1 = TrivialCircuit::default(); + // Initialize the shape and ck for the primary + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new( + primary_params, + None, + &tc1, + ro_consts1.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS = TestShapeCS::new(); + let _ = circuit1.synthesize(&mut cs); + let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); + + let tc2 = TrivialCircuit::default(); + // Initialize the shape and ck for the secondary + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new( + secondary_params, + None, + &tc2, + ro_consts2.clone(), + num_augmented_circuits, + ); + let mut cs: TestShapeCS> = TestShapeCS::new(); + let _ = circuit2.synthesize(&mut cs); + let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + + num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); + + // Execute the base case for the primary + let zero1 = < as Engine>::Base as Field>::ZERO; + let mut cs1 = SatisfyingAssignment::::new(); + let vzero1 = vec![zero1]; + let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(zero1), // pass zero for testing + zero1, + &vzero1, + None, + None, + None, + None, + Some(zero1), + zero1, + ); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new( + primary_params, + Some(inputs1), + &tc1, + ro_consts1, + num_augmented_circuits, + ); + let _ = 
circuit1.synthesize(&mut cs1); + let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); + // Make sure that this is satisfiable + shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + + // Execute the base case for the secondary + let zero2 = <::Base as Field>::ZERO; + let mut cs2 = SatisfyingAssignment::>::new(); + let vzero2 = vec![zero2]; + let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::>(zero2), // pass zero for testing + zero2, + &vzero2, + None, + None, + Some(&inst1), + None, + Some(zero2), + zero2, + ); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new( + secondary_params, + Some(inputs2), + &tc2, + ro_consts2, + num_augmented_circuits, + ); + let _ = circuit2.synthesize(&mut cs2); + let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); + // Make sure that it is satisfiable + shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); + } + + #[test] + fn test_supernova_recursive_circuit_grumpkin() { + let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + + test_supernova_recursive_circuit_with::( + ¶ms1, + ¶ms2, + ro_consts1, + ro_consts2, + &expect!["10004"], + &expect!["10573"], + 1, + ); + // TODO: extend to num_augmented_circuits >= 2 + } +} diff --git a/src/supernova/error.rs b/prover/src/supernova/error.rs similarity index 59% rename from src/supernova/error.rs rename to prover/src/supernova/error.rs index 95a25d8..84cebea 100644 --- a/src/supernova/error.rs +++ b/prover/src/supernova/error.rs @@ -11,13 +11,13 @@ use crate::errors::NovaError; /// Errors returned by Nova #[derive(Debug, Eq, PartialEq, Error)] pub enum 
SuperNovaError { - /// Nova error - #[error("NovaError")] - NovaError(#[from] NovaError), - /// missing commitment key - #[error("MissingCK")] - MissingCK, - /// Extended error for supernova - #[error("UnSatIndex")] - UnSatIndex(&'static str, usize), + /// Nova error + #[error("NovaError")] + NovaError(#[from] NovaError), + /// missing commitment key + #[error("MissingCK")] + MissingCK, + /// Extended error for supernova + #[error("UnSatIndex")] + UnSatIndex(&'static str, usize), } diff --git a/prover/src/supernova/mod.rs b/prover/src/supernova/mod.rs new file mode 100644 index 0000000..5130f9c --- /dev/null +++ b/prover/src/supernova/mod.rs @@ -0,0 +1,1197 @@ +#![doc = include_str!("./Readme.md")] + +use std::{ops::Index, sync::Arc}; + +use bellpepper_core::{ConstraintSystem, SynthesisError}; +use ff::Field; +use itertools::Itertools as _; +use once_cell::sync::OnceCell; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use tracing::debug; + +use crate::{ + bellpepper::{ + r1cs::{NovaShape, NovaWitness}, + shape_cs::ShapeCS, + solver::SatisfyingAssignment, + }, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, + digest::{DigestComputer, SimpleDigestible}, + errors::NovaError, + nifs::NIFS, + r1cs::{ + self, commitment_key_size, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, + RelaxedR1CSInstance, RelaxedR1CSWitness, + }, + scalar_as_base, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait}, + AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, ROTrait, + }, + Commitment, CommitmentKey, R1CSWithArity, +}; + +mod circuit; // declare the module first +pub use circuit::{StepCircuit, SuperNovaAugmentedCircuitParams, TrivialCircuit}; +use circuit::{SuperNovaAugmentedCircuit, SuperNovaAugmentedCircuitInputs}; +use error::SuperNovaError; + +/// A struct that manages all the digests of the primary circuits of a SuperNova +/// instance +#[derive(Debug, PartialEq, Eq, Serialize)] +pub 
struct CircuitDigests { + digests: Vec, +} + +impl SimpleDigestible for CircuitDigests {} + +impl std::ops::Deref for CircuitDigests { + type Target = Vec; + + fn deref(&self) -> &Self::Target { &self.digests } +} + +impl CircuitDigests { + /// Construct a new [`CircuitDigests`] + pub fn new(digests: Vec) -> Self { Self { digests } } + + /// Return the [`CircuitDigests`]' digest. + pub fn digest(&self) -> E::Scalar { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest().expect("Failure in computing digest") + } +} + +/// A vector of [`R1CSWithArity`] adjoined to a set of [`PublicParams`] +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct PublicParams +where E1: CurveCycleEquipped { + /// The internal circuit shapes + circuit_shapes: Vec>, + + ro_consts_primary: ROConstants, + ro_consts_circuit_primary: ROConstantsCircuit>, + ck_primary: Arc>, // This is shared between all circuit params + augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, + + ro_consts_secondary: ROConstants>, + ro_consts_circuit_secondary: ROConstantsCircuit, + ck_secondary: Arc>>, + circuit_shape_secondary: R1CSWithArity>, + augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, + + /// Digest constructed from this `PublicParams`' parameters + #[serde(skip, default = "OnceCell::new")] + digest: OnceCell, +} + +/// Auxiliary [`PublicParams`] information about the commitment keys and +/// secondary circuit. This is used as a helper struct when reconstructing +/// [`PublicParams`] downstream in lurk. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct AuxParams +where E1: CurveCycleEquipped { + pub ro_consts_primary: ROConstants, + pub ro_consts_circuit_primary: ROConstantsCircuit>, + pub ck_primary: Arc>, // This is shared between all circuit params + pub augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, + + pub ro_consts_secondary: ROConstants>, + pub ro_consts_circuit_secondary: ROConstantsCircuit, + pub ck_secondary: Arc>>, + pub circuit_shape_secondary: R1CSWithArity>, + pub augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, + + pub digest: E1::Scalar, +} + +use std::io::Cursor; + +use crate::{ + fast_serde, + fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, +}; + +impl FastSerde for AuxParams +where + E1: CurveCycleEquipped, + >::CommitmentKey: FastSerde, + <::CE as CommitmentEngineTrait>::CommitmentKey: FastSerde, +{ + /// Byte format: + /// [0..4] - Magic number (4 bytes) + /// [4] - Serde type: AuxParams (u8) + /// [5] - Number of sections (u8 = 8) + /// Sections (repeated 8 times): + /// [N] - Section type (u8) + /// [N+1..5] - Section size (u32) + /// [N+5..] 
- Section data (variable length) + /// Section types: + /// 1: ro_consts_primary (bincode) + /// 2: ro_consts_circuit_primary (bincode) + /// 3: ck_primary (FastSerde) + /// 4: ro_consts_secondary (bincode) + /// 5: ro_consts_circuit_secondary (bincode) + /// 6: ck_secondary (FastSerde) + /// 7: circuit_shape_secondary (json) + /// 8: digest (bincode) + fn to_bytes(&self) -> Vec { + let mut out = Vec::new(); + + // Write header + out.extend_from_slice(&fast_serde::MAGIC_NUMBER); + out.push(fast_serde::SerdeByteTypes::AuxParams as u8); + out.push(8); // num_sections + + // Write sections + Self::write_section_bytes(&mut out, 1, &bincode::serialize(&self.ro_consts_primary).unwrap()); + Self::write_section_bytes( + &mut out, + 2, + &bincode::serialize(&self.ro_consts_circuit_primary).unwrap(), + ); + Self::write_section_bytes(&mut out, 3, &self.ck_primary.to_bytes()); + Self::write_section_bytes(&mut out, 4, &bincode::serialize(&self.ro_consts_secondary).unwrap()); + Self::write_section_bytes( + &mut out, + 5, + &bincode::serialize(&self.ro_consts_circuit_secondary).unwrap(), + ); + Self::write_section_bytes(&mut out, 6, &self.ck_secondary.to_bytes()); + Self::write_section_bytes( + &mut out, + 7, + &bincode::serialize(&self.circuit_shape_secondary).unwrap(), + ); + Self::write_section_bytes(&mut out, 8, &bincode::serialize(&self.digest).unwrap()); + + out + } + + fn from_bytes(bytes: &[u8]) -> Result { + let mut cursor = Cursor::new(bytes); + + // Validate header + Self::validate_header(&mut cursor, SerdeByteTypes::AuxParams, 8)?; + + // Read all sections + let ro_consts_primary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 1)?)?; + let ro_consts_circuit_primary = + bincode::deserialize(&Self::read_section_bytes(&mut cursor, 2)?)?; + let ck_primary = Arc::new(>::CommitmentKey::from_bytes( + &Self::read_section_bytes(&mut cursor, 3)?, + )?); + let ro_consts_secondary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 4)?)?; + let 
ro_consts_circuit_secondary = + bincode::deserialize(&Self::read_section_bytes(&mut cursor, 5)?)?; + let ck_secondary = Arc::new(<::CE as CommitmentEngineTrait< + E1::Secondary, + >>::CommitmentKey::from_bytes(&Self::read_section_bytes( + &mut cursor, + 6, + )?)?); + let circuit_shape_secondary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 7)?)?; + let digest = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 8)?)?; + + // NOTE: This does not check the digest. Maybe we should. + Ok(Self { + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams::new( + BN_LIMB_WIDTH, + BN_N_LIMBS, + true, + ), + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams::new( + BN_LIMB_WIDTH, + BN_N_LIMBS, + false, + ), + digest, + }) + } +} + +impl Index for PublicParams +where E1: CurveCycleEquipped +{ + type Output = R1CSWithArity; + + fn index(&self, index: usize) -> &Self::Output { &self.circuit_shapes[index] } +} + +impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} + +impl PublicParams +where E1: CurveCycleEquipped +{ + /// Construct a new [`PublicParams`] + /// + /// # Note + /// + /// Public parameters set up a number of bases for the homomorphic + /// commitment scheme of Nova. + /// + /// Some final compressing SNARKs, like variants of Spartan, use computation + /// commitments that require larger sizes for these parameters. These + /// SNARKs provide a hint for these values by implementing + /// `RelaxedR1CSSNARKTrait::commitment_key_floor()`, which can be passed to + /// this function. + /// + /// If you're not using such a SNARK, pass `&(|_| 0)` instead. + /// + /// # Arguments + /// + /// * `non_uniform_circuit`: The non-uniform circuit of type `NC`. 
+ /// * `ck_hint1`: A `CommitmentKeyHint` for `E1`, which is a function that provides a hint for the + /// number of generators required in the commitment scheme for the primary circuit. + /// * `ck_hint2`: A `CommitmentKeyHint` for `E2`, similar to `ck_hint1`, but for the secondary + /// circuit. + pub fn setup>( + non_uniform_circuit: &NC, + ck_hint1: &CommitmentKeyHint, + ck_hint2: &CommitmentKeyHint>, + ) -> Self { + let num_circuits = non_uniform_circuit.num_circuits(); + + let augmented_circuit_params_primary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let ro_consts_primary: ROConstants = ROConstants::::default(); + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + + let circuit_shapes = get_circuit_shapes(non_uniform_circuit); + + let ck_primary = Self::compute_primary_ck(&circuit_shapes, ck_hint1); + let ck_primary = Arc::new(ck_primary); + + let augmented_circuit_params_secondary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts_secondary = ROConstants::>::default(); + let c_secondary = non_uniform_circuit.secondary_circuit(); + let F_arity_secondary = c_secondary.arity(); + let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::default(); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, NC::C2> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params_secondary, + None, + &c_secondary, + ro_consts_circuit_secondary.clone(), + num_circuits, + ); + let mut cs: ShapeCS> = ShapeCS::new(); + circuit_secondary.synthesize(&mut cs).expect("circuit synthesis failed"); + let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); + let ck_secondary = Arc::new(ck_secondary); + let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); + + let pp = Self { + 
circuit_shapes, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest: OnceCell::new(), + }; + + // make sure to initialize the `OnceCell` and compute the digest + // and avoid paying for unexpected performance costs later + pp.digest(); + pp + } + + /// Breaks down an instance of [`PublicParams`] into the circuit params and + /// auxiliary params. + pub fn into_parts(self) -> (Vec>, AuxParams) { + let digest = self.digest(); + + let Self { + circuit_shapes, + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest: _digest, + } = self; + + let aux_params = AuxParams { + ro_consts_primary, + ro_consts_circuit_primary, + ck_primary, + augmented_circuit_params_primary, + ro_consts_secondary, + ro_consts_circuit_secondary, + ck_secondary, + circuit_shape_secondary, + augmented_circuit_params_secondary, + digest, + }; + + (circuit_shapes, aux_params) + } + + /// Returns just the [`AuxParams`] portion of [`PublicParams`] from a + /// reference to [`PublicParams`]. 
+ pub fn aux_params(&self) -> AuxParams { + AuxParams { + ro_consts_primary: self.ro_consts_primary.clone(), + ro_consts_circuit_primary: self.ro_consts_circuit_primary.clone(), + ck_primary: self.ck_primary.clone(), + augmented_circuit_params_primary: self.augmented_circuit_params_primary.clone(), + ro_consts_secondary: self.ro_consts_secondary.clone(), + ro_consts_circuit_secondary: self.ro_consts_circuit_secondary.clone(), + ck_secondary: self.ck_secondary.clone(), + circuit_shape_secondary: self.circuit_shape_secondary.clone(), + augmented_circuit_params_secondary: self.augmented_circuit_params_secondary.clone(), + digest: self.digest(), + } + } + + /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and + /// auxiliary params. + pub fn from_parts(circuit_shapes: Vec>, aux_params: AuxParams) -> Self { + let pp = Self { + circuit_shapes, + ro_consts_primary: aux_params.ro_consts_primary, + ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, + ck_primary: aux_params.ck_primary, + augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, + ro_consts_secondary: aux_params.ro_consts_secondary, + ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, + ck_secondary: aux_params.ck_secondary, + circuit_shape_secondary: aux_params.circuit_shape_secondary, + augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, + digest: OnceCell::new(), + }; + assert_eq!( + aux_params.digest, + pp.digest(), + "param data is invalid; aux_params contained the incorrect digest" + ); + pp + } + + /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and + /// auxiliary params. We don't check that the `aux_params.digest` is a + /// valid digest for the created params. 
+ pub fn from_parts_unchecked( + circuit_shapes: Vec>, + aux_params: AuxParams, + ) -> Self { + Self { + circuit_shapes, + ro_consts_primary: aux_params.ro_consts_primary, + ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, + ck_primary: aux_params.ck_primary, + augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, + ro_consts_secondary: aux_params.ro_consts_secondary, + ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, + ck_secondary: aux_params.ck_secondary, + circuit_shape_secondary: aux_params.circuit_shape_secondary, + augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, + digest: aux_params.digest.into(), + } + } + + /// Compute primary and secondary commitment keys sized to handle the + /// largest of the circuits in the provided `R1CSWithArity`. + fn compute_primary_ck( + circuit_params: &[R1CSWithArity], + ck_hint1: &CommitmentKeyHint, + ) -> CommitmentKey { + let size_primary = circuit_params + .iter() + .map(|circuit| commitment_key_size(&circuit.r1cs_shape, ck_hint1)) + .max() + .unwrap(); + + E1::CE::setup(b"ck", size_primary) + } + + /// Return the [`PublicParams`]' digest. 
+ pub fn digest(&self) -> E1::Scalar { + self + .digest + .get_or_try_init(|| { + let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); + dc.digest() + }) + .cloned() + .expect("Failure in retrieving digest") + } + + /// Returns the number of constraints and variables of inner circuit based + /// on index + pub fn num_constraints_and_variables(&self, index: usize) -> (usize, usize) { + (self.circuit_shapes[index].r1cs_shape.num_cons, self.circuit_shapes[index].r1cs_shape.num_vars) + } + + /// Returns the number of constraints and variables of the secondary circuit + pub fn num_constraints_and_variables_secondary(&self) -> (usize, usize) { + ( + self.circuit_shape_secondary.r1cs_shape.num_cons, + self.circuit_shape_secondary.r1cs_shape.num_vars, + ) + } + + /// All of the primary circuit digests of this [`PublicParams`] + pub fn circuit_param_digests(&self) -> CircuitDigests { + let digests = self.circuit_shapes.iter().map(|cp| cp.digest()).collect::>(); + CircuitDigests { digests } + } + + /// Returns all the primary R1CS Shapes + fn primary_r1cs_shapes(&self) -> Vec<&R1CSShape> { + self.circuit_shapes.iter().map(|cs| &cs.r1cs_shape).collect::>() + } +} + +pub fn get_circuit_shapes>( + non_uniform_circuit: &NC, +) -> Vec> { + let num_circuits = non_uniform_circuit.num_circuits(); + let augmented_circuit_params_primary = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit_primary are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit_primary: ROConstantsCircuit> = + ROConstantsCircuit::>::default(); + + (0..num_circuits) + .map(|i| { + let c_primary = non_uniform_circuit.primary_circuit(i); + let F_arity = c_primary.arity(); + // Initialize ck for the primary + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, NC::C1> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params_primary, + None, + &c_primary, + ro_consts_circuit_primary.clone(), + 
num_circuits, + ); + let mut cs: ShapeCS = ShapeCS::new(); + circuit_primary.synthesize(&mut cs).expect("circuit synthesis failed"); + + // We use the largest commitment_key for all instances + let r1cs_shape_primary = cs.r1cs_shape(); + R1CSWithArity::new(r1cs_shape_primary, F_arity) + }) + .collect::>() +} + +/// A resource buffer for SuperNova's [`RecursiveSNARK`] for storing scratch +/// values that are computed by `prove_step`, which allows the reuse of memory +/// allocations and avoids unnecessary new allocations in the critical section. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +struct ResourceBuffer { + l_w: Option>, + l_u: Option>, + + ABC_Z_1: R1CSResult, + ABC_Z_2: R1CSResult, + + /// buffer for `commit_T` + T: Vec, +} + +/// A SNARK that proves the correct execution of an non-uniform incremental +/// computation +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct RecursiveSNARK +where E1: CurveCycleEquipped { + // Cached digest of the public parameters + pp_digest: E1::Scalar, + num_augmented_circuits: usize, + + // Number of iterations performed up to now + i: usize, + + // Inputs and outputs of the primary circuits + z0_primary: Vec, + zi_primary: Vec, + + // Proven circuit index, and current program counter + proven_circuit_index: usize, + program_counter: E1::Scalar, + + /// Buffer for memory needed by the primary fold-step + buffer_primary: ResourceBuffer, + /// Buffer for memory needed by the secondary fold-step + buffer_secondary: ResourceBuffer>, + + // Relaxed instances for the primary circuits + // Entries are `None` if the circuit has not been executed yet + r_W_primary: Vec>>, + r_U_primary: Vec>>, + + // Inputs and outputs of the secondary circuit + z0_secondary: Vec< as Engine>::Scalar>, + zi_secondary: Vec< as Engine>::Scalar>, + // Relaxed instance for the secondary circuit + r_W_secondary: RelaxedR1CSWitness>, + r_U_secondary: RelaxedR1CSInstance>, + // Proof for the secondary 
circuit to be accumulated into r_secondary in the next iteration + l_w_secondary: R1CSWitness>, + l_u_secondary: R1CSInstance>, +} + +impl RecursiveSNARK +where E1: CurveCycleEquipped +{ + /// iterate base step to get new instance of recursive SNARK + #[allow(clippy::too_many_arguments)] + pub fn new>( + pp: &PublicParams, + non_uniform_circuit: &C0, + c_primary: &C0::C1, + c_secondary: &C0::C2, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result { + let num_augmented_circuits = non_uniform_circuit.num_circuits(); + let circuit_index = non_uniform_circuit.initial_circuit_index(); + + let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; + + // check the length of the secondary initial input + if z0_secondary.len() != pp.circuit_shape_secondary.F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + + // check the arity of all the primary circuits match the initial input length + pp.circuit_shapes.iter().try_for_each(|circuit| { + if circuit.F_arity != z0_primary.len() { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + Ok(()) + })?; + + // base case for the primary + let mut cs_primary = SatisfyingAssignment::::new(); + let program_counter = E1::Scalar::from(circuit_index as u64); + let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(pp.digest()), + E1::Scalar::ZERO, + z0_primary, + None, // zi = None for basecase + None, // U = [None], since no previous proofs have been computed + None, // u = None since we are not verifying a secondary circuit + None, // T = None since there is not proof to fold + Some(program_counter), // pc = initial_program_counter for primary circuit + E1::Scalar::ZERO, // u_index is always zero for the primary circuit + ); + + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C0::C1> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, 
+ Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + num_augmented_circuits, + ); + + let (zi_primary_pc_next, zi_primary) = + circuit_primary.synthesize(&mut cs_primary).map_err(|err| { + debug!("err {:?}", err); + NovaError::from(err) + })?; + if zi_primary.len() != pp[circuit_index].F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + let (u_primary, w_primary) = cs_primary + .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) + .map_err(|err| { + debug!("err {:?}", err); + err + })?; + + // base case for the secondary + let mut cs_secondary = SatisfyingAssignment::>::new(); + let u_primary_index = as Engine>::Scalar::from(circuit_index as u64); + let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = + SuperNovaAugmentedCircuitInputs::new( + pp.digest(), + as Engine>::Scalar::ZERO, + z0_secondary, + None, // zi = None for basecase + None, // U = Empty list of accumulators for the primary circuits + Some(&u_primary), // Proof for first iteration of current primary circuit + None, // T = None, since we just copy u_primary rather than fold it + None, // program_counter is always None for secondary circuit + u_primary_index, // index of the circuit proof u_primary + ); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C0::C2> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + num_augmented_circuits, + ); + let (_, zi_secondary) = + circuit_secondary.synthesize(&mut cs_secondary).map_err(NovaError::from)?; + if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { + return Err(NovaError::InvalidStepOutputLength.into()); + } + let (u_secondary, w_secondary) = cs_secondary + .r1cs_instance_and_witness(r1cs_secondary, &pp.ck_secondary) + .map_err(|_| SuperNovaError::NovaError(NovaError::UnSat))?; + + // IVC proof for the primary circuit + let 
l_w_primary = w_primary; + let l_u_primary = u_primary; + let r_W_primary = + RelaxedR1CSWitness::from_r1cs_witness(&pp[circuit_index].r1cs_shape, l_w_primary); + + let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( + &*pp.ck_primary, + &pp[circuit_index].r1cs_shape, + l_u_primary, + ); + + // IVC proof of the secondary circuit + let l_w_secondary = w_secondary; + let l_u_secondary = u_secondary; + + // Initialize relaxed instance/witness pair for the secondary circuit proofs + let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); + let r_U_secondary = RelaxedR1CSInstance::default(&*pp.ck_secondary, r1cs_secondary); + + // Outputs of the two circuits and next program counter thus far. + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect::::Scalar>, SuperNovaError>>()?; + let zi_primary_pc_next = + zi_primary_pc_next + .expect("zi_primary_pc_next missing") + .get_value() + .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect:: as Engine>::Scalar>, SuperNovaError>>()?; + + // handle the base case by initialize U_next in next round + let r_W_primary_initial_list = (0..num_augmented_circuits) + .map(|i| (i == circuit_index).then(|| r_W_primary.clone())) + .collect::>>>(); + + let r_U_primary_initial_list = (0..num_augmented_circuits) + .map(|i| (i == circuit_index).then(|| r_U_primary.clone())) + .collect::>>>(); + + // find the largest length r1cs shape for the buffer size + let max_num_cons = + pp.circuit_shapes.iter().map(|circuit| circuit.r1cs_shape.num_cons).max().unwrap(); + + let buffer_primary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(max_num_cons), + ABC_Z_2: R1CSResult::default(max_num_cons), + T: r1cs::default_T::(max_num_cons), + }; + + let 
buffer_secondary = ResourceBuffer { + l_w: None, + l_u: None, + ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), + ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), + T: r1cs::default_T::>(r1cs_secondary.num_cons), + }; + + Ok(Self { + pp_digest: pp.digest(), + num_augmented_circuits, + i: 0_usize, // after base case, next iteration start from 1 + z0_primary: z0_primary.to_vec(), + zi_primary, + + proven_circuit_index: circuit_index, + program_counter: zi_primary_pc_next, + + buffer_primary, + buffer_secondary, + + r_W_primary: r_W_primary_initial_list, + r_U_primary: r_U_primary_initial_list, + z0_secondary: z0_secondary.to_vec(), + zi_secondary, + r_W_secondary, + r_U_secondary, + l_w_secondary, + l_u_secondary, + }) + } + + /// Inputs of the primary circuits + pub fn z0_primary(&self) -> &Vec { &self.z0_primary } + + /// Outputs of the primary circuits + pub fn zi_primary(&self) -> &Vec { &self.zi_primary } + + /// Inputs of the secondary circuits + pub fn z0_secondary(&self) -> &Vec< as Engine>::Scalar> { &self.z0_secondary } + + /// Outputs of the secondary circuits + pub fn zi_secondary(&self) -> &Vec< as Engine>::Scalar> { &self.zi_secondary } + + /// Current program counter + pub fn program_counter(&self) -> E1::Scalar { self.program_counter } + + /// executing a step of the incremental computation + #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip_all, name = "supernova::RecursiveSNARK::prove_step")] + pub fn prove_step, C2: StepCircuit< as Engine>::Scalar>>( + &mut self, + pp: &PublicParams, + c_primary: &C1, + c_secondary: &C2, + ) -> Result<(), SuperNovaError> { + // First step was already done in the constructor + if self.i == 0 { + self.i = 1; + return Ok(()); + } + + // save the inputs before proceeding to the `i+1`th step + let r_U_primary_i = self.r_U_primary.clone(); + // Create single-entry accumulator list for the secondary circuit to hand to + // SuperNovaAugmentedCircuitInputs + let r_U_secondary_i = 
vec![Some(self.r_U_secondary.clone())]; + let l_u_secondary_i = self.l_u_secondary.clone(); + + let circuit_index = c_primary.circuit_index(); + assert_eq!(self.program_counter, E1::Scalar::from(circuit_index as u64)); + + // fold the secondary circuit's instance + let (nifs_secondary, _) = NIFS::prove_mut( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(self.pp_digest), + &pp.circuit_shape_secondary.r1cs_shape, + &mut self.r_U_secondary, + &mut self.r_W_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + &mut self.buffer_secondary.T, + &mut self.buffer_secondary.ABC_Z_1, + &mut self.buffer_secondary.ABC_Z_2, + ) + .map_err(SuperNovaError::NovaError)?; + + let mut cs_primary = SatisfyingAssignment::::with_capacity( + pp[circuit_index].r1cs_shape.num_io + 1, + pp[circuit_index].r1cs_shape.num_vars, + ); + let T = Commitment::>::decompress(&nifs_secondary.comm_T) + .map_err(SuperNovaError::NovaError)?; + let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = + SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(self.pp_digest), + E1::Scalar::from(self.i as u64), + &self.z0_primary, + Some(&self.zi_primary), + Some(&r_U_secondary_i), + Some(&l_u_secondary_i), + Some(&T), + Some(self.program_counter), + E1::Scalar::ZERO, + ); + + let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = + SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, + Some(inputs_primary), + c_primary, + pp.ro_consts_circuit_primary.clone(), + self.num_augmented_circuits, + ); + + let (zi_primary_pc_next, zi_primary) = + circuit_primary.synthesize(&mut cs_primary).map_err(NovaError::from)?; + if zi_primary.len() != pp[circuit_index].F_arity { + return Err(SuperNovaError::NovaError(NovaError::InvalidInitialInputLength)); + } + + let (l_u_primary, l_w_primary) = cs_primary + .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) + .map_err(SuperNovaError::NovaError)?; + + let (r_U_primary, r_W_primary) = if let 
(Some(Some(r_U_primary)), Some(Some(r_W_primary))) = + (self.r_U_primary.get_mut(circuit_index), self.r_W_primary.get_mut(circuit_index)) + { + (r_U_primary, r_W_primary) + } else { + self.r_U_primary[circuit_index] = + Some(RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[circuit_index].r1cs_shape)); + self.r_W_primary[circuit_index] = + Some(RelaxedR1CSWitness::default(&pp[circuit_index].r1cs_shape)); + ( + self.r_U_primary[circuit_index].as_mut().unwrap(), + self.r_W_primary[circuit_index].as_mut().unwrap(), + ) + }; + + let (nifs_primary, _) = NIFS::prove_mut( + &*pp.ck_primary, + &pp.ro_consts_primary, + &self.pp_digest, + &pp[circuit_index].r1cs_shape, + r_U_primary, + r_W_primary, + &l_u_primary, + &l_w_primary, + &mut self.buffer_primary.T, + &mut self.buffer_primary.ABC_Z_1, + &mut self.buffer_primary.ABC_Z_2, + ) + .map_err(SuperNovaError::NovaError)?; + + let mut cs_secondary = SatisfyingAssignment::>::with_capacity( + pp.circuit_shape_secondary.r1cs_shape.num_io + 1, + pp.circuit_shape_secondary.r1cs_shape.num_vars, + ); + let binding = + Commitment::::decompress(&nifs_primary.comm_T).map_err(SuperNovaError::NovaError)?; + let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = + SuperNovaAugmentedCircuitInputs::new( + self.pp_digest, + as Engine>::Scalar::from(self.i as u64), + &self.z0_secondary, + Some(&self.zi_secondary), + Some(&r_U_primary_i), + Some(&l_u_primary), + Some(&binding), + None, // pc is always None for secondary circuit + as Engine>::Scalar::from(circuit_index as u64), + ); + + let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = SuperNovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, + Some(inputs_secondary), + c_secondary, + pp.ro_consts_circuit_secondary.clone(), + self.num_augmented_circuits, + ); + let (_, zi_secondary) = + circuit_secondary.synthesize(&mut cs_secondary).map_err(NovaError::from)?; + if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { + return 
Err(SuperNovaError::NovaError(NovaError::InvalidInitialInputLength)); + } + + let (l_u_secondary_next, l_w_secondary_next) = cs_secondary + .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; + + // update the running instances and witnesses + let zi_primary = zi_primary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect::::Scalar>, SuperNovaError>>()?; + let zi_primary_pc_next = + zi_primary_pc_next + .expect("zi_primary_pc_next missing") + .get_value() + .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; + let zi_secondary = zi_secondary + .iter() + .map(|v| v.get_value().ok_or(NovaError::from(SynthesisError::AssignmentMissing).into())) + .collect:: as Engine>::Scalar>, SuperNovaError>>()?; + + if zi_primary.len() != pp[circuit_index].F_arity + || zi_secondary.len() != pp.circuit_shape_secondary.F_arity + { + return Err(SuperNovaError::NovaError(NovaError::InvalidStepOutputLength)); + } + + self.l_w_secondary = l_w_secondary_next; + self.l_u_secondary = l_u_secondary_next; + self.i += 1; + self.zi_primary = zi_primary; + self.zi_secondary = zi_secondary; + self.proven_circuit_index = circuit_index; + self.program_counter = zi_primary_pc_next; + Ok(()) + } + + /// verify recursive snark + #[allow(clippy::type_complexity)] + pub fn verify( + &self, + pp: &PublicParams, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { + // number of steps cannot be zero + if self.i == 0 { + debug!("must verify on valid RecursiveSNARK where i > 0"); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // Check lengths of r_primary + if self.r_U_primary.len() != self.num_augmented_circuits + || self.r_W_primary.len() != self.num_augmented_circuits + { + debug!("r_primary length mismatch"); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + 
+ // Check that there are no missing instance/witness pairs + self.r_U_primary.iter().zip_eq(self.r_W_primary.iter()).enumerate().try_for_each( + |(i, (u, w))| match (u, w) { + (Some(_), Some(_)) | (None, None) => Ok(()), + _ => { + debug!("r_primary[{:?}]: mismatched instance/witness pair", i); + Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)) + }, + }, + )?; + + let circuit_index = self.proven_circuit_index; + + // check we have an instance/witness pair for the circuit_index + if self.r_U_primary[circuit_index].is_none() { + debug!("r_primary[{:?}]: instance/witness pair is missing", circuit_index); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // check the (relaxed) R1CS instances public outputs. + { + for (i, r_U_primary_i) in self.r_U_primary.iter().enumerate() { + if let Some(u) = r_U_primary_i { + if u.X.len() != 2 { + debug!("r_U_primary[{:?}] got instance length {:?} != 2", i, u.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + } + } + + if self.l_u_secondary.X.len() != 2 { + debug!("l_U_secondary got instance length {:?} != 2", self.l_u_secondary.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + if self.r_U_secondary.X.len() != 2 { + debug!("r_U_secondary got instance length {:?} != 2", self.r_U_secondary.X.len(),); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + } + + let hash_primary = { + let num_absorbs = num_ro_inputs( + self.num_augmented_circuits, + pp.augmented_circuit_params_primary.get_n_limbs(), + pp[circuit_index].F_arity, + true, // is_primary + ); + + let mut hasher = as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_absorbs); + hasher.absorb(self.pp_digest); + hasher.absorb(E1::Scalar::from(self.i as u64)); + hasher.absorb(self.program_counter); + + for e in z0_primary { + hasher.absorb(*e); + } + for e in &self.zi_primary { + hasher.absorb(*e); + } + + self.r_U_secondary.absorb_in_ro(&mut 
hasher); + hasher.squeeze(NUM_HASH_BITS) + }; + + let hash_secondary = { + let num_absorbs = num_ro_inputs( + self.num_augmented_circuits, + pp.augmented_circuit_params_secondary.get_n_limbs(), + pp.circuit_shape_secondary.F_arity, + false, // is_primary + ); + let mut hasher = ::RO::new(pp.ro_consts_primary.clone(), num_absorbs); + hasher.absorb(scalar_as_base::(self.pp_digest)); + hasher.absorb( as Engine>::Scalar::from(self.i as u64)); + + for e in z0_secondary { + hasher.absorb(*e); + } + for e in &self.zi_secondary { + hasher.absorb(*e); + } + + self.r_U_primary.iter().enumerate().for_each(|(i, U)| { + U.as_ref() + .unwrap_or(&RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[i].r1cs_shape)) + .absorb_in_ro(&mut hasher); + }); + hasher.squeeze(NUM_HASH_BITS) + }; + + if hash_primary != self.l_u_secondary.X[0] { + debug!( + "hash_primary {:?} not equal l_u_secondary.X[0] {:?}", + hash_primary, self.l_u_secondary.X[0] + ); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { + debug!( + "hash_secondary {:?} not equal l_u_secondary.X[1] {:?}", + hash_secondary, self.l_u_secondary.X[1] + ); + return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); + } + + // check the satisfiability of all instance/witness pairs + let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( + || { + self.r_U_primary.par_iter().zip_eq(self.r_W_primary.par_iter()).enumerate().try_for_each( + |(i, (u, w))| { + if let (Some(u), Some(w)) = (u, w) { + pp[i].r1cs_shape.is_sat_relaxed(&pp.ck_primary, u, w)? 
+ } + Ok(()) + }, + ) + }, + || { + rayon::join( + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( + &pp.ck_secondary, + &self.r_U_secondary, + &self.r_W_secondary, + ) + }, + || { + pp.circuit_shape_secondary.r1cs_shape.is_sat( + &pp.ck_secondary, + &self.l_u_secondary, + &self.l_w_secondary, + ) + }, + ) + }, + ); + + res_r_primary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_primary", i), + e => SuperNovaError::NovaError(e), + })?; + res_r_secondary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_secondary", i), + e => SuperNovaError::NovaError(e), + })?; + res_l_secondary.map_err(|err| match err { + NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("l_secondary", i), + e => SuperNovaError::NovaError(e), + })?; + + Ok((self.zi_primary.clone(), self.zi_secondary.clone())) + } +} + +/// SuperNova helper trait, for implementors that provide sets of sub-circuits +/// to be proved via NIVC. `C1` must be a type (likely an `Enum`) for which a +/// potentially-distinct instance can be supplied for each `index` below +/// `self.num_circuits()`. +pub trait NonUniformCircuit +where E1: CurveCycleEquipped { + /// The type of the step-circuits on the primary + type C1: StepCircuit; + /// The type of the step-circuits on the secondary + type C2: StepCircuit< as Engine>::Scalar>; + + /// Initial circuit index, defaults to zero. + fn initial_circuit_index(&self) -> usize { 0 } + + /// How many circuits are provided? + fn num_circuits(&self) -> usize; + + /// Return a new instance of the primary circuit at `index`. + fn primary_circuit(&self, circuit_index: usize) -> Self::C1; + + /// Return a new instance of the secondary circuit. + fn secondary_circuit(&self) -> Self::C2; +} + +/// Compute the circuit digest of a supernova [`StepCircuit`]. +/// +/// Note for callers: This function should be called with its performance +/// characteristics in mind. 
It will synthesize and digest the full `circuit` +/// given. +pub fn circuit_digest>( + circuit: &C, + num_augmented_circuits: usize, +) -> E1::Scalar { + let augmented_circuit_params = + SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + + // ro_consts_circuit are parameterized by E2 because the type alias uses + // E2::Base = E1::Scalar + let ro_consts_circuit = ROConstantsCircuit::>::default(); + + // Initialize ck for the primary + let augmented_circuit: SuperNovaAugmentedCircuit<'_, Dual, C> = + SuperNovaAugmentedCircuit::new( + &augmented_circuit_params, + None, + circuit, + ro_consts_circuit, + num_augmented_circuits, + ); + let mut cs: ShapeCS = ShapeCS::new(); + let _ = augmented_circuit.synthesize(&mut cs); + + let F_arity = circuit.arity(); + let circuit_params = R1CSWithArity::new(cs.r1cs_shape(), F_arity); + circuit_params.digest() +} + +/// Compute the number of absorbs for the random-oracle computing the circuit +/// output X = H(vk, i, pc, z0, zi, U) +fn num_ro_inputs(num_circuits: usize, num_limbs: usize, arity: usize, is_primary: bool) -> usize { + let num_circuits = if is_primary { 1 } else { num_circuits }; + + // [W(x,y,∞), E(x,y,∞), u] + [X0, X1] * #num_limb + let instance_size = 3 + 3 + 1 + 2 * num_limbs; + + 2 // params, i + + usize::from(is_primary) // optional program counter + + 2 * arity // z0, zi + + num_circuits * instance_size +} + +pub mod error; +pub mod snark; +mod utils; + +#[cfg(test)] mod test; diff --git a/prover/src/supernova/snark.rs b/prover/src/supernova/snark.rs new file mode 100644 index 0000000..bae4e6f --- /dev/null +++ b/prover/src/supernova/snark.rs @@ -0,0 +1,565 @@ +//! 
This module defines a final compressing SNARK for supernova proofs + +use serde::{Deserialize, Serialize}; + +use super::{error::SuperNovaError, PublicParams, RecursiveSNARK}; +use crate::{ + constants::NUM_HASH_BITS, + errors::NovaError, + r1cs::{R1CSInstance, RelaxedR1CSWitness}, + scalar_as_base, + traits::{ + snark::{BatchedRelaxedR1CSSNARKTrait, RelaxedR1CSSNARKTrait}, + AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROTrait, + }, + RelaxedR1CSInstance, NIFS, +}; + +/// A type that holds the prover key for `CompressedSNARK` +#[derive(Debug, Serialize, Deserialize)] +pub struct ProverKey +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub pk_primary: S1::ProverKey, + pub pk_secondary: S2::ProverKey, +} + +/// A type that holds the verifier key for `CompressedSNARK` +#[derive(Debug, Serialize, Deserialize)] +pub struct VerifierKey +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + pub vk_primary: S1::VerifierKey, + pub vk_secondary: S2::VerifierKey, +} + +/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CompressedSNARK +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, { + r_U_primary: Vec>, + r_W_snark_primary: S1, + + r_U_secondary: RelaxedR1CSInstance>, + l_u_secondary: R1CSInstance>, + nifs_secondary: NIFS>, + f_W_snark_secondary: S2, + + num_steps: usize, + program_counter: E1::Scalar, + + zn_primary: Vec, + zn_secondary: Vec< as Engine>::Scalar>, +} + +impl CompressedSNARK +where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, +{ + pub fn initialize_pk( + pp: &PublicParams, + primary_vk_digest: E1::Scalar, + secondary_vk_digest: as Engine>::Scalar, + ) -> Result, SuperNovaError> { + // TODO: Should we actually clone here? 
+ let pk_primary = S1::initialize_pk(pp.ck_primary.clone(), primary_vk_digest)?; + let pk_secondary = S2::initialize_pk(pp.ck_secondary.clone(), secondary_vk_digest)?; + + Ok(ProverKey { pk_primary, pk_secondary }) + } + + /// Creates prover and verifier keys for `CompressedSNARK` + #[allow(clippy::type_complexity)] + pub fn setup( + pp: &PublicParams, + ) -> Result<(ProverKey, VerifierKey), SuperNovaError> { + let (pk_primary, vk_primary) = S1::setup(pp.ck_primary.clone(), pp.primary_r1cs_shapes())?; + + let (pk_secondary, vk_secondary) = + S2::setup(pp.ck_secondary.clone(), &pp.circuit_shape_secondary.r1cs_shape)?; + + let prover_key = ProverKey { pk_primary, pk_secondary }; + let verifier_key = VerifierKey { vk_primary, vk_secondary }; + + Ok((prover_key, verifier_key)) + } + + /// Create a new `CompressedSNARK` + pub fn prove( + pp: &PublicParams, + pk: &ProverKey, + recursive_snark: &RecursiveSNARK, + ) -> Result { + // fold the secondary circuit's instance + let res_secondary = NIFS::prove( + &*pp.ck_secondary, + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &pp.circuit_shape_secondary.r1cs_shape, + &recursive_snark.r_U_secondary, + &recursive_snark.r_W_secondary, + &recursive_snark.l_u_secondary, + &recursive_snark.l_w_secondary, + ); + + let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = res_secondary?; + + // Prepare the list of primary Relaxed R1CS instances (a default instance is + // provided for uninitialized circuits) + let r_U_primary = recursive_snark + .r_U_primary + .iter() + .enumerate() + .map(|(idx, r_U)| { + r_U + .clone() + .unwrap_or_else(|| RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[idx].r1cs_shape)) + }) + .collect::>(); + + // Prepare the list of primary relaxed R1CS witnesses (a default witness is + // provided for uninitialized circuits) + let r_W_primary: Vec> = recursive_snark + .r_W_primary + .iter() + .enumerate() + .map(|(idx, r_W)| { + r_W.clone().unwrap_or_else(|| 
RelaxedR1CSWitness::default(&pp[idx].r1cs_shape)) + }) + .collect::>(); + + // Generate a primary SNARK proof for the list of primary circuits + let r_W_snark_primary = S1::prove( + &pp.ck_primary, + &pk.pk_primary, + pp.primary_r1cs_shapes(), + &r_U_primary, + &r_W_primary, + )?; + + // Generate a secondary SNARK proof for the secondary circuit + let f_W_snark_secondary = S2::prove( + &pp.ck_secondary, + &pk.pk_secondary, + &pp.circuit_shape_secondary.r1cs_shape, + &f_U_secondary, + &f_W_secondary, + )?; + + let compressed_snark = Self { + r_U_primary, + r_W_snark_primary, + + r_U_secondary: recursive_snark.r_U_secondary.clone(), + l_u_secondary: recursive_snark.l_u_secondary.clone(), + nifs_secondary, + f_W_snark_secondary, + + num_steps: recursive_snark.i, + program_counter: recursive_snark.program_counter, + + zn_primary: recursive_snark.zi_primary.clone(), + zn_secondary: recursive_snark.zi_secondary.clone(), + }; + + Ok(compressed_snark) + } + + /// Verify the correctness of the `CompressedSNARK` + #[allow(clippy::type_complexity)] + pub fn verify( + &self, + pp: &PublicParams, + vk: &VerifierKey, + z0_primary: &[E1::Scalar], + z0_secondary: &[ as Engine>::Scalar], + ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { + // Assumes that each circuit has the same arity, so we just use the first one' + let num_field_primary_ro = 3 // params_next, i_new, program_counter_new + + 2 * pp[0].F_arity // zo, z1 + + (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // # 1 * (7 + [X0, X1]*#num_limb) + + // secondary circuit + // NOTE: This count ensure the number of witnesses sent by the prover must equal + // the number of NIVC circuits + let num_field_secondary_ro = 2 // params_next, i_new + + 2 * pp.circuit_shape_secondary.F_arity // zo, z1 + + pp.circuit_shapes.len() * (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // #num_augment + + // Compute the primary and secondary hashes given the digest, program counter, + // instances, and 
witnesses provided by the prover + let (hash_primary, hash_secondary) = { + let mut hasher = + as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_field_primary_ro); + + hasher.absorb(pp.digest()); + hasher.absorb(E1::Scalar::from(self.num_steps as u64)); + hasher.absorb(self.program_counter); + + for e in z0_primary { + hasher.absorb(*e); + } + + for e in &self.zn_primary { + hasher.absorb(*e); + } + + self.r_U_secondary.absorb_in_ro(&mut hasher); + + let mut hasher2 = + ::RO::new(pp.ro_consts_primary.clone(), num_field_secondary_ro); + + hasher2.absorb(scalar_as_base::(pp.digest())); + hasher2.absorb( as Engine>::Scalar::from(self.num_steps as u64)); + + for e in z0_secondary { + hasher2.absorb(*e); + } + + for e in &self.zn_secondary { + hasher2.absorb(*e); + } + + self.r_U_primary.iter().for_each(|U| { + U.absorb_in_ro(&mut hasher2); + }); + + (hasher.squeeze(NUM_HASH_BITS), hasher2.squeeze(NUM_HASH_BITS)) + }; + + // Compare the computed hashes with the public IO of the last invocation of + // `prove_step` + if hash_primary != self.l_u_secondary.X[0] { + return Err(NovaError::ProofVerifyError.into()); + } + + if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { + return Err(NovaError::ProofVerifyError.into()); + } + + // Verify the primary SNARK + let res_primary = self.r_W_snark_primary.verify(&vk.vk_primary, &self.r_U_primary); + + // Fold the secondary circuit's instance + let f_U_secondary = self.nifs_secondary.verify( + &pp.ro_consts_secondary, + &scalar_as_base::(pp.digest()), + &self.r_U_secondary, + &self.l_u_secondary, + )?; + + // Verify the secondary SNARK + let res_secondary = self.f_W_snark_secondary.verify(&vk.vk_secondary, &f_U_secondary); + + res_primary?; + + res_secondary?; + + Ok((self.zn_primary.clone(), self.zn_secondary.clone())) + } +} + +#[cfg(test)] +mod test { + use std::marker::PhantomData; + + use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; + use ff::Field; + + use super::*; + use 
crate::{ + provider::{ipa_pc, Bn256EngineIPA}, + spartan::{batched, batched_ppsnark, snark::RelaxedR1CSSNARK}, + supernova::{circuit::TrivialCircuit, NonUniformCircuit, StepCircuit}, + }; + + type EE = ipa_pc::EvaluationEngine; + type S1 = batched::BatchedRelaxedR1CSSNARK>; + type S1PP = batched_ppsnark::BatchedRelaxedR1CSSNARK>; + type S2 = RelaxedR1CSSNARK>; + + #[derive(Clone)] + struct SquareCircuit { + _p: PhantomData, + } + + impl StepCircuit for SquareCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let z_i = &z[0]; + + let z_next = z_i.square(cs.namespace(|| "z_i^2"))?; + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(1u64)))?; + + cs.enforce( + || "next_pc = 1", + |lc| lc + CS::one(), + |lc| lc + next_pc.get_variable(), + |lc| lc + CS::one(), + ); + + Ok((Some(next_pc), vec![z_next])) + } + } + + #[derive(Clone)] + struct CubeCircuit { + _p: PhantomData, + } + + impl StepCircuit for CubeCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let z_i = &z[0]; + + let z_sq = z_i.square(cs.namespace(|| "z_i^2"))?; + let z_cu = z_sq.mul(cs.namespace(|| "z_i^3"), z_i)?; + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; + + cs.enforce(|| "next_pc = 0", |lc| lc + CS::one(), |lc| lc + next_pc.get_variable(), |lc| lc); + + Ok((Some(next_pc), vec![z_cu])) + } + } + + #[derive(Clone)] + enum TestCircuit { + Square(SquareCircuit), + Cube(CubeCircuit), + } + + impl TestCircuit { + fn new(num_steps: usize) -> Vec { + let mut circuits = Vec::new(); + + for idx in 0..num_steps { + if idx % 2 == 0 { + 
circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) + } else { + circuits.push(Self::Cube(CubeCircuit { _p: PhantomData })) + } + } + + circuits + } + } + + impl StepCircuit for TestCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Square(c) => c.circuit_index(), + Self::Cube(c) => c.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + match self { + Self::Square(c) => c.synthesize(cs, pc, z), + Self::Cube(c) => c.synthesize(cs, pc, z), + } + } + } + + impl NonUniformCircuit for TestCircuit { + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Square(SquareCircuit { _p: PhantomData }), + 1 => Self::Cube(CubeCircuit { _p: PhantomData }), + _ => panic!("Invalid circuit index"), + } + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + } + + #[derive(Clone)] + struct BigPowerCircuit { + _p: PhantomData, + } + + impl StepCircuit for BigPowerCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + let mut x = z[0].clone(); + let mut y = x.clone(); + for i in 0..10_000 { + y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; + x = y.clone(); + } + + let next_pc = AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; + + cs.enforce(|| "next_pc = 0", |lc| lc + CS::one(), |lc| lc + next_pc.get_variable(), |lc| lc); + + Ok((Some(next_pc), vec![y])) + } + } + + #[derive(Clone)] + enum BigTestCircuit { + Square(SquareCircuit), + BigPower(BigPowerCircuit), + } + + impl BigTestCircuit { + fn new(num_steps: usize) -> Vec { + 
let mut circuits = Vec::new(); + + for idx in 0..num_steps { + if idx % 2 == 0 { + circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) + } else { + circuits.push(Self::BigPower(BigPowerCircuit { _p: PhantomData })) + } + } + + circuits + } + } + + impl StepCircuit for BigTestCircuit { + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Square(c) => c.circuit_index(), + Self::BigPower(c) => c.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> + { + match self { + Self::Square(c) => c.synthesize(cs, pc, z), + Self::BigPower(c) => c.synthesize(cs, pc, z), + } + } + } + + impl NonUniformCircuit for BigTestCircuit { + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Square(SquareCircuit { _p: PhantomData }), + 1 => Self::BigPower(BigPowerCircuit { _p: PhantomData }), + _ => panic!("Invalid circuit index"), + } + } + + fn secondary_circuit(&self) -> Self::C2 { Default::default() } + } + + fn test_compression_with(num_steps: usize, circuits_factory: F) + where + E1: CurveCycleEquipped, + S1: BatchedRelaxedR1CSSNARKTrait, + S2: RelaxedR1CSSNARKTrait>, + C: NonUniformCircuit as Engine>::Scalar>> + + StepCircuit, + F: Fn(usize) -> Vec, { + let secondary_circuit = TrivialCircuit::default(); + let test_circuits = circuits_factory(num_steps); + + let pp = PublicParams::setup(&test_circuits[0], &*S1::ck_floor(), &*S2::ck_floor()); + + let z0_primary = vec![E1::Scalar::from(17u64)]; + let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + + let mut recursive_snark = RecursiveSNARK::new( + &pp, + &test_circuits[0], + &test_circuits[0], + &secondary_circuit, + &z0_primary, + &z0_secondary, + ) + .unwrap(); + + for circuit in test_circuits.iter().take(num_steps) 
{
      recursive_snark.prove_step(&pp, circuit, &secondary_circuit).unwrap();

      recursive_snark.verify(&pp, &z0_primary, &z0_secondary).unwrap();
    }

    let (prover_key, verifier_key) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap();

    let compressed_snark = CompressedSNARK::prove(&pp, &prover_key, &recursive_snark).unwrap();

    compressed_snark.verify(&pp, &verifier_key, &z0_primary, &z0_secondary).unwrap();
  }

  #[test]
  fn test_nivc_trivial_with_compression() {
    const NUM_STEPS: usize = 6;
    test_compression_with::<Bn256EngineIPA, S1<_>, S2<_>, _, _>(NUM_STEPS, TestCircuit::new);
    test_compression_with::<Bn256EngineIPA, S1PP<_>, S2<_>, _, _>(NUM_STEPS, TestCircuit::new);
  }

  #[test]
  fn test_compression_with_circuit_size_difference() {
    const NUM_STEPS: usize = 4;
    test_compression_with::<Bn256EngineIPA, S1<_>, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new);
    test_compression_with::<Bn256EngineIPA, S1PP<_>, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new);
  }
}

// ---------------------------------------------------------------------------
// file boundary: prover/src/supernova/test.rs
// ---------------------------------------------------------------------------

use core::marker::PhantomData;
use std::fmt::Write;

use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
use expect_test::{expect, Expect};
use ff::{Field, PrimeField};
use tap::TapOptional;

use super::{utils::get_selector_vec_from_index, *};
use crate::{
  bellpepper::test_shape_cs::TestShapeCS,
  gadgets::{alloc_one, alloc_zero},
  provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine},
  supernova::circuit::{StepCircuit, TrivialCircuit},
  traits::snark::default_ck_hint,
};

#[derive(Clone, Debug, Default)]
struct CubicCircuit<F> {
  _p: PhantomData<F>,
  circuit_index: usize,
  rom_size: usize,
}

impl<F> CubicCircuit<F> {
  fn new(circuit_index: usize, rom_size: usize) -> Self {
    Self { circuit_index, rom_size, _p: PhantomData }
  }
}

/// Advance the ROM cursor: checks `allocated_rom[rom_index] == pc`, then
/// allocates the next rom index and the next program counter.
fn next_rom_index_and_pc<F: PrimeField, CS: ConstraintSystem<F>>(
  cs: &mut CS,
  rom_index: &AllocatedNum<F>,
  allocated_rom: &[AllocatedNum<F>],
  pc: &AllocatedNum<F>,
) -> Result<(AllocatedNum<F>, AllocatedNum<F>), SynthesisError> {
  // Compute a selector for the current rom_index in allocated_rom
  let current_rom_selector =
    get_selector_vec_from_index(cs.namespace(|| "rom selector"), rom_index, allocated_rom.len())?;

  // Enforce that allocated_rom[rom_index] = pc
  for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) {
    // if bit = 1, then rom = pc
    // bit * (rom - pc) = 0
    cs.enforce(
      || "enforce bit = 1 => rom = pc",
      |lc| lc + &bit.lc(CS::one(), F::ONE),
      |lc| lc + rom.get_variable() - pc.get_variable(),
      |lc| lc,
    );
  }

  // Get the index of the current rom, or the index of the invalid rom if no match
  let current_rom_index = current_rom_selector
    .iter()
    .position(|bit| bit.get_value().is_some_and(|v| v))
    .unwrap_or_default();
  let next_rom_index = current_rom_index + 1;

  let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || {
    F::from(next_rom_index as u64)
  });
  cs.enforce(
    || " rom_index + 1 - next_rom_index_num = 0",
    |lc| lc,
    |lc| lc,
    |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(),
  );

  // Allocate the next pc without checking.
  // The next iteration will check whether the next pc is valid.
  let pc_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next pc"), || {
    allocated_rom.get(next_rom_index).and_then(|v| v.get_value()).unwrap_or(-F::ONE)
  });

  Ok((rom_index_next, pc_next))
}

impl<F> StepCircuit<F> for CubicCircuit<F>
where F: PrimeField
{
  fn arity(&self) -> usize {
    2 + self.rom_size // value + rom_pc + rom[].len()
  }

  fn circuit_index(&self) -> usize { self.circuit_index }

  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    cs: &mut CS,
    pc: Option<&AllocatedNum<F>>,
    z: &[AllocatedNum<F>],
  ) -> Result<(Option<AllocatedNum<F>>, Vec<AllocatedNum<F>>), SynthesisError> {
    let rom_index = &z[1];
    let allocated_rom = &z[2..];

    let (rom_index_next, pc_next) = next_rom_index_and_pc(
      &mut cs.namespace(|| "next and rom_index and pc"),
      rom_index,
      allocated_rom,
      pc.ok_or(SynthesisError::AssignmentMissing)?,
    )?;

    // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are
    // respectively the input and output.
    let x = &z[0];
    let x_sq = x.square(cs.namespace(|| "x_sq"))?;
    let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?;
    let y = AllocatedNum::alloc(cs.namespace(|| "y"), || {
      Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64))
    })?;

    cs.enforce(
      || "y = x^3 + x + 5",
      |lc| {
        lc + x_cu.get_variable()
          + x.get_variable()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
      },
      |lc| lc + CS::one(),
      |lc| lc + y.get_variable(),
    );

    let mut z_next = vec![y];
    z_next.push(rom_index_next);
    z_next.extend(z[2..].iter().cloned());
    Ok((Some(pc_next), z_next))
  }
}

#[derive(Clone, Debug, Default)]
struct SquareCircuit<F> {
  _p: PhantomData<F>,
  circuit_index: usize,
  rom_size: usize,
}

impl<F> SquareCircuit<F> {
  fn new(circuit_index: usize, rom_size: usize) -> Self {
    Self { circuit_index, rom_size, _p: PhantomData }
  }
}

impl<F> StepCircuit<F> for SquareCircuit<F>
where F: PrimeField
{
  fn arity(&self) -> usize {
    2 + self.rom_size // value + rom_pc + rom[].len()
  }

  fn circuit_index(&self) -> usize { self.circuit_index }

  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    cs: &mut CS,
    pc: Option<&AllocatedNum<F>>,
    z: &[AllocatedNum<F>],
  ) -> Result<(Option<AllocatedNum<F>>, Vec<AllocatedNum<F>>), SynthesisError> {
    let rom_index = &z[1];
    let allocated_rom = &z[2..];

    let (rom_index_next, pc_next) = next_rom_index_and_pc(
      &mut cs.namespace(|| "next and rom_index and pc"),
      rom_index,
      allocated_rom,
      pc.ok_or(SynthesisError::AssignmentMissing)?,
    )?;

    // Consider an equation: `x^2 + x + 5 = y`, where `x` and `y` are respectively
    // the input and output.
    let x = &z[0];
    let x_sq = x.square(cs.namespace(|| "x_sq"))?;
    let y = AllocatedNum::alloc(cs.namespace(|| "y"), || {
      Ok(x_sq.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64))
    })?;

    cs.enforce(
      || "y = x^2 + x + 5",
      |lc| {
        lc + x_sq.get_variable()
          + x.get_variable()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
          + CS::one()
      },
      |lc| lc + CS::one(),
      |lc| lc + y.get_variable(),
    );

    let mut z_next = vec![y];
    z_next.push(rom_index_next);
    z_next.extend(z[2..].iter().cloned());
    Ok((Some(pc_next), z_next))
  }
}

// On an unsatisfied-constraint error, re-synthesize the offending circuit in a
// TestShapeCS to print the name of the failing constraint.
fn print_constraints_name_on_error_index<
  E1,
  C1: StepCircuit<E1::Scalar>,
  C2: StepCircuit<<Dual<E1> as Engine>::Scalar>,
>(
  err: &SuperNovaError,
  pp: &PublicParams<E1>,
  c_primary: &C1,
  c_secondary: &C2,
  num_augmented_circuits: usize,
) where
  E1: CurveCycleEquipped,
{
  match err {
    SuperNovaError::UnSatIndex(msg, index) if *msg == "r_primary" => {
      let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual<E1>, C1> =
        SuperNovaAugmentedCircuit::new(
          &pp.augmented_circuit_params_primary,
          None,
          c_primary,
          pp.ro_consts_circuit_primary.clone(),
          num_augmented_circuits,
        );
      let mut cs: TestShapeCS<E1> = TestShapeCS::new();
      let _ = circuit_primary.synthesize(&mut cs);
      cs.constraints
        .get(*index)
        .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3));
    },
    SuperNovaError::UnSatIndex(msg, index) if *msg == "r_secondary" || *msg == "l_secondary" => {
      let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = SuperNovaAugmentedCircuit::new(
        &pp.augmented_circuit_params_secondary,
        None,
        c_secondary,
        pp.ro_consts_circuit_secondary.clone(),
        num_augmented_circuits,
      );
      let mut cs: TestShapeCS<Dual<E1>> = TestShapeCS::new();
      let _ = circuit_secondary.synthesize(&mut cs);
      cs.constraints
        .get(*index)
        .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3));
    },
    _ => (),
  }
}

const OPCODE_0: usize = 0;
const OPCODE_1: usize = 1;

struct TestROM<E1> {
  rom: Vec<usize>,
  _p: PhantomData<E1>,
}

#[derive(Debug, Clone)]
enum TestROMCircuit<F: PrimeField> {
  Cubic(CubicCircuit<F>),
  Square(SquareCircuit<F>),
}

impl<F: PrimeField> StepCircuit<F> for TestROMCircuit<F> {
  fn arity(&self) -> usize {
    match self {
      Self::Cubic(x) => x.arity(),
      Self::Square(x) => x.arity(),
    }
  }

  fn circuit_index(&self) -> usize {
    match self {
      Self::Cubic(x) => x.circuit_index(),
      Self::Square(x) => x.circuit_index(),
    }
  }

  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    cs: &mut CS,
    pc: Option<&AllocatedNum<F>>,
    z: &[AllocatedNum<F>],
  ) -> Result<(Option<AllocatedNum<F>>, Vec<AllocatedNum<F>>), SynthesisError> {
    match self {
      Self::Cubic(x) => x.synthesize(cs, pc, z),
      Self::Square(x) => x.synthesize(cs, pc, z),
    }
  }
}

impl<E1> NonUniformCircuit<E1> for TestROM<E1>
where E1: CurveCycleEquipped
{
  type C1 = TestROMCircuit<E1::Scalar>;
  type C2 = TrivialCircuit<<Dual<E1> as Engine>::Scalar>;

  fn num_circuits(&self) -> usize { 2 }

  fn primary_circuit(&self, circuit_index: usize) -> Self::C1 {
    match circuit_index {
      0 => TestROMCircuit::Cubic(CubicCircuit::new(circuit_index, self.rom.len())),
      1 => TestROMCircuit::Square(SquareCircuit::new(circuit_index, self.rom.len())),
      _ => panic!("unsupported primary circuit index"),
    }
  }

  fn secondary_circuit(&self) -> Self::C2 { Default::default() }

  fn initial_circuit_index(&self) -> usize { self.rom[0] }
}

impl<E1> TestROM<E1> {
  fn new(rom: Vec<usize>) -> Self { Self { rom, _p: Default::default() } }
}

fn test_trivial_nivc_with<E1>()
where E1: CurveCycleEquipped {
  // Here demo a simple RAM machine
  // - with 2 augmented circuits
  // - each augmented circuit contains primary and secondary circuit
  // - a memory commitment via a public IO `rom` (like a program) to constrain the sequence
  //   execution

  // This test is also ready to add more augmented circuits, and ROM can be of
  // arbitrary length

  // ROM is for constraining the sequence of execution order for opcode

  // TODO: replace with memory commitment along with suggestion from Supernova 4.4
  // optimisations

  // This is mostly done with the existing Nova code. With additions of U_i[] and
  // program_counter checks in the augmented circuit.

  let rom = vec![
    OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1,
    OPCODE_1,
  ]; // Rom can be arbitrary length.

  let test_rom = TestROM::<E1>::new(rom);

  let pp = PublicParams::setup(&test_rom, &*default_ck_hint(), &*default_ck_hint());

  // extend z0_primary/secondary with rom content
  let mut z0_primary = vec![<E1 as Engine>::Scalar::ONE];
  z0_primary.push(<E1 as Engine>::Scalar::ZERO); // rom_index = 0
  z0_primary
    .extend(test_rom.rom.iter().map(|opcode| <E1 as Engine>::Scalar::from(*opcode as u64)));
  let z0_secondary = vec![<Dual<E1> as Engine>::Scalar::ONE];

  let mut recursive_snark_option: Option<RecursiveSNARK<E1>> = None;

  for &op_code in test_rom.rom.iter() {
    let circuit_primary = test_rom.primary_circuit(op_code);
    let circuit_secondary = test_rom.secondary_circuit();

    let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| {
      RecursiveSNARK::new(
        &pp,
        &test_rom,
        &circuit_primary,
        &circuit_secondary,
        &z0_primary,
        &z0_secondary,
      )
      .unwrap()
    });

    recursive_snark.prove_step(&pp, &circuit_primary, &circuit_secondary).unwrap();
    recursive_snark
      .verify(&pp, &z0_primary, &z0_secondary)
      .map_err(|err| {
        print_constraints_name_on_error_index(
          &err,
          &pp,
          &circuit_primary,
          &circuit_secondary,
          test_rom.num_circuits(),
        )
      })
      .unwrap();
recursive_snark_option = Some(recursive_snark) + } + + assert!(recursive_snark_option.is_some()); + + // Now you can handle the Result using if let + let RecursiveSNARK { zi_primary, zi_secondary, program_counter, .. } = + &recursive_snark_option.unwrap(); + + println!("zi_primary: {:?}", zi_primary); + println!("zi_secondary: {:?}", zi_secondary); + println!("final program_counter: {:?}", program_counter); + + // The final program counter should be -1 + assert_eq!(*program_counter, -::Scalar::ONE); +} + +#[test] +#[tracing_test::traced_test] +fn test_trivial_nivc() { + // Experimenting with selecting the running claims for nifs + test_trivial_nivc_with::(); +} + +// In the following we use 1 to refer to the primary, and 2 to refer to the +// secondary circuit +fn test_recursive_circuit_with( + primary_params: &SuperNovaAugmentedCircuitParams, + secondary_params: &SuperNovaAugmentedCircuitParams, + ro_consts1: ROConstantsCircuit>, + ro_consts2: ROConstantsCircuit, + num_constraints_primary: &Expect, + num_constraints_secondary: &Expect, +) where + E1: CurveCycleEquipped, +{ + // Initialize the shape and ck for the primary + let step_circuit1 = TrivialCircuit::default(); + let arity1 = step_circuit1.arity(); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new(primary_params, None, &step_circuit1, ro_consts1.clone(), 2); + let mut cs: ShapeCS = ShapeCS::new(); + if let Err(e) = circuit1.synthesize(&mut cs) { + panic!("{}", e) + } + let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); + num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); + + // Initialize the shape and ck for the secondary + let step_circuit2 = TrivialCircuit::default(); + let arity2 = step_circuit2.arity(); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new(secondary_params, None, &step_circuit2, ro_consts2.clone(), 2); + let mut cs: 
ShapeCS> = ShapeCS::new(); + if let Err(e) = circuit2.synthesize(&mut cs) { + panic!("{}", e) + } + let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); + num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); + + // Execute the base case for the primary + let zero1 = < as Engine>::Base as Field>::ZERO; + let z0 = vec![zero1; arity1]; + let mut cs1 = SatisfyingAssignment::::new(); + let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::(zero1), // pass zero for testing + zero1, + &z0, + None, + None, + None, + None, + Some(zero1), + zero1, + ); + let step_circuit = TrivialCircuit::default(); + let circuit1: SuperNovaAugmentedCircuit< + '_, + Dual, + TrivialCircuit< as Engine>::Base>, + > = SuperNovaAugmentedCircuit::new(primary_params, Some(inputs1), &step_circuit, ro_consts1, 2); + if let Err(e) = circuit1.synthesize(&mut cs1) { + panic!("{}", e) + } + let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); + // Make sure that this is satisfiable + shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); + + // Execute the base case for the secondary + let zero2 = <::Base as Field>::ZERO; + let z0 = vec![zero2; arity2]; + let mut cs2 = SatisfyingAssignment::>::new(); + let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( + scalar_as_base::>(zero2), // pass zero for testing + zero2, + &z0, + None, + None, + Some(&inst1), + None, + None, + zero2, + ); + let step_circuit = TrivialCircuit::default(); + let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = + SuperNovaAugmentedCircuit::new(secondary_params, Some(inputs2), &step_circuit, ro_consts2, 2); + if let Err(e) = circuit2.synthesize(&mut cs2) { + panic!("{}", e) + } + let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); + // Make sure that it is satisfiable + shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); +} + +#[test] +fn 
test_recursive_circuit() { + let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); + let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); + let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); + + test_recursive_circuit_with::( + ¶ms1, + ¶ms2, + ro_consts1, + ro_consts2, + &expect!["10004"], + &expect!["12206"], + ); +} + +fn test_pp_digest_with(non_uniform_circuit: &NC, expected: &Expect) +where + E1: CurveCycleEquipped, + NC: NonUniformCircuit, { + // TODO: add back in https://github.com/argumentcomputer/arecibo/issues/53 + // // this tests public parameters with a size specifically intended for a + // spark-compressed SNARK let pp_hint1 = + // Some(SPrime::::commitment_key_floor()); let pp_hint2 = + // Some(SPrime::::commitment_key_floor()); + let pp = PublicParams::::setup(non_uniform_circuit, &*default_ck_hint(), &*default_ck_hint()); + + let digest_str = pp.digest().to_repr().as_ref().iter().fold(String::new(), |mut output, b| { + let _ = write!(output, "{b:02x}"); + output + }); + expected.assert_eq(&digest_str); +} + +#[test] +fn test_supernova_pp_digest() { + let rom = vec![ + OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, + OPCODE_1, + ]; // Rom can be arbitrary length. + let test_rom_grumpkin = TestROM::::new(rom); + + test_pp_digest_with::(&test_rom_grumpkin, &expect![ + "30418e576c11dd698054a6cc69d1b1e43ddf0f562abfb50b777147afad741a01" + ]); +} + +// y is a non-deterministic hint representing the cube root of the input at a +// step. 
+#[derive(Clone, Debug)] +struct CubeRootCheckingCircuit { + y: Option, +} + +impl StepCircuit for CubeRootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 0 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let x = &z[0]; + + // we allocate a variable and set it to the provided non-deterministic hint. + let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + self.y.ok_or(SynthesisError::AssignmentMissing) + })?; + + // We now check if y = x^{1/3} by checking if y^3 = x + let y_sq = y.square(cs.namespace(|| "y_sq"))?; + let y_cube = y_sq.mul(cs.namespace(|| "y_cube"), &y)?; + + cs.enforce( + || "y^3 = x", + |lc| lc + y_cube.get_variable(), + |lc| lc + CS::one(), + |lc| lc + x.get_variable(), + ); + + let next_pc = alloc_one(&mut cs.namespace(|| "next_pc")); + + Ok((Some(next_pc), vec![y])) + } +} + +// y is a non-deterministic hint representing the fifth root of the input at a +// step. +#[derive(Clone, Debug)] +struct FifthRootCheckingCircuit { + y: Option, +} + +impl StepCircuit for FifthRootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { 1 } + + fn synthesize>( + &self, + cs: &mut CS, + _pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + let x = &z[0]; + + // we allocate a variable and set it to the provided non-deterministic hint. 
+ let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { + self.y.ok_or(SynthesisError::AssignmentMissing) + })?; + + // We now check if y = x^{1/5} by checking if y^5 = x + let y_sq = y.square(cs.namespace(|| "y_sq"))?; + let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; + let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; + + cs.enforce( + || "y^5 = x", + |lc| lc + y_pow_5.get_variable(), + |lc| lc + CS::one(), + |lc| lc + x.get_variable(), + ); + + let next_pc = alloc_zero(&mut cs.namespace(|| "next_pc")); + + Ok((Some(next_pc), vec![y])) + } +} + +#[derive(Clone, Debug)] +enum RootCheckingCircuit { + Cube(CubeRootCheckingCircuit), + Fifth(FifthRootCheckingCircuit), +} + +impl RootCheckingCircuit { + fn new(num_steps: usize) -> (Vec, Vec) { + let mut powers = Vec::new(); + let rng = &mut rand::rngs::OsRng; + let mut seed = F::random(rng); + + for i in 0..num_steps + 1 { + let seed_sq = seed.clone().square(); + // Cube-root and fifth-root circuits alternate. We compute the hints backward, + // so the calculations appear to be associated with the 'wrong' + // circuit. The final circuit is discarded, and only the final seed is used (as + // z_0). 
+ powers.push(if i % 2 == num_steps % 2 { + seed *= seed_sq; + Self::Fifth(FifthRootCheckingCircuit { y: Some(seed) }) + } else { + seed *= seed_sq.clone().square(); + Self::Cube(CubeRootCheckingCircuit { y: Some(seed) }) + }) + } + + // reverse the powers to get roots + let roots = powers.into_iter().rev().collect::>(); + (vec![roots[0].get_y().unwrap()], roots[1..].to_vec()) + } + + fn get_y(&self) -> Option { + match self { + Self::Fifth(x) => x.y, + Self::Cube(x) => x.y, + } + } +} + +impl StepCircuit for RootCheckingCircuit +where F: PrimeField +{ + fn arity(&self) -> usize { 1 } + + fn circuit_index(&self) -> usize { + match self { + Self::Cube(x) => x.circuit_index(), + Self::Fifth(x) => x.circuit_index(), + } + } + + fn synthesize>( + &self, + cs: &mut CS, + pc: Option<&AllocatedNum>, + z: &[AllocatedNum], + ) -> Result<(Option>, Vec>), SynthesisError> { + match self { + Self::Cube(c) => c.synthesize(cs, pc, z), + Self::Fifth(c) => c.synthesize(cs, pc, z), + } + } +} + +impl NonUniformCircuit for RootCheckingCircuit +where E1: CurveCycleEquipped +{ + type C1 = Self; + type C2 = TrivialCircuit< as Engine>::Scalar>; + + fn num_circuits(&self) -> usize { 2 } + + fn primary_circuit(&self, circuit_index: usize) -> Self { + match circuit_index { + 0 => Self::Cube(CubeRootCheckingCircuit { y: None }), + 1 => Self::Fifth(FifthRootCheckingCircuit { y: None }), + _ => unreachable!(), + } + } + + fn secondary_circuit(&self) -> Self::C2 { TrivialCircuit::::default() } +} + +fn test_nivc_nondet_with() +where E1: CurveCycleEquipped { + let circuit_secondary = TrivialCircuit::default(); + + let num_steps = 3; + + // produce non-deterministic hint + let (z0_primary, roots) = RootCheckingCircuit::new(num_steps); + assert_eq!(num_steps, roots.len()); + let z0_secondary = vec![ as Engine>::Scalar::ZERO]; + + // produce public parameters + let pp = PublicParams::::setup(&roots[0], &*default_ck_hint(), &*default_ck_hint()); + // produce a recursive SNARK + + let circuit_primary 
= &roots[0]; + + let mut recursive_snark = RecursiveSNARK::::new( + &pp, + circuit_primary, + circuit_primary, + &circuit_secondary, + &z0_primary, + &z0_secondary, + ) + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .unwrap(); + + for circuit_primary in roots.iter().take(num_steps) { + let res = recursive_snark.prove_step(&pp, circuit_primary, &circuit_secondary); + assert!(res + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .is_ok()); + + // verify the recursive SNARK + recursive_snark + .verify(&pp, &z0_primary, &z0_secondary) + .map_err(|err| { + print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) + }) + .unwrap(); + } +} + +#[test] +fn test_nivc_nondet() { test_nivc_nondet_with::(); } diff --git a/src/supernova/utils.rs b/prover/src/supernova/utils.rs similarity index 57% rename from src/supernova/utils.rs rename to prover/src/supernova/utils.rs index 4ea421c..08c09c1 100644 --- a/src/supernova/utils.rs +++ b/prover/src/supernova/utils.rs @@ -1,15 +1,15 @@ use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, LinearCombination, SynthesisError, + boolean::{AllocatedBit, Boolean}, + num::AllocatedNum, + ConstraintSystem, LinearCombination, SynthesisError, }; use ff::PrimeField; use itertools::Itertools as _; use crate::{ - constants::NIO_NOVA_FOLD, - gadgets::{conditionally_select_alloc_relaxed_r1cs, AllocatedRelaxedR1CSInstance}, - traits::Engine, + constants::NIO_NOVA_FOLD, + gadgets::{conditionally_select_alloc_relaxed_r1cs, AllocatedRelaxedR1CSInstance}, + traits::Engine, }; /// Return the element of `a` given by the indicator bit in `selector_vec`. @@ -25,89 +25,84 @@ use crate::{ // `a`. The larger the elements, the fewer are needed before multicase becomes // cost-effective. 
pub fn get_from_vec_alloc_relaxed_r1cs::Base>>( - mut cs: CS, - a: &[AllocatedRelaxedR1CSInstance], - selector_vec: &[Boolean], + mut cs: CS, + a: &[AllocatedRelaxedR1CSInstance], + selector_vec: &[Boolean], ) -> Result, SynthesisError> { - assert_eq!(a.len(), selector_vec.len()); - - // Compare all instances in `a` to the first one - let first: AllocatedRelaxedR1CSInstance = a - .first() - .cloned() - .ok_or_else(|| SynthesisError::IncompatibleLengthVector("empty vec length".to_string()))?; - - // Since `selector_vec` is correct, only one entry is 1. - // If selector_vec[0] is 1, then all `conditionally_select` will return `first`. - // Otherwise, the correct instance will be selected. - // TODO: reformulate when iterator_try_reduce stabilizes - let selected = a - .iter() - .zip_eq(selector_vec.iter()) - .enumerate() - .skip(1) - .try_fold(first, |matched, (i, (candidate, equal_bit))| { - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| format!("next_matched_allocated-{:?}", i)), - candidate, - &matched, - equal_bit, - ) - })?; - - Ok(selected) + assert_eq!(a.len(), selector_vec.len()); + + // Compare all instances in `a` to the first one + let first: AllocatedRelaxedR1CSInstance = a + .first() + .cloned() + .ok_or_else(|| SynthesisError::IncompatibleLengthVector("empty vec length".to_string()))?; + + // Since `selector_vec` is correct, only one entry is 1. + // If selector_vec[0] is 1, then all `conditionally_select` will return `first`. + // Otherwise, the correct instance will be selected. 
+ // TODO: reformulate when iterator_try_reduce stabilizes + let selected = a.iter().zip_eq(selector_vec.iter()).enumerate().skip(1).try_fold( + first, + |matched, (i, (candidate, equal_bit))| { + conditionally_select_alloc_relaxed_r1cs( + cs.namespace(|| format!("next_matched_allocated-{:?}", i)), + candidate, + &matched, + equal_bit, + ) + }, + )?; + + Ok(selected) } /// Compute a selector vector `s` of size `num_indices`, such that /// `s[i] == 1` if i == `target_index` and 0 otherwise. pub fn get_selector_vec_from_index>( - mut cs: CS, - target_index: &AllocatedNum, - num_indices: usize, + mut cs: CS, + target_index: &AllocatedNum, + num_indices: usize, ) -> Result, SynthesisError> { - assert_ne!(num_indices, 0); - - // Compute the selector vector non-deterministically - let selector = (0..num_indices) - .map(|idx| { - // b <- idx == target_index - Ok(Boolean::Is(AllocatedBit::alloc( - cs.namespace(|| format!("allocate s_{:?}", idx)), - target_index.get_value().map(|v| v == F::from(idx as u64)), - )?)) - }) - .collect::, SynthesisError>>()?; - - // Enforce ∑ selector[i] = 1 - { - let selected_sum = selector.iter().fold(LinearCombination::zero(), |lc, bit| { - lc + &bit.lc(CS::one(), F::ONE) - }); - cs.enforce( - || "exactly-one-selection", - |_| selected_sum, - |lc| lc + CS::one(), - |lc| lc + CS::one(), - ); - } - - // Enforce `target_index - ∑ i * selector[i] = 0`` - { - let selected_value = selector - .iter() - .enumerate() - .fold(LinearCombination::zero(), |lc, (i, bit)| { - lc + &bit.lc(CS::one(), F::from(i as u64)) - }); - cs.enforce( - || "target_index - ∑ i * selector[i] = 0", - |lc| lc, - |lc| lc, - |lc| lc + target_index.get_variable() - &selected_value, - ); - } - - Ok(selector) + assert_ne!(num_indices, 0); + + // Compute the selector vector non-deterministically + let selector = (0..num_indices) + .map(|idx| { + // b <- idx == target_index + Ok(Boolean::Is(AllocatedBit::alloc( + cs.namespace(|| format!("allocate s_{:?}", idx)), + 
target_index.get_value().map(|v| v == F::from(idx as u64)), + )?)) + }) + .collect::, SynthesisError>>()?; + + // Enforce ∑ selector[i] = 1 + { + let selected_sum = + selector.iter().fold(LinearCombination::zero(), |lc, bit| lc + &bit.lc(CS::one(), F::ONE)); + cs.enforce( + || "exactly-one-selection", + |_| selected_sum, + |lc| lc + CS::one(), + |lc| lc + CS::one(), + ); + } + + // Enforce `target_index - ∑ i * selector[i] = 0`` + { + let selected_value = selector + .iter() + .enumerate() + .fold(LinearCombination::zero(), |lc, (i, bit)| lc + &bit.lc(CS::one(), F::from(i as u64))); + cs.enforce( + || "target_index - ∑ i * selector[i] = 0", + |lc| lc, + |lc| lc, + |lc| lc + target_index.get_variable() - &selected_value, + ); + } + + Ok(selector) } // #[cfg(test)] diff --git a/prover/src/traits/commitment.rs b/prover/src/traits/commitment.rs new file mode 100644 index 0000000..97ed7dd --- /dev/null +++ b/prover/src/traits/commitment.rs @@ -0,0 +1,87 @@ +//! This module defines a collection of traits that define the behavior of a +//! commitment engine We require the commitment engine to provide a commitment +//! to vectors with a single group element +use core::{ + fmt::Debug, + ops::{Add, Mul, MulAssign}, +}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + traits::{AbsorbInROTrait, Engine, TranscriptReprTrait}, +}; + +/// A helper trait for types implementing scalar multiplication. 
+pub trait ScalarMul: Mul + MulAssign {} + +impl ScalarMul for T where T: Mul + MulAssign {} + +/// This trait defines the behavior of the commitment +pub trait CommitmentTrait: + Clone + + Copy + + Debug + + Default + + PartialEq + + Eq + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de> + + AbsorbInROTrait + + Add + + ScalarMul { + /// Holds the type of the compressed commitment + type CompressedCommitment: Clone + + Debug + + PartialEq + + Eq + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de>; + + /// Compresses self into a compressed commitment + fn compress(&self) -> Self::CompressedCommitment; + + /// Returns the coordinate representation of the commitment + fn to_coordinates(&self) -> (E::Base, E::Base, bool); + + /// Decompresses a compressed commitment into a commitment + fn decompress(c: &Self::CompressedCommitment) -> Result; +} + +/// A trait that helps determine the length of a structure. +/// Note this does not impose any memory representation constraints on the +/// structure. +pub trait Len { + /// Returns the length of the structure. + fn length(&self) -> usize; +} + +/// A trait that ties different pieces of the commitment generation together +pub trait CommitmentEngineTrait: Clone + Send + Sync { + /// Holds the type of the commitment key + /// The key should quantify its length in terms of group generators. 
+ type CommitmentKey: Len + + Clone + + PartialEq + + Debug + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Holds the type of the commitment + type Commitment: CommitmentTrait; + + /// Samples a new commitment key of a specified size + fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey; + + /// Commits to the provided vector using the provided generators + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment; +} diff --git a/prover/src/traits/evaluation.rs b/prover/src/traits/evaluation.rs new file mode 100644 index 0000000..faff5df --- /dev/null +++ b/prover/src/traits/evaluation.rs @@ -0,0 +1,60 @@ +//! This module defines a collection of traits that define the behavior of a +//! polynomial evaluation engine A vector of size N is treated as a multilinear +//! polynomial in \log{N} variables, and a commitment provided by the commitment +//! engine is treated as a multilinear polynomial commitment +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + traits::{commitment::CommitmentEngineTrait, Engine}, +}; + +/// A trait that ties different pieces of the commitment evaluation together +pub trait EvaluationEngineTrait: Clone + Send + Sync { + /// A type that holds the prover key + type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A type that holds the verifier key + type VerifierKey: Send + + Sync + // required for easy Digest computation purposes, could be relaxed to + // [`crate::digest::Digestible`] + + Serialize + + for<'de> Deserialize<'de>; + + /// A type that holds the evaluation argument + type EvaluationArgument: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A method to perform any additional setup needed to produce proofs of + /// evaluations + /// + /// **Note:** This method should be cheap and should not copy most of the + /// commitment key. 
Look at `CommitmentEngineTrait::setup` for generating + /// SRS data. + fn setup( + ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, + ) -> (Self::ProverKey, Self::VerifierKey); + + /// A method to prove the evaluation of a multilinear polynomial + fn prove( + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + pk: &Self::ProverKey, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + poly: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, + ) -> Result; + + /// A method to verify the purported evaluation of a multilinear polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut E::TE, + comm: &<::CE as CommitmentEngineTrait>::Commitment, + point: &[E::Scalar], + eval: &E::Scalar, + arg: &Self::EvaluationArgument, + ) -> Result<(), NovaError>; +} diff --git a/prover/src/traits/mod.rs b/prover/src/traits/mod.rs new file mode 100644 index 0000000..e1c54c7 --- /dev/null +++ b/prover/src/traits/mod.rs @@ -0,0 +1,182 @@ +//! This module defines various traits required by the users of the library to +//! implement. 
+use core::fmt::Debug; + +use bellpepper_core::{boolean::AllocatedBit, num::AllocatedNum, ConstraintSystem, SynthesisError}; +use ff::{PrimeField, PrimeFieldBits}; +use num_bigint::BigInt; +use serde::{Deserialize, Serialize}; + +use crate::errors::NovaError; + +pub mod commitment; + +use commitment::CommitmentEngineTrait; + +/// Represents an element of a group +/// This is currently tailored for an elliptic curve group +pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { + /// A type representing an element of the base field of the group + type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>; + + /// A type representing an element of the scalar field of the group + type Scalar: PrimeFieldBits + PrimeFieldExt + Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// Returns A, B, the order of the group, the size of the base field as big + /// integers + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt); +} + +/// A collection of engines that are required by the library +pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { + /// A type representing an element of the base field of the group + type Base: PrimeFieldBits + TranscriptReprTrait + Serialize + for<'de> Deserialize<'de>; + + /// A type representing an element of the scalar field of the group + type Scalar: PrimeFieldBits + + PrimeFieldExt + + Send + + Sync + + TranscriptReprTrait + + Serialize + + for<'de> Deserialize<'de>; + + /// A type that represents an element of the group + type GE: Group + Serialize + for<'de> Deserialize<'de>; + + /// A type that represents a circuit-friendly sponge that consumes elements + /// from the base field and squeezes out elements of the scalar field + type RO: ROTrait; + + /// An alternate implementation of `Self::RO` in the circuit model + type ROCircuit: ROCircuitTrait; + + /// A type that provides a generic Fiat-Shamir transcript to be used when + /// externalizing proofs + type TE: 
TranscriptEngineTrait; + + /// A type that defines a commitment engine over scalars in the group + type CE: CommitmentEngineTrait; +} + +/// This is a convenience trait to pair engines which fields are in a curve +/// cycle relationship +pub trait CurveCycleEquipped: Engine { + /// The secondary `Engine` of `Self` + type Secondary: Engine::Scalar, Scalar = ::Base>; +} + +/// Convenience projection to the secondary `Engine` of a `CurveCycleEquipped` +pub type Dual = ::Secondary; + +/// A helper trait to absorb different objects in RO +pub trait AbsorbInROTrait { + /// Absorbs the value in the provided RO + fn absorb_in_ro(&self, ro: &mut E::RO); +} + +/// A helper trait that defines the behavior of a hash function that we use as +/// an RO +pub trait ROTrait { + /// The circuit alter ego of this trait impl - this constrains it to use the + /// same constants + type CircuitRO: ROCircuitTrait; + + /// A type representing constants/parameters associated with the hash + /// function + type Constants: Debug + + Default + + Clone + + PartialEq + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Initializes the hash function + fn new(constants: Self::Constants, num_absorbs: usize) -> Self; + + /// Adds a scalar to the internal state + fn absorb(&mut self, e: Base); + + /// Returns a challenge of `num_bits` by hashing the internal state + fn squeeze(&mut self, num_bits: usize) -> Scalar; +} + +/// A helper trait that defines the behavior of a hash function that we use as +/// an RO in the circuit model +pub trait ROCircuitTrait { + /// the vanilla alter ego of this trait - this constrains it to use the same + /// constants + type NativeRO: ROTrait; + + /// A type representing constants/parameters associated with the hash + /// function on this Base field + type Constants: Debug + + Default + + Clone + + PartialEq + + Send + + Sync + + Serialize + + for<'de> Deserialize<'de>; + + /// Initializes the hash function + fn new(constants: Self::Constants, 
num_absorbs: usize) -> Self; + + /// Adds a scalar to the internal state + fn absorb(&mut self, e: &AllocatedNum); + + /// Returns a challenge of `num_bits` by hashing the internal state + fn squeeze>( + &mut self, + cs: CS, + num_bits: usize, + ) -> Result, SynthesisError>; +} + +/// An alias for constants associated with `E::RO` +pub type ROConstants = + <::RO as ROTrait<::Base, ::Scalar>>::Constants; + +/// An alias for constants associated with `E::ROCircuit` +pub type ROConstantsCircuit = + <::ROCircuit as ROCircuitTrait<::Base>>::Constants; + +/// This trait allows types to implement how they want to be added to +/// `TranscriptEngine` +pub trait TranscriptReprTrait: Send + Sync { + /// returns a byte representation of self to be added to the transcript + fn to_transcript_bytes(&self) -> Vec; +} + +/// This trait defines the behavior of a transcript engine compatible with +/// Spartan +pub trait TranscriptEngineTrait: Send + Sync { + /// initializes the transcript + fn new(label: &'static [u8]) -> Self; + + /// returns a scalar element of the group as a challenge + fn squeeze(&mut self, label: &'static [u8]) -> Result; + + /// absorbs any type that implements `TranscriptReprTrait` under a label + fn absorb>(&mut self, label: &'static [u8], o: &T); + + /// adds a domain separator + fn dom_sep(&mut self, bytes: &'static [u8]); +} + +/// Defines additional methods on `PrimeField` objects +pub trait PrimeFieldExt: PrimeField { + /// Returns a scalar representing the bytes + fn from_uniform(bytes: &[u8]) -> Self; +} + +impl> TranscriptReprTrait for &[T] { + fn to_transcript_bytes(&self) -> Vec { + self.iter().flat_map(|t| t.to_transcript_bytes()).collect::>() + } +} + +pub mod evaluation; +pub mod snark; diff --git a/prover/src/traits/snark.rs b/prover/src/traits/snark.rs new file mode 100644 index 0000000..7af5a43 --- /dev/null +++ b/prover/src/traits/snark.rs @@ -0,0 +1,135 @@ +//! This module defines a collection of traits that define the behavior of a +//! 
`zkSNARK` for `RelaxedR1CS` +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; + +use crate::{ + errors::NovaError, + r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, + traits::Engine, + CommitmentKey, +}; + +// NOTES: This function seems heavily reliant on dynamic allocation all to +// return 0 in the end... + +/// Public parameter creation takes a size hint. This size hint carries the +/// particular requirements of the final compressing SNARK the user expected to +/// use with these public parameters, and the below is a sensible default, which +/// is to not require any more bases then the usual (maximum of the number of +/// variables and constraints of the involved R1CS circuit). +#[allow(clippy::type_complexity)] +pub fn default_ck_hint() -> Box Fn(&'a R1CSShape) -> usize> { + // The default is to not put an additional floor on the size of the commitment + // key + Box::new(|_shape: &R1CSShape| 0) +} + +// NOTES: I'm not sure having a trait here is really necessary unless you're +// wanting to have a much larger abstraction. I'd consider just gutting this and +// forming one SNARK that we use. + +/// A trait that defines the behavior of a `zkSNARK` +pub trait RelaxedR1CSSNARKTrait: + Send + Sync + Serialize + for<'de> Deserialize<'de> { + /// A type that represents the prover's key + type ProverKey: Send + Sync; + + /// A type that represents the verifier's key + type VerifierKey: Send + Sync + Serialize; + + /// This associated function (not a method) provides a hint that offers + /// a minimum sizing cue for the commitment key used by this SNARK + /// implementation. The commitment key passed in setup should then + /// be at least as large as this hint. 
+ #[allow(clippy::type_complexity)] + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { + // The default is to not put an additional floor on the size of the commitment + // key + default_ck_hint() + } + + /// Initialize a ProvingKey directly from a CommitmentKey and a + /// already known vk_digest. + fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result; + + /// Produces the keys for the prover and the verifier + fn setup( + ck: Arc>, + S: &R1CSShape, + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; + + /// Produces a new SNARK for a relaxed R1CS + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: &R1CSShape, + U: &RelaxedR1CSInstance, + W: &RelaxedR1CSWitness, + ) -> Result; + + /// Verifies a SNARK for a relaxed R1CS + fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError>; +} + +/// A trait that defines the behavior of a `zkSNARK` to prove knowledge of +/// satisfying witness to batches of relaxed R1CS instances. +pub trait BatchedRelaxedR1CSSNARKTrait: + Send + Sync + Serialize + for<'de> Deserialize<'de> { + /// A type that represents the prover's key + type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; + + /// A type that represents the verifier's key + type VerifierKey: Send + Sync + DigestHelperTrait + Serialize + for<'de> Deserialize<'de>; + + // NOTES: If we don't need something more general here, this is just an odd + // thing to have defined generically since it just calls the weird function + // above. + + /// This associated function (not a method) provides a hint that offers + /// a minimum sizing cue for the commitment key used by this SNARK + /// implementation. The commitment key passed in setup should then + /// be at least as large as this hint. + #[allow(clippy::type_complexity)] + fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { default_ck_hint() } + + /// Initialize a ProvingKey directly from a CommitmentKey and a + /// already known vk_digest. 
+ fn initialize_pk( + ck: Arc>, + vk_digest: E::Scalar, + ) -> Result; + + /// Produces the keys for the prover and the verifier + /// + /// **Note:** This method should be cheap and should not copy most of the + /// commitment key. Look at `CommitmentEngineTrait::setup` for generating + /// SRS data. + fn setup( + ck: Arc>, // NOTES: Why `Arc` this? + S: Vec<&R1CSShape>, /* NOTES: Why not a &[R1CSShape] here?, would get the same + * thing across as an iter i think */ + ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; + + /// Produces a new SNARK for a batch of relaxed R1CS + fn prove( + ck: &CommitmentKey, + pk: &Self::ProverKey, + S: Vec<&R1CSShape>, + U: &[RelaxedR1CSInstance], + W: &[RelaxedR1CSWitness], + ) -> Result; + + /// Verifies a SNARK for a batch of relaxed R1CS + fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) -> Result<(), NovaError>; +} + +/// A helper trait that defines the behavior of a verifier key of `zkSNARK` +pub trait DigestHelperTrait { + /// Returns the digest of the verifier's key + fn digest(&self) -> E::Scalar; +} diff --git a/src/bellpepper/mod.rs b/src/bellpepper/mod.rs deleted file mode 100644 index 35e8796..0000000 --- a/src/bellpepper/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! Support for generating R1CS from [Bellpepper]. -//! -//! 
[Bellpepper]: https://github.com/argumentcomputer/bellpepper - -pub mod r1cs; -pub mod shape_cs; -pub mod solver; -pub mod test_shape_cs; - -#[cfg(test)] -mod tests { - use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; - use ff::PrimeField; - - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - provider::Bn256EngineKZG, - traits::{snark::default_ck_hint, Engine}, - }; - - fn synthesize_alloc_bit>(cs: &mut CS) { - // get two bits as input and check that they are indeed bits - let a = AllocatedNum::alloc_infallible(cs.namespace(|| "a"), || Fr::ONE); - let _ = a.inputize(cs.namespace(|| "a is input")); - cs.enforce( - || "check a is 0 or 1", - |lc| lc + CS::one() - a.get_variable(), - |lc| lc + a.get_variable(), - |lc| lc, - ); - let b = AllocatedNum::alloc_infallible(cs.namespace(|| "b"), || Fr::ONE); - let _ = b.inputize(cs.namespace(|| "b is input")); - cs.enforce( - || "check b is 0 or 1", - |lc| lc + CS::one() - b.get_variable(), - |lc| lc + b.get_variable(), - |lc| lc, - ); - } - - fn test_alloc_bit_with() { - // First create the shape - let mut cs: ShapeCS = ShapeCS::new(); - synthesize_alloc_bit(&mut cs); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Now get the assignment - let mut cs = SatisfyingAssignment::::new(); - synthesize_alloc_bit(&mut cs); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that this is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - #[test] - fn test_alloc_bit() { - test_alloc_bit_with::(); - } -} diff --git a/src/bellpepper/r1cs.rs b/src/bellpepper/r1cs.rs deleted file mode 100644 index 431f5a3..0000000 --- a/src/bellpepper/r1cs.rs +++ /dev/null @@ -1,162 +0,0 @@ -//! Support for generating R1CS using bellpepper. 
- -#![allow(non_snake_case)] - -use bellpepper_core::{Index, LinearCombination}; -use ff::PrimeField; - -use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; -use crate::{ - errors::NovaError, - r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, - traits::Engine, - CommitmentKey, -}; - -/// `NovaWitness` provide a method for acquiring an `R1CSInstance` and -/// `R1CSWitness` from implementers. -pub trait NovaWitness { - /// Return an instance and witness, given a shape and ck. - fn r1cs_instance_and_witness( - self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; -} - -/// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` -/// from implementers. -pub trait NovaShape { - /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. - /// A `CommitmentKeyHint` should be provided to help guide the construction - /// of the `CommitmentKey`. This parameter is documented in - /// `r1cs::R1CS::commitment_key`. - fn r1cs_shape_and_key( - &self, - ck_hint: &CommitmentKeyHint, - ) -> (R1CSShape, CommitmentKey) { - let S = self.r1cs_shape(); - let ck = commitment_key(&S, ck_hint); - - (S, ck) - } - /// Return an appropriate `R1CSShape`. - fn r1cs_shape(&self) -> R1CSShape; -} - -impl NovaWitness for SatisfyingAssignment { - fn r1cs_instance_and_witness( - self, - shape: &R1CSShape, - ck: &CommitmentKey, - ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { - let (input_assignment, aux_assignment) = self.to_assignments(); - let W = R1CSWitness::::new(shape, aux_assignment)?; - let X = input_assignment[1..].to_owned(); - - let comm_W = W.commit(ck); - - let instance = R1CSInstance::::new(shape, comm_W, X)?; - - Ok((instance, W)) - } -} - -macro_rules! 
impl_nova_shape { - ( $name:ident) => { - impl NovaShape for $name - where - E::Scalar: PrimeField, - { - fn r1cs_shape(&self) -> R1CSShape { - let mut A = SparseMatrix::::empty(); - let mut B = SparseMatrix::::empty(); - let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); - - let mut num_cons_added = 0; - let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); - let num_inputs = self.num_inputs(); - let num_constraints = self.num_constraints(); - let num_vars = self.num_aux(); - - for constraint in self.constraints.iter() { - add_constraint( - &mut X, - num_vars, - &constraint.0, - &constraint.1, - &constraint.2, - ); - } - assert_eq!(num_cons_added, num_constraints); - - A.cols = num_vars + num_inputs; - B.cols = num_vars + num_inputs; - C.cols = num_vars + num_inputs; - - // Don't count One as an input for shape's purposes. - let res = R1CSShape::new(num_constraints, num_vars, num_inputs - 1, A, B, C); - res.unwrap() - } - } - }; -} - -impl_nova_shape!(ShapeCS); -impl_nova_shape!(TestShapeCS); - -fn add_constraint( - X: &mut ( - &mut SparseMatrix, - &mut SparseMatrix, - &mut SparseMatrix, - &mut usize, - ), - num_vars: usize, - a_lc: &LinearCombination, - b_lc: &LinearCombination, - c_lc: &LinearCombination, -) { - let (A, B, C, nn) = X; - let n = **nn; - assert_eq!(n, A.num_rows(), "A: invalid shape"); - assert_eq!(n, B.num_rows(), "B: invalid shape"); - assert_eq!(n, C.num_rows(), "C: invalid shape"); - - let add_constraint_component = |index: Index, coeff: &S, M: &mut SparseMatrix| { - // we add constraints to the matrix only if the associated coefficient is - // non-zero - if *coeff != S::ZERO { - match index { - Index::Input(idx) => { - // Inputs come last, with input 0, representing 'one', - // at position num_vars within the witness vector. 
- let idx = idx + num_vars; - M.data.push(*coeff); - M.indices.push(idx); - } - Index::Aux(idx) => { - M.data.push(*coeff); - M.indices.push(idx); - } - } - } - }; - - for (index, coeff) in a_lc.iter() { - add_constraint_component(index.0, coeff, A); - } - A.indptr.push(A.indices.len()); - - for (index, coeff) in b_lc.iter() { - add_constraint_component(index.0, coeff, B) - } - B.indptr.push(B.indices.len()); - - for (index, coeff) in c_lc.iter() { - add_constraint_component(index.0, coeff, C) - } - C.indptr.push(C.indices.len()); - - **nn += 1; -} diff --git a/src/bellpepper/shape_cs.rs b/src/bellpepper/shape_cs.rs deleted file mode 100644 index a7aa662..0000000 --- a/src/bellpepper/shape_cs.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! Support for generating R1CS shape using bellpepper. - -use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; -use ff::PrimeField; - -use crate::traits::Engine; - -/// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. -pub struct ShapeCS -where - E::Scalar: PrimeField, -{ - /// All constraints added to the `ShapeCS`. - pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, - )>, - inputs: usize, - aux: usize, -} - -impl ShapeCS { - /// Create a new, default `ShapeCS`, - pub fn new() -> Self { - Self::default() - } - - /// Returns the number of constraints defined for this `ShapeCS`. - pub fn num_constraints(&self) -> usize { - self.constraints.len() - } - - /// Returns the number of inputs defined for this `ShapeCS`. - pub fn num_inputs(&self) -> usize { - self.inputs - } - - /// Returns the number of aux inputs defined for this `ShapeCS`. 
- pub fn num_aux(&self) -> usize { - self.aux - } -} - -impl Default for ShapeCS { - fn default() -> Self { - Self { - constraints: vec![], - inputs: 1, - aux: 0, - } - } -} - -impl ConstraintSystem for ShapeCS { - type Root = Self; - - fn alloc(&mut self, _annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - self.aux += 1; - - Ok(Variable::new_unchecked(Index::Aux(self.aux - 1))) - } - - fn alloc_input(&mut self, _annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - self.inputs += 1; - - Ok(Variable::new_unchecked(Index::Input(self.inputs - 1))) - } - - fn enforce(&mut self, _annotation: A, a: LA, b: LB, c: LC) - where - A: FnOnce() -> AR, - AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, - { - let a = a(LinearCombination::zero()); - let b = b(LinearCombination::zero()); - let c = c(LinearCombination::zero()); - - self.constraints.push((a, b, c)); - } - - fn push_namespace(&mut self, _name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - } - - fn pop_namespace(&mut self) {} - - fn get_root(&mut self) -> &mut Self::Root { - self - } -} diff --git a/src/bellpepper/test_shape_cs.rs b/src/bellpepper/test_shape_cs.rs deleted file mode 100644 index 923e432..0000000 --- a/src/bellpepper/test_shape_cs.rs +++ /dev/null @@ -1,320 +0,0 @@ -//! Support for generating R1CS shape using bellpepper. -//! `TestShapeCS` implements a superset of `ShapeCS`, adding non-trivial -//! namespace support for use in testing. 
- -use core::fmt::Write; -use std::{ - cmp::Ordering, - collections::{BTreeMap, HashMap}, -}; - -use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; -use ff::{Field, PrimeField}; - -use crate::traits::Engine; - -#[derive(Clone, Copy)] -struct OrderedVariable(Variable); - -#[allow(dead_code)] -#[derive(Debug)] -enum NamedObject { - Constraint(usize), - Var(Variable), - Namespace, -} - -impl Eq for OrderedVariable {} -impl PartialEq for OrderedVariable { - fn eq(&self, other: &Self) -> bool { - match (self.0.get_unchecked(), other.0.get_unchecked()) { - (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => { - a == b - } - _ => false, - } - } -} -impl PartialOrd for OrderedVariable { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl Ord for OrderedVariable { - fn cmp(&self, other: &Self) -> Ordering { - match (self.0.get_unchecked(), other.0.get_unchecked()) { - (Index::Input(ref a), Index::Input(ref b)) | (Index::Aux(ref a), Index::Aux(ref b)) => { - a.cmp(b) - } - (Index::Input(_), Index::Aux(_)) => Ordering::Less, - (Index::Aux(_), Index::Input(_)) => Ordering::Greater, - } - } -} - -/// `TestShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a -/// circuit. -pub struct TestShapeCS { - named_objects: HashMap, - current_namespace: Vec, - /// All constraints added to the `TestShapeCS`. 
- pub constraints: Vec<( - LinearCombination, - LinearCombination, - LinearCombination, - String, - )>, - inputs: Vec, - aux: Vec, -} - -fn proc_lc( - terms: &LinearCombination, -) -> BTreeMap { - let mut map = BTreeMap::new(); - for (var, &coeff) in terms.iter() { - map.entry(OrderedVariable(var)) - .or_insert_with(|| Scalar::ZERO) - .add_assign(&coeff); - } - - // Remove terms that have a zero coefficient to normalize - let mut to_remove = vec![]; - for (var, coeff) in map.iter() { - if coeff.is_zero().into() { - to_remove.push(*var) - } - } - - for var in to_remove { - map.remove(&var); - } - - map -} - -impl TestShapeCS -where - E::Scalar: PrimeField, -{ - #[allow(unused)] - /// Create a new, default `TestShapeCS`, - pub fn new() -> Self { - Self::default() - } - - /// Returns the number of constraints defined for this `TestShapeCS`. - pub fn num_constraints(&self) -> usize { - self.constraints.len() - } - - /// Returns the number of inputs defined for this `TestShapeCS`. - pub fn num_inputs(&self) -> usize { - self.inputs.len() - } - - /// Returns the number of aux inputs defined for this `TestShapeCS`. - pub fn num_aux(&self) -> usize { - self.aux.len() - } - - /// Print all public inputs, aux inputs, and constraint names. - #[allow(dead_code)] - pub fn pretty_print_list(&self) -> Vec { - let mut result = Vec::new(); - - for input in &self.inputs { - result.push(format!("INPUT {input}")); - } - for aux in &self.aux { - result.push(format!("AUX {aux}")); - } - - for (_a, _b, _c, name) in &self.constraints { - result.push(name.to_string()); - } - - result - } - - /// Print all iputs and a detailed representation of each constraint. 
- #[allow(dead_code)] - pub fn pretty_print(&self) -> String { - let mut s = String::new(); - - for input in &self.inputs { - writeln!(s, "INPUT {}", &input).unwrap() - } - - let negone = -::ONE; - - let powers_of_two = (0..E::Scalar::NUM_BITS) - .map(|i| E::Scalar::from(2u64).pow_vartime([u64::from(i)])) - .collect::>(); - - let pp = |s: &mut String, lc: &LinearCombination| { - s.push('('); - let mut is_first = true; - for (var, coeff) in proc_lc::(lc) { - if coeff == negone { - s.push_str(" - ") - } else if !is_first { - s.push_str(" + ") - } - is_first = false; - - if coeff != ::ONE && coeff != negone { - for (i, x) in powers_of_two.iter().enumerate() { - if x == &coeff { - write!(s, "2^{i} . ").unwrap(); - break; - } - } - - write!(s, "{coeff:?} . ").unwrap() - } - - match var.0.get_unchecked() { - Index::Input(i) => { - write!(s, "`I{}`", &self.inputs[i]).unwrap(); - } - Index::Aux(i) => { - write!(s, "`A{}`", &self.aux[i]).unwrap(); - } - } - } - if is_first { - // Nothing was visited, print 0. - s.push('0'); - } - s.push(')'); - }; - - for (a, b, c, name) in &self.constraints { - s.push('\n'); - - write!(s, "{name}: ").unwrap(); - pp(&mut s, a); - write!(s, " * ").unwrap(); - pp(&mut s, b); - s.push_str(" = "); - pp(&mut s, c); - } - - s.push('\n'); - - s - } - - /// Associate `NamedObject` with `path`. - /// `path` must not already have an associated object. 
- fn set_named_obj(&mut self, path: String, to: NamedObject) { - assert!( - !self.named_objects.contains_key(&path), - "tried to create object at existing path: {path}" - ); - - self.named_objects.insert(path, to); - } -} - -impl Default for TestShapeCS { - fn default() -> Self { - let mut map = HashMap::new(); - map.insert("ONE".into(), NamedObject::Var(Self::one())); - Self { - named_objects: map, - current_namespace: vec![], - constraints: vec![], - inputs: vec![String::from("ONE")], - aux: vec![], - } - } -} - -impl ConstraintSystem for TestShapeCS -where - E::Scalar: PrimeField, -{ - type Root = Self; - - fn alloc(&mut self, annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - self.aux.push(path); - - Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1))) - } - - fn alloc_input(&mut self, annotation: A, _f: F) -> Result - where - F: FnOnce() -> Result, - A: FnOnce() -> AR, - AR: Into, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - self.inputs.push(path); - - Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1))) - } - - fn enforce(&mut self, annotation: A, a: LA, b: LB, c: LC) - where - A: FnOnce() -> AR, - AR: Into, - LA: FnOnce(LinearCombination) -> LinearCombination, - LB: FnOnce(LinearCombination) -> LinearCombination, - LC: FnOnce(LinearCombination) -> LinearCombination, - { - let path = compute_path(&self.current_namespace, &annotation().into()); - let index = self.constraints.len(); - self.set_named_obj(path.clone(), NamedObject::Constraint(index)); - - let a = a(LinearCombination::zero()); - let b = b(LinearCombination::zero()); - let c = c(LinearCombination::zero()); - - self.constraints.push((a, b, c, path)); - } - - fn push_namespace(&mut self, name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - let name = name_fn().into(); - let path = 
compute_path(&self.current_namespace, &name); - self.set_named_obj(path, NamedObject::Namespace); - self.current_namespace.push(name); - } - - fn pop_namespace(&mut self) { - assert!(self.current_namespace.pop().is_some()); - } - - fn get_root(&mut self) -> &mut Self::Root { - self - } -} - -fn compute_path(ns: &[String], this: &str) -> String { - assert!(!this.contains('/'), "'/' is not allowed in names"); - - let mut name = String::new(); - - let mut needs_separation = false; - for ns in ns.iter().chain(Some(this.to_string()).iter()) { - if needs_separation { - name += "/"; - } - - name += ns; - needs_separation = true; - } - - name -} diff --git a/src/circuit.rs b/src/circuit.rs deleted file mode 100644 index 5cc5a5f..0000000 --- a/src/circuit.rs +++ /dev/null @@ -1,528 +0,0 @@ -//! There are two augmented circuits: the primary and the secondary. -//! Each of them is over a curve in a 2-cycle of elliptic curves. -//! We have two running instances. Each circuit takes as input 2 hashes: one for -//! each of the running instances. Each of these hashes is H(params = H(shape, -//! ck), i, z0, zi, U). Each circuit folds the last invocation of the other into -//! 
the running instance - -use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::Field; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NIO_NOVA_FOLD, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, AllocatedPoint, - AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - supernova::StepCircuit, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - Commitment, -}; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct NovaAugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, - is_primary_circuit: bool, // A boolean indicating if this is the primary circuit -} - -impl NovaAugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { - Self { - limb_width, - n_limbs, - is_primary_circuit, - } - } -} - -// NOTES: All these options here seem to point towards using a typestate pattern -// or something. 
- -#[derive(Debug, Serialize)] -#[serde(bound = "")] -pub struct NovaAugmentedCircuitInputs { - params: E::Scalar, - i: E::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, -} - -impl NovaAugmentedCircuitInputs { - /// Create new inputs/witness for the verification circuit - pub fn new( - params: E::Scalar, - i: E::Base, - z0: Vec, - zi: Option>, - U: Option>, - u: Option>, - T: Option>, - ) -> Self { - Self { - params, - i, - z0, - zi, - U, - u, - T, - } - } -} - -/// The augmented circuit F' in Nova that includes a step circuit F -/// and the circuit for the verifier in Nova's non-interactive folding scheme -pub struct NovaAugmentedCircuit<'a, E: Engine, SC: StepCircuit> { - params: &'a NovaAugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, // The function that is applied for each step -} - -impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { - /// Create a new verification circuit for the input relaxed r1cs instances - pub const fn new( - params: &'a NovaAugmentedCircuitParams, - inputs: Option>, - step_circuit: &'a SC, - ro_consts: ROConstantsCircuit, - ) -> Self { - Self { - params, - inputs, - step_circuit, - ro_consts, - } - } - - /// Allocate all witnesses and return - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - ) -> Result< - ( - AllocatedNum, - AllocatedNum, - Vec>, - Vec>, - AllocatedRelaxedR1CSInstance, - AllocatedR1CSInstance, - AllocatedPoint, - ), - SynthesisError, - > { - // Allocate the params - let params = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.params), - )?; - - // Allocate i - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - // Allocate z0 - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate the running instance - let U: AllocatedRelaxedR1CSInstance = - AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| "Allocate U"), - self.inputs.as_ref().and_then(|inputs| inputs.U.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - // Allocate the instance to be folded in - let u = AllocatedR1CSInstance::alloc( - cs.namespace(|| "allocate instance u to fold"), - self.inputs.as_ref().and_then(|inputs| inputs.u.as_ref()), - )?; - - // Allocate T - let T = AllocatedPoint::alloc( - cs.namespace(|| "allocate T"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), - )?; - T.check_on_curve(cs.namespace(|| "check T on curve"))?; - - Ok((params, i, z_0, z_i, U, u, T)) - } - - /// Synthesizes base case and returns the new relaxed `R1CSInstance` - fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - u: AllocatedR1CSInstance, - ) -> Result, SynthesisError> { - let U_default: AllocatedRelaxedR1CSInstance = - if self.params.is_primary_circuit { - // The primary circuit just returns the default R1CS instance - AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate U_default"), - self.params.limb_width, - self.params.n_limbs, - )? - } else { - // The secondary circuit returns the incoming R1CS instance - AllocatedRelaxedR1CSInstance::from_r1cs_instance( - cs.namespace(|| "Allocate U_default"), - u, - self.params.limb_width, - self.params.n_limbs, - )? 
- }; - Ok(U_default) - } - - /// Synthesizes non base case and returns the new relaxed `R1CSInstance` - /// And a boolean indicating if all checks pass - fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - U: &AllocatedRelaxedR1CSInstance, - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - arity: usize, - ) -> Result<(AllocatedRelaxedR1CSInstance, AllocatedBit), SynthesisError> - { - // Check that u.x[0] = Hash(params, U, i, z0, zi) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity, - ); - ro.absorb(params); - ro.absorb(i); - for e in z_0 { - ro.absorb(e); - } - for e in z_i { - ro.absorb(e); - } - U.absorb_in_ro(cs.namespace(|| "absorb U"), &mut ro)?; - - let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; - let check_pass = alloc_num_equals( - cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), - &u.X[0], - &hash, - )?; - - // Run NIFS Verifier - let U_fold = U.fold_with_r1cs( - cs.namespace(|| "compute fold of U and u"), - params, - u, - T, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - Ok((U_fold, check_pass)) - } -} - -impl<'a, E: Engine, SC: StepCircuit> NovaAugmentedCircuit<'a, E, SC> { - /// synthesize circuit giving constraint system - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result>, SynthesisError> { - let arity = self.step_circuit.arity(); - - // Allocate all witnesses - let (params, i, z_0, z_i, U, u, T) = - self.alloc_witness(cs.namespace(|| "allocate the circuit witness"), arity)?; - - // Compute variable indicating if this is the base case - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = - alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; - - // Synthesize the circuit for 
the base case and get the new running instance - let Unew_base = self.synthesize_base_case(cs.namespace(|| "base case"), u.clone())?; - - // Synthesize the circuit for the non-base case and get the new running - // instance along with a boolean indicating if all checks have passed - let (Unew_non_base, check_non_base_pass) = self.synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - ¶ms, - &i, - &z_0, - &z_i, - &U, - &u, - &T, - arity, - )?; - - // Either check_non_base_pass=true or we are in the base case - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // Compute the U_new - let Unew = Unew_base.conditionally_select( - cs.namespace(|| "compute U_new"), - &Unew_non_base, - &Boolean::from(is_base_case.clone()), - )?; - - // Compute i + 1 - let i_new = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc, - |lc| lc, - |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - // TODO: Note, I changed this here because I removed the other `StepCircuit` - // trait. 
- let (_pc, z_next) = - self.step_circuit - .synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // Compute the new hash H(params, Unew, i+1, z0, z_{i+1}) - let mut ro = E::ROCircuit::new(self.ro_consts, NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * arity); - ro.absorb(¶ms); - ro.absorb(&i_new); - for e in &z_0 { - ro.absorb(e); - } - for e in &z_next { - ro.absorb(e); - } - Unew.absorb_in_ro(cs.namespace(|| "absorb U_new"), &mut ro)?; - let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; - - // Outputs the computed hash and u.X[1] that corresponds to the hash of the - // other circuit - u.X[1].inputize(cs.namespace(|| "Output unmodified hash of the other circuit"))?; - hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; - - Ok(z_next) - } -} - -// #[cfg(test)] -// mod tests { -// use expect_test::{expect, Expect}; - -// use super::*; -// use crate::{ -// bellpepper::{ -// r1cs::{NovaShape, NovaWitness}, -// solver::SatisfyingAssignment, -// test_shape_cs::TestShapeCS, -// }, -// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, -// gadgets::scalar_as_base, -// provider::{ -// poseidon::PoseidonConstantsCircuit, Bn256EngineKZG, -// GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, -// VestaEngine, }, -// traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, -// }; - -// // In the following we use 1 to refer to the primary, and 2 to refer to -// the // secondary circuit -// fn test_recursive_circuit_with( -// primary_params: &NovaAugmentedCircuitParams, -// secondary_params: &NovaAugmentedCircuitParams, -// ro_consts1: ROConstantsCircuit>, -// ro_consts2: ROConstantsCircuit, -// expected_num_constraints_primary: &Expect, -// expected_num_constraints_secondary: &Expect, -// ) where -// E1: CurveCycleEquipped, -// { -// 
let tc1 = TrivialCircuit::default(); -// // Initialize the shape and ck for the primary -// let circuit1: NovaAugmentedCircuit< -// '_, -// Dual, -// TrivialCircuit< as Engine>::Base>, -// > = NovaAugmentedCircuit::new(primary_params, None, &tc1, -// > ro_consts1.clone()); -// let mut cs: TestShapeCS = TestShapeCS::new(); -// let _ = circuit1.synthesize(&mut cs); -// let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - -// expected_num_constraints_primary.assert_eq(&cs.num_constraints(). -// to_string()); - -// let tc2 = TrivialCircuit::default(); -// // Initialize the shape and ck for the secondary -// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, -// None, &tc2, ro_consts2.clone()); let mut cs: TestShapeCS> = -// TestShapeCS::new(); let _ = circuit2.synthesize(&mut cs); -// let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - -// expected_num_constraints_secondary.assert_eq(&cs.num_constraints(). 
-// to_string()); - -// // Execute the base case for the primary -// let zero1 = < as Engine>::Base as Field>::ZERO; -// let mut cs1 = SatisfyingAssignment::::new(); -// let inputs1: NovaAugmentedCircuitInputs> = -// NovaAugmentedCircuitInputs::new( scalar_as_base::(zero1), // -// pass zero for testing zero1, -// vec![zero1], -// None, -// None, -// None, -// None, -// ); -// let circuit1: NovaAugmentedCircuit< -// '_, -// Dual, -// TrivialCircuit< as Engine>::Base>, -// > = NovaAugmentedCircuit::new(primary_params, Some(inputs1), &tc1, -// > ro_consts1); -// let _ = circuit1.synthesize(&mut cs1); -// let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, -// &ck1).unwrap(); // Make sure that this is satisfiable -// shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - -// // Execute the base case for the secondary -// let zero2 = <::Base as Field>::ZERO; -// let mut cs2 = SatisfyingAssignment::>::new(); -// let inputs2: NovaAugmentedCircuitInputs = -// NovaAugmentedCircuitInputs::new( -// scalar_as_base::>(zero2), // pass zero for testing -// zero2, vec![zero2], -// None, -// None, -// Some(inst1), -// None, -// ); -// let circuit2: NovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = NovaAugmentedCircuit::new(secondary_params, -// Some(inputs2), &tc2, ro_consts2); let _ = circuit2.synthesize(&mut -// cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, -// &ck2).unwrap(); // Make sure that it is satisfiable -// shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); -// } - -// #[test] -// fn test_recursive_circuit_pasta() { -// // this test checks against values that must be replicated in -// benchmarks if // changed here -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = 
PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["9817"], -// &expect!["10349"], -// ); -// } - -// #[test] -// fn test_recursive_circuit_bn256_grumpkin() { -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["9985"], -// &expect!["10538"], -// ); -// } - -// #[test] -// fn test_recursive_circuit_secpq() { -// let params1 = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, -// BN_N_LIMBS, true); let params2 = -// NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); -// let ro_consts1: ROConstantsCircuit = -// PoseidonConstantsCircuit::default(); let ro_consts2: -// ROConstantsCircuit = PoseidonConstantsCircuit::default(); - -// test_recursive_circuit_with::( -// ¶ms1, -// ¶ms2, -// ro_consts1, -// ro_consts2, -// &expect!["10264"], -// &expect!["10961"], -// ); -// } -// } diff --git a/src/cyclefold/circuit.rs b/src/cyclefold/circuit.rs deleted file mode 100644 index 63ae346..0000000 --- a/src/cyclefold/circuit.rs +++ /dev/null @@ -1,285 +0,0 @@ -//! 
This module defines Cyclefold circuit - -use bellpepper::gadgets::boolean_utils::conditionally_select; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - ConstraintSystem, SynthesisError, -}; -use ff::Field; -use neptune::{circuit2::poseidon_hash_allocated, poseidon::PoseidonConstants}; - -use crate::{ - constants::NUM_CHALLENGE_BITS, - gadgets::{alloc_zero, le_bits_to_num, AllocatedPoint}, - traits::{commitment::CommitmentTrait, Engine}, - Commitment, -}; - -/// A structure containing the CycleFold circuit inputs and implementing the -/// synthesize function -pub struct CycleFoldCircuit { - commit_1: Option>, - commit_2: Option>, - scalar: Option<[bool; NUM_CHALLENGE_BITS]>, - poseidon_constants: PoseidonConstants, -} - -impl Default for CycleFoldCircuit { - fn default() -> Self { - let poseidon_constants = PoseidonConstants::new(); - Self { - commit_1: None, - commit_2: None, - scalar: None, - poseidon_constants, - } - } -} -impl CycleFoldCircuit { - /// Create a new CycleFold circuit with the given inputs - pub fn new( - commit_1: Option>, - commit_2: Option>, - scalar: Option<[bool; NUM_CHALLENGE_BITS]>, - ) -> Self { - let poseidon_constants = PoseidonConstants::new(); - Self { - commit_1, - commit_2, - scalar, - poseidon_constants, - } - } - - fn alloc_witness::Base>>( - &self, - mut cs: CS, - ) -> Result< - ( - AllocatedPoint, // commit_1 - AllocatedPoint, // commit_2 - Vec, // scalar - ), - SynthesisError, - > { - let commit_1 = AllocatedPoint::alloc( - cs.namespace(|| "allocate C_1"), - self.commit_1.map(|C_1| C_1.to_coordinates()), - )?; - commit_1.check_on_curve(cs.namespace(|| "commit_1 on curve"))?; - - let commit_2 = AllocatedPoint::alloc( - cs.namespace(|| "allocate C_2"), - self.commit_2.map(|C_2| C_2.to_coordinates()), - )?; - commit_2.check_on_curve(cs.namespace(|| "commit_2 on curve"))?; - - let scalar: Vec = self - .scalar - .unwrap_or([false; NUM_CHALLENGE_BITS]) - .into_iter() - .enumerate() - .map(|(idx, bit)| { - 
AllocatedBit::alloc(cs.namespace(|| format!("scalar bit {idx}")), Some(bit)) - }) - .collect::, _>>()?; - - Ok((commit_1, commit_2, scalar)) - } - - /// Synthesize the CycleFold circuit - pub fn synthesize::Base>>( - &self, - mut cs: CS, - ) -> Result<(), SynthesisError> { - let (C_1, C_2, r) = self.alloc_witness(cs.namespace(|| "allocate circuit witness"))?; - - // Calculate C_final - let r_C_2 = C_2.scalar_mul(cs.namespace(|| "r * C_2"), &r)?; - - let C_final = C_1.add(cs.namespace(|| "C_1 + r * C_2"), &r_C_2)?; - - self.inputize_point(&C_1, cs.namespace(|| "inputize C_1"))?; - self.inputize_point(&C_2, cs.namespace(|| "inputize C_2"))?; - self.inputize_point(&C_final, cs.namespace(|| "inputize C_final"))?; - - let scalar = le_bits_to_num(cs.namespace(|| "get scalar"), &r)?; - - scalar.inputize(cs.namespace(|| "scalar"))?; - - Ok(()) - } - - // Represent the point in the public IO as its 2-ary Poseidon hash - fn inputize_point( - &self, - point: &AllocatedPoint, - mut cs: CS, - ) -> Result<(), SynthesisError> - where - E: Engine, - CS: ConstraintSystem, - { - let (x, y, is_infinity) = point.get_coordinates(); - let preimage = vec![x.clone(), y.clone()]; - let val = poseidon_hash_allocated( - cs.namespace(|| "hash point"), - preimage, - &self.poseidon_constants, - )?; - - let zero = alloc_zero(cs.namespace(|| "zero")); - - let is_infinity_bit = AllocatedBit::alloc( - cs.namespace(|| "is_infinity"), - Some(is_infinity.get_value().unwrap_or(E::Base::ONE) == E::Base::ONE), - )?; - - cs.enforce( - || "infinity_bit matches", - |lc| lc, - |lc| lc, - |lc| lc + is_infinity_bit.get_variable() - is_infinity.get_variable(), - ); - - // Output 0 when it is the point at infinity - let output = conditionally_select( - cs.namespace(|| "select output"), - &zero, - &val, - &Boolean::from(is_infinity_bit), - )?; - - output.inputize(cs.namespace(|| "inputize hash"))?; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - use ff::{Field, PrimeField, 
PrimeFieldBits}; - use neptune::Poseidon; - use rand_core::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::NIO_CYCLE_FOLD, - gadgets::scalar_as_base, - provider::Bn256EngineKZG, - traits::{ - commitment::CommitmentEngineTrait, snark::default_ck_hint, CurveCycleEquipped, Dual, - }, - }; - - fn test_cyclefold_circuit_size_with(expected_constraints: &Expect, expected_vars: &Expect) - where - E1: CurveCycleEquipped, - { - // Instantiate the circuit with trivial inputs - let circuit: CycleFoldCircuit> = CycleFoldCircuit::default(); - - // Synthesize the R1CS shape - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); - - // Extract the number of constraints and variables - let num_constraints = cs.num_constraints(); - let num_variables = cs.num_aux(); - let num_io = cs.num_inputs(); - - // Check the number of constraints and variables match the expected values - expected_constraints.assert_eq(&num_constraints.to_string()); - expected_vars.assert_eq(&num_variables.to_string()); - assert_eq!(num_io, NIO_CYCLE_FOLD + 1); // includes 1 - } - - #[test] - fn test_cyclefold_circuit_size() { - test_cyclefold_circuit_size_with::(&expect!("2090"), &expect!("2081")); - } - - fn test_cyclefold_circuit_sat_with() { - let rng = OsRng; - - let ck = < as Engine>::CE as CommitmentEngineTrait>>::setup(b"test", 5); - - // Generate random vectors to commit to - let v1 = (0..5) - .map(|_| < as Engine>::Scalar as Field>::random(rng)) - .collect::>(); - let v2 = (0..5) - .map(|_| < as Engine>::Scalar as Field>::random(rng)) - .collect::>(); - - // Calculate the random commitments - let C_1 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v1); - let C_2 = < as Engine>::CE as CommitmentEngineTrait>>::commit(&ck, &v2); - - // Generate a random scalar - let val: u128 = rand::random(); - let r = < as Engine>::Scalar as 
PrimeField>::from_u128(val); - let r_bits = r - .to_le_bits() - .into_iter() - .take(128) - .collect::>() - .try_into() - .unwrap(); - - let circuit: CycleFoldCircuit> = - CycleFoldCircuit::new(Some(C_1), Some(C_2), Some(r_bits)); - - // Calculate the result out of circuit - let native_result = C_1 + C_2 * r; - - // Generate the R1CS shape and commitment key - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit.synthesize(cs.namespace(|| "synthesizing shape")); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Synthesize the R1CS circuit on the random inputs - let mut cs = SatisfyingAssignment::::new(); - circuit - .synthesize(cs.namespace(|| "synthesizing witness")) - .unwrap(); - - let (instance, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let X = &instance.X; - - // Helper functio to calculate the hash - let compute_hash = |P: Commitment>| -> E::Scalar { - let (x, y, is_infinity) = P.to_coordinates(); - if is_infinity { - return E::Scalar::ZERO; - } - - let mut hasher = Poseidon::new_with_preimage(&[x, y], &circuit.poseidon_constants); - - hasher.hash() - }; - - // Check the circuit calculates the right thing - let hash_1 = compute_hash(C_1); - assert_eq!(hash_1, X[0]); - let hash_2 = compute_hash(C_2); - assert_eq!(hash_2, X[1]); - let hash_res = compute_hash(native_result); - assert_eq!(hash_res, X[2]); - assert_eq!(r, scalar_as_base::(X[3])); - - // Check the R1CS equation is satisfied - shape.is_sat(&ck, &instance, &witness).unwrap(); - } - - #[test] - fn test_cyclefold_circuit_sat() { - test_cyclefold_circuit_sat_with::(); - } -} diff --git a/src/cyclefold/gadgets.rs b/src/cyclefold/gadgets.rs deleted file mode 100644 index 7edd0c6..0000000 --- a/src/cyclefold/gadgets.rs +++ /dev/null @@ -1,706 +0,0 @@ -//! This module defines many of the gadgets needed in the primary folding -//! 
circuit - -use bellpepper::gadgets::Assignment; -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools; - -use super::util::FoldingData; -use crate::{ - constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS}, - gadgets::{ - alloc_bignat_constant, f_to_nat, le_bits_to_num, AllocatedPoint, - AllocatedRelaxedR1CSInstance, BigNat, Num, - }, - r1cs::R1CSInstance, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, -}; - -// An allocated version of the R1CS instance obtained from a single cyclefold -// invocation -pub struct AllocatedCycleFoldInstance { - W: AllocatedPoint, - X: [BigNat; NIO_CYCLE_FOLD], -} - -impl AllocatedCycleFoldInstance { - pub fn alloc>( - mut cs: CS, - inst: Option<&R1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - inst.map(|u| u.comm_W.to_coordinates()), - )?; - W.check_on_curve(cs.namespace(|| "check W on curve"))?; - - if let Some(inst) = inst { - if inst.X.len() != NIO_CYCLE_FOLD { - return Err(SynthesisError::IncompatibleLengthVector(String::from( - "R1CS instance has wrong arity", - ))); - } - } - - let X: [BigNat; NIO_CYCLE_FOLD] = (0..NIO_CYCLE_FOLD) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocating IO {idx}")), - || Ok(f_to_nat(inst.map_or(&E::Scalar::ZERO, |inst| &inst.X[idx]))), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!( - "{} != {NIO_CYCLE_FOLD}", - err.len() - )) - })?; - - Ok(Self { W, X }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - self.X - .iter() - .enumerate() - .try_for_each(|(io_idx, x)| -> Result<(), SynthesisError> { - x.as_limbs().iter().enumerate().try_for_each( - |(limb_idx, limb)| -> Result<(), SynthesisError> { - ro.absorb(&limb.as_allocated_num(cs.namespace(|| { - format!("convert limb {limb_idx} of X[{io_idx}] to num") - }))?); - Ok(()) - }, - ) - })?; - - Ok(()) - } -} - -/// An circuit allocated version of the `FoldingData` coming from a CycleFold -/// invocation. -pub struct AllocatedCycleFoldData { - pub U: AllocatedRelaxedR1CSInstance, - pub u: AllocatedCycleFoldInstance, - pub T: AllocatedPoint, -} - -impl AllocatedCycleFoldData { - pub fn alloc>( - mut cs: CS, - inst: Option<&FoldingData>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let U = AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| "U"), - inst.map(|x| &x.U), - limb_width, - n_limbs, - )?; - - let u = AllocatedCycleFoldInstance::alloc( - cs.namespace(|| "u"), - inst.map(|x| &x.u), - limb_width, - n_limbs, - )?; - - let T = AllocatedPoint::alloc(cs.namespace(|| "T"), inst.map(|x| x.T.to_coordinates()))?; - T.check_on_curve(cs.namespace(|| "T on curve"))?; - - Ok(Self { U, u, T }) - } - - /// The NIFS verifier which folds the CycleFold instance into a running - /// relaxed R1CS instance. 
- pub fn apply_fold( - &self, - mut cs: CS, - params: &AllocatedNum, - ro_consts: ROConstantsCircuit, - limb_width: usize, - n_limbs: usize, - ) -> Result, SynthesisError> - where - CS: ConstraintSystem, - { - // Compute r: - let mut ro = E::ROCircuit::new( - ro_consts, - 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * BN_N_LIMBS) + 3, /* digest + (U) + (u) + T */ - ); - ro.absorb(params); - - self.U.absorb_in_ro( - cs.namespace(|| "absorb cyclefold running instance"), - &mut ro, - )?; - // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, - // i, z0, zi) - self.u - .absorb_in_ro(cs.namespace(|| "absorb cyclefold instance"), &mut ro)?; - - ro.absorb(&self.T.x); - ro.absorb(&self.T.y); - ro.absorb(&self.T.is_infinity); - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - // W_fold = self.W + r * u.W - let rW = self.u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; - let W_fold = self.U.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; - - // E_fold = self.E + r * T - let rT = self.T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; - let E_fold = self.U.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; - - // u_fold = u_r + r - let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { - Ok(*self.U.u.get_value().get()? + r.get_value().get()?) 
- })?; - cs.enforce( - || "Check u_fold", - |lc| lc, - |lc| lc, - |lc| lc + u_fold.get_variable() - self.U.u.get_variable() - r.get_variable(), - ); - - // Fold the IO: - // Analyze r into limbs - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - limb_width, - n_limbs, - )?; - - // Allocate the order of the non-native field as a constant - let m_bn = alloc_bignat_constant( - cs.namespace(|| "alloc m"), - &E::GE::group_params().2, - limb_width, - n_limbs, - )?; - - let mut X_fold = vec![]; - - // Calculate the - for (idx, (X, x)) in self.U.X.iter().zip_eq(self.u.X.iter()).enumerate() { - let (_, r) = x.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; - let r_new = X.add(&r)?; - let X_i_fold = - r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; - X_fold.push(X_i_fold); - } - - let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {NIO_CYCLE_FOLD}", err.len())) - })?; - - Ok(AllocatedRelaxedR1CSInstance { - W: W_fold, - E: E_fold, - u: u_fold, - X: X_fold, - }) - } -} - -pub mod emulated { - use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; - use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, - }; - use ff::Field; - - use super::FoldingData; - use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, - gadgets::{ - alloc_bignat_constant, alloc_zero, conditionally_select_allocated_bit, - conditionally_select_bignat, f_to_nat, le_bits_to_num, BigNat, - }, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, - RelaxedR1CSInstance, - }; - - /// An allocated version of a curve point from the non-native curve - #[derive(Clone)] - pub struct AllocatedEmulPoint - where - G: Group, - { - pub x: BigNat, - pub y: BigNat, - pub is_infinity: AllocatedBit, - } - - impl AllocatedEmulPoint - where - G: 
Group, - { - pub fn alloc( - mut cs: CS, - coords: Option<(G::Scalar, G::Scalar, bool)>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let x = BigNat::alloc_from_nat( - cs.namespace(|| "x"), - || { - Ok(f_to_nat( - &coords.map_or(::ZERO, |val| val.0), - )) - }, - limb_width, - n_limbs, - )?; - - let y = BigNat::alloc_from_nat( - cs.namespace(|| "y"), - || { - Ok(f_to_nat( - &coords.map_or(::ZERO, |val| val.1), - )) - }, - limb_width, - n_limbs, - )?; - - let is_infinity = AllocatedBit::alloc( - cs.namespace(|| "alloc is_infinity"), - coords.map_or(Some(true), |(_, _, is_infinity)| Some(is_infinity)), - )?; - - Ok(Self { x, y, is_infinity }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - let x_bn = self - .x - .as_limbs() - .iter() - .enumerate() - .map(|(i, limb)| { - limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of x to num"))) - }) - .collect::>, _>>()?; - - for limb in x_bn { - ro.absorb(&limb) - } - - let y_bn = self - .y - .as_limbs() - .iter() - .enumerate() - .map(|(i, limb)| { - limb.as_allocated_num(cs.namespace(|| format!("convert limb {i} of y to num"))) - }) - .collect::>, _>>()?; - - for limb in y_bn { - ro.absorb(&limb) - } - - let is_infinity_num: AllocatedNum = - AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - self.is_infinity.get_value().map_or( - Err(SynthesisError::AssignmentMissing), - |bit| { - if bit { - Ok(G::Base::ONE) - } else { - Ok(G::Base::ZERO) - } - }, - ) - })?; - - cs.enforce( - || "constrain num equals bit", - |lc| lc, - |lc| lc, - |lc| lc + is_infinity_num.get_variable() - self.is_infinity.get_variable(), - ); - - ro.absorb(&is_infinity_num); - - Ok(()) - } - - fn conditionally_select>( - &self, - mut cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select_bignat( - cs.namespace(|| "x = cond ? 
self.x : other.x"), - &self.x, - &other.x, - condition, - )?; - - let y = conditionally_select_bignat( - cs.namespace(|| "y = cond ? self.y : other.y"), - &self.y, - &other.y, - condition, - )?; - - let is_infinity = conditionally_select_allocated_bit( - cs.namespace(|| "is_infinity = cond ? self.is_infinity : other.is_infinity"), - &self.is_infinity, - &other.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } - - pub fn default>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let x = alloc_bignat_constant( - cs.namespace(|| "allocate x_default = 0"), - &f_to_nat(&G::Base::ZERO), - limb_width, - n_limbs, - )?; - let y = alloc_bignat_constant( - cs.namespace(|| "allocate y_default = 0"), - &f_to_nat(&G::Base::ZERO), - limb_width, - n_limbs, - )?; - - let is_infinity = - AllocatedBit::alloc(cs.namespace(|| "allocate is_infinity"), Some(true))?; - cs.enforce( - || "is_infinity = 1", - |lc| lc, - |lc| lc, - |lc| lc + CS::one() - is_infinity.get_variable(), - ); - - Ok(Self { x, y, is_infinity }) - } - } - - /// A non-native circuit version of a `RelaxedR1CSInstance`. 
This is used - /// for the in-circuit representation of the primary running instance - pub struct AllocatedEmulRelaxedR1CSInstance { - pub comm_W: AllocatedEmulPoint, - pub comm_E: AllocatedEmulPoint, - u: AllocatedNum, - x0: AllocatedNum, - x1: AllocatedNum, - } - - impl AllocatedEmulRelaxedR1CSInstance - where - E: Engine, - { - pub fn alloc>( - mut cs: CS, - inst: Option<&RelaxedR1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let comm_W = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate comm_W"), - inst.map(|x| x.comm_W.to_coordinates()), - limb_width, - n_limbs, - )?; - - let comm_E = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate comm_E"), - inst.map(|x| x.comm_E.to_coordinates()), - limb_width, - n_limbs, - )?; - - let u = AllocatedNum::alloc(cs.namespace(|| "allocate u"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u)) - })?; - - let x0 = AllocatedNum::alloc(cs.namespace(|| "allocate x0"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[0])) - })?; - - let x1 = AllocatedNum::alloc(cs.namespace(|| "allocate x1"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.X[1])) - })?; - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - - /// Performs a folding of a primary R1CS instance (`u_W`, `u_x0`, - /// `u_x1`) into a running `AllocatedEmulRelaxedR1CSInstance` - /// As the curve operations are performed in the CycleFold circuit and - /// provided to the primary circuit as non-deterministic advice, - /// this folding simply sets those values as the new witness and - /// error vector commitments. 
- pub fn fold_with_r1cs::Base>>( - &self, - mut cs: CS, - pp_digest: &AllocatedNum, - W_new: AllocatedEmulPoint, - E_new: AllocatedEmulPoint, - u_W: &AllocatedEmulPoint, - u_x0: &AllocatedNum, - u_x1: &AllocatedNum, - comm_T: &AllocatedEmulPoint, - ro_consts: ROConstantsCircuit, - ) -> Result { - let mut ro = E::ROCircuit::new( - ro_consts, - 1 + NUM_FE_IN_EMULATED_POINT + 2 + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W + u.x + comm_T */ - ); - ro.absorb(pp_digest); - - // Absorb u - // Absorb the witness - u_W.absorb_in_ro(cs.namespace(|| "absorb u_W"), &mut ro)?; - // Absorb public IO - ro.absorb(u_x0); - ro.absorb(u_x1); - - // Absorb comm_T - comm_T.absorb_in_ro(cs.namespace(|| "absorb comm_T"), &mut ro)?; - - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - let u_fold = self.u.add(cs.namespace(|| "u_fold = u + r"), &r)?; - let x0_fold = - AllocatedNum::alloc(cs.namespace(|| "x0"), || { - Ok(*self.x0.get_value().get()? - + *r.get_value().get()? * *u_x0.get_value().get()?) - })?; - cs.enforce( - || "x0_fold = x0 + r * u_x0", - |lc| lc + r.get_variable(), - |lc| lc + u_x0.get_variable(), - |lc| lc + x0_fold.get_variable() - self.x0.get_variable(), - ); - - let x1_fold = - AllocatedNum::alloc(cs.namespace(|| "x1"), || { - Ok(*self.x1.get_value().get()? - + *r.get_value().get()? * *u_x1.get_value().get()?) 
- })?; - cs.enforce( - || "x1_fold = x1 + r * u_x1", - |lc| lc + r.get_variable(), - |lc| lc + u_x1.get_variable(), - |lc| lc + x1_fold.get_variable() - self.x1.get_variable(), - ); - - Ok(Self { - comm_W: W_new, - comm_E: E_new, - u: u_fold, - x0: x0_fold, - x1: x1_fold, - }) - } - - pub fn absorb_in_ro( - &self, - mut cs: CS, - ro: &mut impl ROCircuitTrait, - ) -> Result<(), SynthesisError> - where - CS: ConstraintSystem<::Base>, - { - self.comm_W - .absorb_in_ro(cs.namespace(|| "absorb comm_W"), ro)?; - self.comm_E - .absorb_in_ro(cs.namespace(|| "absorb comm_E"), ro)?; - - ro.absorb(&self.u); - ro.absorb(&self.x0); - ro.absorb(&self.x1); - - Ok(()) - } - - pub fn conditionally_select::Base>>( - &self, - mut cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - let comm_W = self.comm_W.conditionally_select( - cs.namespace(|| "comm_W = cond ? self.comm_W : other.comm_W"), - &other.comm_W, - condition, - )?; - - let comm_E = self.comm_E.conditionally_select( - cs.namespace(|| "comm_E = cond? self.comm_E : other.comm_E"), - &other.comm_E, - condition, - )?; - - let u = conditionally_select( - cs.namespace(|| "u = cond ? self.u : other.u"), - &self.u, - &other.u, - condition, - )?; - - let x0 = conditionally_select( - cs.namespace(|| "x0 = cond ? self.x0 : other.x0"), - &self.x0, - &other.x0, - condition, - )?; - - let x1 = conditionally_select( - cs.namespace(|| "x1 = cond ? 
self.x1 : other.x1"), - &self.x1, - &other.x1, - condition, - )?; - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - - pub fn default::Base>>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let comm_W = AllocatedEmulPoint::default( - cs.namespace(|| "default comm_W"), - limb_width, - n_limbs, - )?; - let comm_E = comm_W.clone(); - - let u = alloc_zero(cs.namespace(|| "u = 0")); - - let x0 = u.clone(); - let x1 = u.clone(); - - Ok(Self { - comm_W, - comm_E, - u, - x0, - x1, - }) - } - } - - /// The in-circuit representation of the primary folding data. - pub struct AllocatedFoldingData { - pub U: AllocatedEmulRelaxedR1CSInstance, - pub u_W: AllocatedEmulPoint, - pub u_x0: AllocatedNum, - pub u_x1: AllocatedNum, - pub T: AllocatedEmulPoint, - } - - impl AllocatedFoldingData { - pub fn alloc>( - mut cs: CS, - inst: Option<&FoldingData>, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem<::Base>, - { - let U = AllocatedEmulRelaxedR1CSInstance::alloc( - cs.namespace(|| "allocate U"), - inst.map(|x| &x.U), - limb_width, - n_limbs, - )?; - - let u_W = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate u_W"), - inst.map(|x| x.u.comm_W.to_coordinates()), - limb_width, - n_limbs, - )?; - - let u_x0 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x0"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[0])) - })?; - - let u_x1 = AllocatedNum::alloc(cs.namespace(|| "allocate u_x1"), || { - inst.map_or(Ok(E::Base::ZERO), |inst| Ok(inst.u.X[1])) - })?; - - let T = AllocatedEmulPoint::alloc( - cs.namespace(|| "allocate T"), - inst.map(|x| x.T.to_coordinates()), - limb_width, - n_limbs, - )?; - - Ok(Self { - U, - u_W, - u_x0, - u_x1, - T, - }) - } - } -} diff --git a/src/cyclefold/nifs.rs b/src/cyclefold/nifs.rs deleted file mode 100644 index 2991351..0000000 --- a/src/cyclefold/nifs.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! 
This module defines the needed wrong-field NIFS prover - -use std::marker::PhantomData; - -use super::util::{absorb_cyclefold_r1cs, absorb_primary_commitment, absorb_primary_r1cs}; -use crate::{ - constants::{NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, - errors::NovaError, - gadgets::scalar_as_base, - r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, - CommitmentKey, CompressedCommitment, -}; - -/// A SNARK that holds the proof of a step of an incremental computation of the -/// primary circuit in the CycleFold folding scheme. -/// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is -/// that this -#[derive(Debug)] -pub struct PrimaryNIFS -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pub(crate) comm_T: CompressedCommitment, - _p: PhantomData, -} - -impl PrimaryNIFS -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - /// Takes a relaxed R1CS instance-witness pair (U1, W1) and an R1CS - /// instance-witness pair (U2, W2) and folds them into a new relaxed - /// R1CS instance-witness pair (U, W) and a commitment to the cross term - /// T. It also provides the challenge r used to fold the instances. 
- pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E1::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result< - ( - Self, - (RelaxedR1CSInstance, RelaxedR1CSWitness), - E1::Scalar, - ), - NovaError, - > { - let arity = U1.X.len(); - - if arity != U2.X.len() { - return Err(NovaError::InvalidInputLength); - } - - let mut ro = E2::RO::new( - ro_consts.clone(), - 1 + NUM_FE_IN_EMULATED_POINT + arity + NUM_FE_IN_EMULATED_POINT, /* pp_digest + u.W - * + u.X + T */ - ); - - ro.absorb(*pp_digest); - - absorb_primary_r1cs::(U2, &mut ro); - - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - absorb_primary_commitment::(&comm_T, &mut ro); - - let r = scalar_as_base::(ro.squeeze(NUM_CHALLENGE_BITS)); - - let U = U1.fold(U2, &comm_T, &r); - - let W = W1.fold(W2, &T, &r)?; - - Ok(( - Self { - comm_T: comm_T.compress(), - _p: PhantomData, - }, - (U, W), - r, - )) - } -} - -/// A SNARK that holds the proof of a step of an incremental computation of the -/// CycleFold circuit The difference of this folding scheme from the Nova NIFS -/// in `src/nifs.rs` is that this folding prover and verifier must fold in the -/// `RelaxedR1CSInstance` accumulator because the optimization in the -#[derive(Debug)] -pub struct CycleFoldNIFS { - pub(crate) comm_T: CompressedCommitment, -} - -impl CycleFoldNIFS { - /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS - /// instance/witness (U1, W1) returning the new folded accumulator and a - /// commitment to the cross-term. 
- pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { - // Check `U1` and `U2` have the same arity - if U2.X.len() != NIO_CYCLE_FOLD || U1.X.len() != NIO_CYCLE_FOLD { - return Err(NovaError::InvalidInputLength); - } - - // initialize a new RO - let mut ro = E::RO::new( - ro_consts.clone(), - 46, /* 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * - * BN_N_LIMBS) + 3, // digest + (U) + (u) + T */ - ); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U1 to the transcript. - // NOTE: this must be here because the IO for `U2` does not have the data of the - // hash of `U1` - U1.absorb_in_ro(&mut ro); - - // append U2 to transcript - absorb_cyclefold_r1cs(U2, &mut ro); - - // compute a commitment to the cross-term - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - let W = W1.fold(W2, &T, &r)?; - - // return the folded instance and witness - Ok(( - Self { - comm_T: comm_T.compress(), - }, - (U, W), - )) - } -} diff --git a/src/cyclefold/nova_circuit.rs b/src/cyclefold/nova_circuit.rs deleted file mode 100644 index ed56f08..0000000 --- a/src/cyclefold/nova_circuit.rs +++ /dev/null @@ -1,565 +0,0 @@ -//! 
This module defines the Nova augmented circuit used for Cyclefold - -use bellpepper::gadgets::{ - boolean::Boolean, boolean_utils::conditionally_select_slice, num::AllocatedNum, Assignment, -}; -use bellpepper_core::{boolean::AllocatedBit, ConstraintSystem, SynthesisError}; -use ff::Field; -use serde::{Deserialize, Serialize}; - -use super::{ - gadgets::{emulated, AllocatedCycleFoldData}, - util::FoldingData, -}; -use crate::{ - constants::{BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_FE_IN_EMULATED_POINT, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, le_bits_to_num, - AllocatedRelaxedR1CSInstance, - }, - supernova::StepCircuit, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - Commitment, -}; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct AugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, -} - -impl AugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize) -> Self { - Self { - limb_width, - n_limbs, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct AugmentedCircuitInputs -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pp_digest: E1::Scalar, - i: E1::Base, - z0: Vec, - - zi: Option>, - data_p: Option>, - - data_c_1: Option>, - data_c_2: Option>, - - E_new: Option>, - W_new: Option>, -} - -impl AugmentedCircuitInputs -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - pub fn new( - pp_digest: E1::Scalar, - i: E1::Base, - z0: Vec, - zi: Option>, - data_p: Option>, - data_c_1: Option>, - data_c_2: Option>, - E_new: Option>, - W_new: Option>, - ) -> Self { - Self { - pp_digest, - i, - z0, - zi, - data_p, - data_c_1, - data_c_2, - E_new, - W_new, - } - } -} -pub struct AugmentedCircuit<'a, E1, E2, SC> -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - SC: StepCircuit, -{ - params: &'a AugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, -} - 
-impl<'a, E1, E2, SC> AugmentedCircuit<'a, E1, E2, SC> -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - SC: StepCircuit, -{ - pub const fn new( - params: &'a AugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, - ) -> Self { - Self { - params, - ro_consts, - inputs, - step_circuit, - } - } - - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - ) -> Result< - ( - AllocatedNum, // pp_digest - AllocatedNum, // i - Vec>, // z0 - Vec>, // zi - emulated::AllocatedFoldingData, // data_p - AllocatedCycleFoldData, // data_c_1 - AllocatedCycleFoldData, // data_c_2 - emulated::AllocatedEmulPoint, // E_new - emulated::AllocatedEmulPoint, // W_new - ), - SynthesisError, - > { - let pp_digest = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.pp_digest), - )?; - - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E1::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.as_ref().unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - let data_p = emulated::AllocatedFoldingData::alloc( - cs.namespace(|| "data_p"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_p.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let data_c_1 = AllocatedCycleFoldData::alloc( - cs.namespace(|| "data_c_1"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_c_1.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let data_c_2 = AllocatedCycleFoldData::alloc( - cs.namespace(|| "data_c_2"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.data_c_2.as_ref()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let E_new = emulated::AllocatedEmulPoint::alloc( - cs.namespace(|| "E_new"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.E_new.as_ref()) - .map(|E_new| E_new.to_coordinates()), - self.params.limb_width, - self.params.n_limbs, - )?; - - let W_new = emulated::AllocatedEmulPoint::alloc( - cs.namespace(|| "W_new"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.W_new.as_ref()) - .map(|W_new| W_new.to_coordinates()), - self.params.limb_width, - self.params.n_limbs, - )?; - - Ok(( - pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new, - )) - } - - pub fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - ) -> Result< - ( - AllocatedRelaxedR1CSInstance, - emulated::AllocatedEmulRelaxedR1CSInstance, - ), - SynthesisError, - > { - let U_c_default = AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate U_c_default"), - self.params.limb_width, - self.params.n_limbs, - )?; - - let U_p_default = emulated::AllocatedEmulRelaxedR1CSInstance::default( - cs.namespace(|| "Allocated U_p_default"), - 
self.params.limb_width, - self.params.n_limbs, - )?; - - // In the first folding step return the default relaxed instances for both the - // CycleFold and primary running accumulators - Ok((U_c_default, U_p_default)) - } - - pub fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - pp_digest: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - data_p: &emulated::AllocatedFoldingData, - data_c_1: &AllocatedCycleFoldData, - data_c_2: &AllocatedCycleFoldData, - E_new: emulated::AllocatedEmulPoint, - W_new: emulated::AllocatedEmulPoint, - arity: usize, - ) -> Result< - ( - AllocatedRelaxedR1CSInstance, - emulated::AllocatedEmulRelaxedR1CSInstance, - AllocatedBit, - ), - SynthesisError, - > { - // Follows the outline written down here https://hackmd.io/@argumentcomputer/HybHrnNFT - - // Calculate the hash of the non-deterministic advice for the primary circuit - let mut ro_p = E1::ROCircuit::new( - self.ro_consts.clone(), - 2 + 2 * arity + 2 * NUM_FE_IN_EMULATED_POINT + 3, - ); - - ro_p.absorb(pp_digest); - ro_p.absorb(i); - for e in z_0 { - ro_p.absorb(e) - } - for e in z_i { - ro_p.absorb(e) - } - data_p - .U - .absorb_in_ro(cs.namespace(|| "absorb U_p"), &mut ro_p)?; - - let hash_bits_p = ro_p.squeeze(cs.namespace(|| "primary hash bits"), NUM_HASH_BITS)?; - let hash_p = le_bits_to_num(cs.namespace(|| "primary hash"), &hash_bits_p)?; - - // check the hash matches the public IO from the last primary instance - let check_primary = alloc_num_equals( - cs.namespace(|| "u.X[0] = H(params, i, z0, zi, U_p)"), - &data_p.u_x0, - &hash_p, - )?; - - // Calculate the hash of the non-dterministic advice for the secondary circuit - let mut ro_c = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X - ); - - ro_c.absorb(pp_digest); - ro_c.absorb(i); - data_c_1 - .U - .absorb_in_ro(cs.namespace(|| "absorb U_c"), &mut ro_c)?; - let hash_c_bits = ro_c.squeeze(cs.namespace(|| 
"cyclefold hash bits"), NUM_HASH_BITS)?; - let hash_c = le_bits_to_num(cs.namespace(|| "cyclefold hash"), &hash_c_bits)?; - - // check the hash matches the public IO from the last primary instance - let check_cyclefold = alloc_num_equals( - cs.namespace(|| "u.X[1] = H(params, U_c)"), - &data_p.u_x1, - &hash_c, - )?; - - let check_io = AllocatedBit::and( - cs.namespace(|| "both IOs match"), - &check_primary, - &check_cyclefold, - )?; - - // Run NIVC.V on U_c, u_c_1, T_c_1 - let U_int = data_c_1.apply_fold( - cs.namespace(|| "fold u_c_1 into U_c"), - pp_digest, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // Calculate h_int = H(pp, U_c_int) - let mut ro_c_int = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X - ); - ro_c_int.absorb(pp_digest); - U_int.absorb_in_ro(cs.namespace(|| "absorb U_c_int"), &mut ro_c_int)?; - let h_c_int_bits = - ro_c_int.squeeze(cs.namespace(|| "intermediate hash bits"), NUM_HASH_BITS)?; - let h_c_int = le_bits_to_num(cs.namespace(|| "intermediate hash"), &h_c_int_bits)?; - - // Calculate h_1 = H(pp, U_c_1) - let mut ro_c_1 = E1::ROCircuit::new( - self.ro_consts.clone(), - 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + W + E + u + X - ); - - ro_c_1.absorb(pp_digest); - data_c_2 - .U - .absorb_in_ro(cs.namespace(|| "absorb U_c_1"), &mut ro_c_1)?; - let h_c_1_bits = ro_c_1.squeeze(cs.namespace(|| "cyclefold_1 hash bits"), NUM_HASH_BITS)?; - let h_c_1 = le_bits_to_num(cs.namespace(|| "cyclefold_1 hash"), &h_c_1_bits)?; - - // Check the intermediate-calculated running instance matches the - // non-deterministic advice provided to the prover - let check_cyclefold_int = - alloc_num_equals(cs.namespace(|| "h_int = h_c_1"), &h_c_int, &h_c_1)?; - - let checks_pass = AllocatedBit::and( - cs.namespace(|| "all checks passed"), - &check_io, - &check_cyclefold_int, - )?; - - // calculate the folded CycleFold accumulator - let U_c = 
data_c_2.apply_fold( - cs.namespace(|| "fold u_c_2 into U_c_1"), - pp_digest, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // calculate the folded primary circuit accumulator - let U_p = data_p.U.fold_with_r1cs( - cs.namespace(|| "fold u_p into U_p"), - pp_digest, - W_new, - E_new, - &data_p.u_W, - &data_p.u_x0, - &data_p.u_x1, - &data_p.T, - self.ro_consts.clone(), - )?; - - Ok((U_c, U_p, checks_pass)) - } - - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result>, SynthesisError> { - // Circuit is documented here: https://hackmd.io/SBvAur_2RQmaduDi7gYbhw - let arity = self.step_circuit.arity(); - - // Allocate the witness - let (pp_digest, i, z_0, z_i, data_p, data_c_1, data_c_2, E_new, W_new) = - self.alloc_witness(cs.namespace(|| "alloc_witness"), arity)?; - - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = alloc_num_equals(cs.namespace(|| "is base case"), &i, &zero)?; - - let (U_new_c_base, U_new_p_base) = - self.synthesize_base_case(cs.namespace(|| "base case"))?; - - let (U_new_c_non_base, U_new_p_non_base, check_non_base_pass) = self - .synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - &pp_digest, - &i, - &z_0, - &z_i, - &data_p, - &data_c_1, - &data_c_2, - E_new, - W_new, - arity, - )?; - - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // select the new running primary instance - let Unew_p = U_new_p_base.conditionally_select( - cs.namespace(|| "compute Unew_p"), - &U_new_p_non_base, - &Boolean::from(is_base_case.clone()), - )?; - - // select the new running CycleFold instance - let Unew_c = U_new_c_base.conditionally_select( - cs.namespace(|| "compute Unew_c"), - &U_new_c_non_base, - 
&Boolean::from(is_base_case.clone()), - )?; - - // Compute i + 1 - let i_new = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E1::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc, - |lc| lc, - |lc| lc + i_new.get_variable() - CS::one() - i.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - let (_pc, z_next) = - self.step_circuit - .synthesize(&mut cs.namespace(|| "F"), None, &z_input)?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // Calculate the first component of the public IO as the hash of the calculated - // primary running instance - let mut ro_p = E1::ROCircuit::new( - self.ro_consts.clone(), - 2 + 2 * arity + (2 * NUM_FE_IN_EMULATED_POINT + 3), // pp + i + z_0 + z_next + (U_p) - ); - ro_p.absorb(&pp_digest); - ro_p.absorb(&i_new); - for e in &z_0 { - ro_p.absorb(e); - } - for e in &z_next { - ro_p.absorb(e); - } - Unew_p.absorb_in_ro(cs.namespace(|| "absorb Unew_p"), &mut ro_p)?; - let hash_p_bits = ro_p.squeeze(cs.namespace(|| "hash_p_bits"), NUM_HASH_BITS)?; - let hash_p = le_bits_to_num(cs.namespace(|| "hash_p"), &hash_p_bits)?; - - // Calculate the second component of the public IO as the hash of the calculated - // CycleFold running instance - let mut ro_c = E1::ROCircuit::new( - self.ro_consts, - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, // pp + i + W + E + u + X - ); - ro_c.absorb(&pp_digest); - ro_c.absorb(&i_new); - Unew_c.absorb_in_ro(cs.namespace(|| "absorb Unew_c"), &mut ro_c)?; - let hash_c_bits = ro_c.squeeze(cs.namespace(|| "hash_c_bits"), NUM_HASH_BITS)?; - let hash_c = le_bits_to_num(cs.namespace(|| "hash_c"), &hash_c_bits)?; - - hash_p.inputize(cs.namespace(|| "u_p.x[0] = hash_p"))?; - hash_c.inputize(cs.namespace(|| "u_p.x[1] = hash_c"))?; - - Ok(z_next) - } -} - -// 
#[cfg(test)] -// mod test { -// use expect_test::{expect, Expect}; - -// use super::*; -// use crate::{ -// bellpepper::test_shape_cs::TestShapeCS, -// constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, -// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, -// traits::{circuit::TrivialCircuit, CurveCycleEquipped, Dual}, -// }; - -// fn test_augmented_circuit_size_with(expected_cons: &Expect, -// expected_var: &Expect) where -// E: CurveCycleEquipped, -// { -// let params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); - -// let ro_consts = ROConstantsCircuit::::default(); - -// let step_circuit = TrivialCircuit::::default(); - -// let circuit = AugmentedCircuit::, -// TrivialCircuit>::new( ¶ms, -// ro_consts, -// None, -// &step_circuit, -// ); -// let mut cs: TestShapeCS> = TestShapeCS::default(); - -// let res = circuit.synthesize(&mut cs); - -// res.unwrap(); - -// let num_constraints = cs.num_constraints(); -// let num_variables = cs.num_aux(); - -// expected_cons.assert_eq(&num_constraints.to_string()); -// expected_var.assert_eq(&num_variables.to_string()); -// } - -// #[test] -// fn test_augmented_circuit_size() { -// test_augmented_circuit_size_with::(&expect!["33289"], -// &expect!["33323"]); -// test_augmented_circuit_size_with::(&expect!["35125" -// ], &expect!["35159"]); -// test_augmented_circuit_size_with::(&expect!["33856"], -// &expect!["33890"]); } -// } diff --git a/src/cyclefold/snark.rs b/src/cyclefold/snark.rs deleted file mode 100644 index 6d8ef3b..0000000 --- a/src/cyclefold/snark.rs +++ /dev/null @@ -1,563 +0,0 @@ -//! This module defines the Cyclefold `RecursiveSNARK` type with its `new`, -//! `prove_step`, and `verify` methods. 
- -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::PrimeFieldBits; -use once_cell::sync::OnceCell; -use serde::{Deserialize, Serialize}; - -use super::{ - nifs::{CycleFoldNIFS, PrimaryNIFS}, - nova_circuit::{AugmentedCircuit, AugmentedCircuitInputs, AugmentedCircuitParams}, - util::{absorb_primary_relaxed_r1cs, FoldingData}, -}; -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::{ - BN_LIMB_WIDTH, BN_N_LIMBS, NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT, - NUM_HASH_BITS, - }, - cyclefold::circuit::CycleFoldCircuit, - errors::NovaError, - gadgets::scalar_as_base, - r1cs::{ - self, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSWitness, RelaxedR1CSInstance, - RelaxedR1CSWitness, - }, - supernova::StepCircuit, - traits::{ - commitment::CommitmentTrait, AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, - ROConstantsCircuit, ROTrait, - }, - Commitment, CommitmentKey, DigestComputer, R1CSWithArity, ROConstants, ResourceBuffer, - SimpleDigestible, -}; - -/// The public parameters used in the CycleFold recursive SNARK proof and -/// verification -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E1: CurveCycleEquipped, -{ - F_arity_primary: usize, - ro_consts_primary: ROConstants>, - ro_consts_circuit_primary: ROConstantsCircuit>, - ck_primary: CommitmentKey, - circuit_shape_primary: R1CSWithArity, - augmented_circuit_params: AugmentedCircuitParams, - - ro_consts_cyclefold: ROConstants>, - ck_cyclefold: CommitmentKey>, - circuit_shape_cyclefold: R1CSWithArity>, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Builds the public parameters for the circuit `C1`. 
- /// The same note for public parameter hints apply as in the case for Nova's - /// public parameters: For some final compressing SNARKs the size of the - /// commitment key must be larger, so we include `ck_hint_primary` and - /// `ck_hint_cyclefold` parameters to accommodate this. - pub fn setup>( - c_primary: &C1, - ck_hint_primary: &CommitmentKeyHint, - ck_hint_cyclefold: &CommitmentKeyHint>, - ) -> Self { - let F_arity_primary = c_primary.arity(); - let ro_consts_primary = ROConstants::>::default(); - let ro_consts_circuit_primary = ROConstantsCircuit::>::default(); - - let augmented_circuit_params = AugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS); - let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( - &augmented_circuit_params, - ro_consts_circuit_primary.clone(), - None, - c_primary, - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint_primary); - let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); - - let ro_consts_cyclefold = ROConstants::>::default(); - let mut cs: ShapeCS> = ShapeCS::new(); - let circuit_cyclefold: CycleFoldCircuit = CycleFoldCircuit::default(); - let _ = circuit_cyclefold.synthesize(&mut cs); - let (r1cs_shape_cyclefold, ck_cyclefold) = cs.r1cs_shape_and_key(ck_hint_cyclefold); - let circuit_shape_cyclefold = R1CSWithArity::new(r1cs_shape_cyclefold, 0); - - Self { - F_arity_primary, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - circuit_shape_primary, - augmented_circuit_params, - ro_consts_cyclefold, - ck_cyclefold, - circuit_shape_cyclefold, - digest: OnceCell::new(), - } - } - - /// Calculate the digest of the public parameters. 
- pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints in the primary and cyclefold circuits - pub const fn num_constraints(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_cons, - self.circuit_shape_cyclefold.r1cs_shape.num_cons, - ) - } - - /// Returns the number of variables in the primary and cyclefold circuits - pub const fn num_variables(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_vars, - self.circuit_shape_cyclefold.r1cs_shape.num_vars, - ) - } -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -/// A SNARK that proves the correct execution of an incremental computation in -/// the CycleFold folding scheme. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - // Input - z0_primary: Vec, - - // primary circuit data - r_W_primary: RelaxedR1CSWitness, - r_U_primary: RelaxedR1CSInstance, - l_w_primary: R1CSWitness, - l_u_primary: R1CSInstance, - - // cyclefold circuit data - r_W_cyclefold: RelaxedR1CSWitness>, - r_U_cyclefold: RelaxedR1CSInstance>, - - // memory buffers for folding steps - buffer_primary: ResourceBuffer, - buffer_cyclefold: ResourceBuffer>, - - i: usize, - zi_primary: Vec, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// Create a new instance of a recursive SNARK - pub fn new>( - pp: &PublicParams, - c_primary: &C1, - z0_primary: &[E1::Scalar], - ) -> Result { - if z0_primary.len() != pp.F_arity_primary { - return Err(NovaError::InvalidInitialInputLength); - } - - let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; - let r1cs_cyclefold = &pp.circuit_shape_cyclefold.r1cs_shape; - - let r_U_cyclefold = RelaxedR1CSInstance::default(&pp.ck_cyclefold, r1cs_cyclefold); - let r_W_cyclefold = 
RelaxedR1CSWitness::default(r1cs_cyclefold); - - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - as Engine>::Base::from(0u64), - z0_primary.to_vec(), - None, - None, - None, - None, - None, - None, - ); - - let circuit_primary = AugmentedCircuit::new( - &pp.augmented_circuit_params, - pp.ro_consts_circuit_primary.clone(), - Some(inputs_primary), - c_primary, - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - let (l_u_primary, l_w_primary) = - cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; - - let r_U_primary = RelaxedR1CSInstance::default(&pp.ck_primary, r1cs_primary); - let r_W_primary = RelaxedR1CSWitness::default(r1cs_primary); - - let zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - - let buffer_primary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), - T: r1cs::default_T::(r1cs_primary.num_cons), - }; - - let buffer_cyclefold = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_cyclefold.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_cyclefold.num_cons), - T: r1cs::default_T::>(r1cs_cyclefold.num_cons), - }; - - Ok(Self { - z0_primary: z0_primary.to_vec(), - r_W_primary, - r_U_primary, - l_w_primary, - l_u_primary, - r_W_cyclefold, - r_U_cyclefold, - buffer_primary, - buffer_cyclefold, - i: 0, - zi_primary, - }) - } - - /// Update the `RecursiveSNARK` by proving a step of the incremental - /// computation. 
- pub fn prove_step>( - &mut self, - pp: &PublicParams, - c_primary: &C1, - ) -> Result<(), NovaError> { - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - let (nifs_primary, (r_U_primary, r_W_primary), r) = PrimaryNIFS::>::prove( - &pp.ck_primary, - &pp.ro_consts_primary, - &pp.digest(), - &pp.circuit_shape_primary.r1cs_shape, - &self.r_U_primary, - &self.r_W_primary, - &self.l_u_primary, - &self.l_w_primary, - )?; - - let r_bools = r - .to_le_bits() - .iter() - .map(|b| Some(*b)) - .take(NUM_CHALLENGE_BITS) - .collect::>>() - .map(|v| v.try_into().unwrap()); - - let comm_T = Commitment::::decompress(&nifs_primary.comm_T)?; - let E_new = self.r_U_primary.comm_E + comm_T * r; - - let W_new = self.r_U_primary.comm_W + self.l_u_primary.comm_W * r; - - let mut cs_cyclefold_E = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, - pp.circuit_shape_cyclefold.r1cs_shape.num_vars, - ); - - let circuit_cyclefold_E: CycleFoldCircuit = - CycleFoldCircuit::new(Some(self.r_U_primary.comm_E), Some(comm_T), r_bools); - - let _ = circuit_cyclefold_E.synthesize(&mut cs_cyclefold_E); - - let (l_u_cyclefold_E, l_w_cyclefold_E) = cs_cyclefold_E - .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) - .map_err(|_| NovaError::UnSat)?; - - // TODO: check if this is better or worse than `prove_mut` with a clone of - // `self.r_U_cyclefold` - let (nifs_cyclefold_E, (r_U_cyclefold_E, r_W_cyclefold_E)) = CycleFoldNIFS::prove( - &pp.ck_cyclefold, - &pp.ro_consts_cyclefold, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_cyclefold.r1cs_shape, - &self.r_U_cyclefold, - &self.r_W_cyclefold, - &l_u_cyclefold_E, - &l_w_cyclefold_E, - )?; - - let comm_T_E = Commitment::>::decompress(&nifs_cyclefold_E.comm_T)?; - - let mut cs_cyclefold_W = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_cyclefold.r1cs_shape.num_io + 1, - pp.circuit_shape_cyclefold.r1cs_shape.num_vars, - ); - - let circuit_cyclefold_W: 
CycleFoldCircuit = CycleFoldCircuit::new( - Some(self.r_U_primary.comm_W), - Some(self.l_u_primary.comm_W), - r_bools, - ); - - let _ = circuit_cyclefold_W.synthesize(&mut cs_cyclefold_W); - - let (l_u_cyclefold_W, l_w_cyclefold_W) = cs_cyclefold_W - .r1cs_instance_and_witness(&pp.circuit_shape_cyclefold.r1cs_shape, &pp.ck_cyclefold) - .map_err(|_| NovaError::UnSat)?; - - // TODO: check if this is better or worse than `prove_mut` with a clone of - // r_U_cyclefold_E - let (nifs_cyclefold_W, (r_U_cyclefold_W, r_W_cyclefold_W)) = CycleFoldNIFS::prove( - &pp.ck_cyclefold, - &pp.ro_consts_cyclefold, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_cyclefold.r1cs_shape, - &r_U_cyclefold_E, - &r_W_cyclefold_E, - &l_u_cyclefold_W, - &l_w_cyclefold_W, - )?; - - let comm_T_W = Commitment::>::decompress(&nifs_cyclefold_W.comm_T)?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp.circuit_shape_primary.r1cs_shape.num_io + 1, - pp.circuit_shape_primary.r1cs_shape.num_vars, - ); - - let data_p = FoldingData::new(self.r_U_primary.clone(), self.l_u_primary.clone(), comm_T); - let data_c_E = FoldingData::new(self.r_U_cyclefold.clone(), l_u_cyclefold_E, comm_T_E); - let data_c_W = FoldingData::new(r_U_cyclefold_E, l_u_cyclefold_W, comm_T_W); - - let inputs_primary: AugmentedCircuitInputs, E1> = AugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - as Engine>::Base::from(self.i as u64), - self.z0_primary.clone(), - Some(self.zi_primary.clone()), - Some(data_p), - Some(data_c_E), - Some(data_c_W), - Some(E_new), - Some(W_new), - ); - - let circuit_primary: AugmentedCircuit<'_, Dual, E1, C1> = AugmentedCircuit::new( - &pp.augmented_circuit_params, - pp.ro_consts_circuit_primary.clone(), - Some(inputs_primary), - c_primary, - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary) - .map_err(|_| 
NovaError::UnSat)?; - - self.zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - - self.r_U_primary = r_U_primary; - self.r_W_primary = r_W_primary; - self.l_u_primary = l_u_primary; - self.l_w_primary = l_w_primary; - self.r_U_cyclefold = r_U_cyclefold_W; - self.r_W_cyclefold = r_W_cyclefold_W; - - self.i += 1; - - Ok(()) - } - - /// Verify the correctness of the `RecursiveSNARK` - pub fn verify( - &self, - pp: &PublicParams, - num_steps: usize, - z0_primary: &[E1::Scalar], - ) -> Result, NovaError> { - // number of steps cannot be zero - let is_num_steps_zero = num_steps == 0; - - // check if the provided proof has executed num_steps - let is_num_steps_not_match = self.i != num_steps; - - // check if the initial inputs match - let is_inputs_not_match = self.z0_primary != z0_primary; - - // check if the (relaxed) R1CS instances have two public outputs - let is_instance_has_two_outputs = self.r_U_primary.X.len() != 2; - - if is_num_steps_zero - || is_num_steps_not_match - || is_inputs_not_match - || is_instance_has_two_outputs - { - return Err(NovaError::ProofVerifyError); - } - - // Calculate the hashes of the primary running instance and cyclefold running - // instance - let (hash_primary, hash_cyclefold) = { - let mut hasher = as Engine>::RO::new( - pp.ro_consts_primary.clone(), - 2 + 2 * pp.F_arity_primary + 2 * NUM_FE_IN_EMULATED_POINT + 3, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - absorb_primary_relaxed_r1cs::>(&self.r_U_primary, &mut hasher); - let hash_primary = hasher.squeeze(NUM_HASH_BITS); - - let mut hasher = as Engine>::RO::new( - pp.ro_consts_cyclefold.clone(), - 1 + 1 + 3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - 
self.r_U_cyclefold.absorb_in_ro(&mut hasher); - let hash_cyclefold = hasher.squeeze(NUM_HASH_BITS); - - (hash_primary, hash_cyclefold) - }; - - // Verify the hashes equal the public IO for the final primary instance - if scalar_as_base::>(hash_primary) != self.l_u_primary.X[0] - || scalar_as_base::>(hash_cyclefold) != self.l_u_primary.X[1] - { - return Err(NovaError::ProofVerifyError); - } - - // Verify the satisfiability of running relaxed instances, and the final primary - // instance. - let (res_r_primary, (res_l_primary, res_r_cyclefold)) = rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( - &pp.ck_primary, - &self.r_U_primary, - &self.r_W_primary, - ) - }, - || { - rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat( - &pp.ck_primary, - &self.l_u_primary, - &self.l_w_primary, - ) - }, - || { - pp.circuit_shape_cyclefold.r1cs_shape.is_sat_relaxed( - &pp.ck_cyclefold, - &self.r_U_cyclefold, - &self.r_W_cyclefold, - ) - }, - ) - }, - ); - - res_r_primary?; - res_l_primary?; - res_r_cyclefold?; - - Ok(self.zi_primary.clone()) - } -} - -// #[cfg(test)] -// mod test { -// use std::marker::PhantomData; - -// use bellpepper_core::num::AllocatedNum; - -// use super::*; -// use crate::{ -// provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, -// traits::snark::default_ck_hint, -// }; - -// #[derive(Clone)] -// struct SquareCircuit { -// _p: PhantomData, -// } - -// impl StepCircuit for SquareCircuit { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; -// let x_sq = x.square(cs.namespace(|| "x_sq"))?; - -// Ok(vec![x_sq]) -// } -// } - -// fn test_trivial_cyclefold_prove_verify_with() { -// let primary_circuit = SquareCircuit:: { _p: PhantomData }; - -// let pp = -// PublicParams::::setup(&primary_circuit, &*default_ck_hint(), -// &*default_ck_hint()); - -// let z0 = vec![E::Scalar::from(2u64)]; - -// let 
mut recursive_snark = RecursiveSNARK::new(&pp, &primary_circuit, -// &z0).unwrap(); - -// (1..5).for_each(|iter| { -// let res_proof = recursive_snark.prove_step(&pp, -// &primary_circuit); res_proof.unwrap(); - -// let res_verify = recursive_snark.verify(&pp, iter, &z0); -// res_verify.unwrap(); -// }); -// } - -// #[test] -// fn test_cyclefold_prove_verify() { -// test_trivial_cyclefold_prove_verify_with::(); -// test_trivial_cyclefold_prove_verify_with::(); -// test_trivial_cyclefold_prove_verify_with::(); -// } -// } diff --git a/src/cyclefold/util.rs b/src/cyclefold/util.rs deleted file mode 100644 index d5f9411..0000000 --- a/src/cyclefold/util.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! This module defines some useful utilities for RO absorbing, and the Folding -//! data used in the CycleFold folding scheme. - -use ff::Field; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, - Commitment, -}; - -/// Absorb a commitment over engine `E1` into an RO over engine `E2` by -/// absorbing the limbs -pub(super) fn absorb_primary_commitment( - comm: &impl CommitmentTrait, - ro: &mut impl ROTrait, -) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - let (x, y, is_infinity) = comm.to_coordinates(); - - let x_limbs = nat_to_limbs(&f_to_nat(&x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - let y_limbs = nat_to_limbs(&f_to_nat(&y), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - - for limb in x_limbs { - ro.absorb(scalar_as_base::(limb)); - } - for limb in y_limbs { - ro.absorb(scalar_as_base::(limb)); - } - if is_infinity { - ro.absorb(::Scalar::ONE); - } else { - ro.absorb(::Scalar::ZERO); - } -} - -pub(super) fn absorb_primary_r1cs( - u: &R1CSInstance, - ro: &mut impl ROTrait, -) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - 
absorb_primary_commitment::(&u.comm_W, ro); - for x in &u.X { - ro.absorb(*x); - } -} - -pub(super) fn absorb_cyclefold_r1cs(u: &R1CSInstance, ro: &mut E::RO) { - u.comm_W.absorb_in_ro(ro); - u.X.iter().for_each(|x| { - let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - limbs - .into_iter() - .for_each(|limb| ro.absorb(scalar_as_base::(limb))); - }); -} - -pub(super) fn absorb_primary_relaxed_r1cs(U: &RelaxedR1CSInstance, ro: &mut E2::RO) -where - E1: Engine::Scalar>, - E2: Engine::Scalar>, -{ - absorb_primary_commitment::(&U.comm_W, ro); - absorb_primary_commitment::(&U.comm_E, ro); - ro.absorb(U.u); - for e in &U.X { - ro.absorb(*e); - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub(super) struct FoldingData { - pub U: RelaxedR1CSInstance, - pub u: R1CSInstance, - pub T: Commitment, -} - -impl FoldingData { - pub fn new(U: RelaxedR1CSInstance, u: R1CSInstance, T: Commitment) -> Self { - Self { U, u, T } - } -} diff --git a/src/digest.rs b/src/digest.rs deleted file mode 100644 index 48b32f9..0000000 --- a/src/digest.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::{io, marker::PhantomData}; - -use bincode::Options; -use ff::PrimeField; -use serde::Serialize; -use sha3::{Digest, Sha3_256}; - -use crate::constants::NUM_HASH_BITS; - -/// Trait for components with potentially discrete digests to be included in -/// their container's digest. -pub trait Digestible { - /// Write the byte representation of Self in a byte buffer - fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error>; -} - -/// Marker trait to be implemented for types that implement `Digestible` and -/// `Serialize`. Their instances will be serialized to bytes then digested. 
-pub trait SimpleDigestible: Serialize {} - -impl Digestible for T { - fn write_bytes(&self, byte_sink: &mut W) -> Result<(), io::Error> { - let config = bincode::DefaultOptions::new() - .with_little_endian() - .with_fixint_encoding(); - // Note: bincode recursively length-prefixes every field! - config - .serialize_into(byte_sink, self) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - } -} - -pub struct DigestComputer<'a, F, T> { - inner: &'a T, - _phantom: PhantomData, -} - -impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> { - fn hasher() -> Sha3_256 { - Sha3_256::new() - } - - fn map_to_field(digest: &[u8]) -> F { - let bv = (0..NUM_HASH_BITS).map(|i| { - let (byte_pos, bit_pos) = (i / 8, i % 8); - let bit = (digest[byte_pos] >> bit_pos) & 1; - bit == 1 - }); - - // turn the bit vector into a scalar - let mut digest = F::ZERO; - let mut coeff = F::ONE; - for bit in bv { - if bit { - digest += coeff; - } - coeff += coeff; - } - digest - } - - /// Create a new `DigestComputer` - pub fn new(inner: &'a T) -> Self { - DigestComputer { - inner, - _phantom: PhantomData, - } - } - - /// Compute the digest of a `Digestible` instance. 
- pub fn digest(&self) -> Result { - let mut hasher = Self::hasher(); - self.inner.write_bytes(&mut hasher)?; - let bytes: [u8; 32] = hasher.finalize().into(); - Ok(Self::map_to_field(&bytes)) - } -} - -// #[cfg(test)] -// mod tests { -// use ff::Field; -// use once_cell::sync::OnceCell; -// use serde::{Deserialize, Serialize}; - -// use super::{DigestComputer, SimpleDigestible}; -// use crate::traits::Engine; - -// type E = PallasEngine; - -// #[derive(Serialize, Deserialize)] -// struct S { -// i: usize, -// #[serde(skip, default = "OnceCell::new")] -// digest: OnceCell, -// } - -// impl SimpleDigestible for S {} - -// impl S { -// fn new(i: usize) -> Self { -// Self { -// i, -// digest: OnceCell::new(), -// } -// } - -// fn digest(&self) -> E::Scalar { -// self.digest -// .get_or_try_init(|| DigestComputer::new(self).digest()) -// .cloned() -// .unwrap() -// } -// } - -// #[test] -// fn test_digest_field_not_ingested_in_computation() { -// let s1 = S::::new(42); - -// // let's set up a struct with a weird digest field to make sure the -// digest // computation does not depend of it -// let oc = OnceCell::new(); -// oc.set(::Scalar::ONE).unwrap(); - -// let s2: S = S { i: 42, digest: oc }; - -// assert_eq!( -// DigestComputer::<::Scalar, _>::new(&s1) -// .digest() -// .unwrap(), -// DigestComputer::<::Scalar, _>::new(&s2) -// .digest() -// .unwrap() -// ); - -// // note: because of the semantics of `OnceCell::get_or_try_init`, the -// above // equality will not result in `s1.digest() == s2.digest` -// assert_ne!( -// s2.digest(), -// DigestComputer::<::Scalar, _>::new(&s2) -// .digest() -// .unwrap() -// ); -// } - -// #[test] -// fn test_digest_impervious_to_serialization() { -// let good_s = S::::new(42); - -// // let's set up a struct with a weird digest field to confuse -// deserializers let oc = OnceCell::new(); -// oc.set(::Scalar::ONE).unwrap(); - -// let bad_s: S = S { i: 42, digest: oc }; -// // this justifies the adjective "bad" -// 
assert_ne!(good_s.digest(), bad_s.digest()); - -// let naughty_bytes = bincode::serialize(&bad_s).unwrap(); - -// let retrieved_s: S = -// bincode::deserialize(&naughty_bytes).unwrap(); assert_eq!(good_s. -// digest(), retrieved_s.digest()) } -// } diff --git a/src/errors.rs b/src/errors.rs deleted file mode 100644 index ddd517d..0000000 --- a/src/errors.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! This module defines errors returned by the library. -use core::fmt::Debug; - -use thiserror::Error; - -/// Errors returned by Nova -#[derive(Debug, Eq, PartialEq, Error)] -#[non_exhaustive] -pub enum NovaError { - /// returned if the supplied row or col in (row,col,val) tuple is out of - /// range - #[error("InvalidIndex")] - InvalidIndex, - /// returned if the step circuit calls inputize or alloc_io in its - /// synthesize method instead of passing output with the return value - #[error("InvalidStepCircuitIO")] - InvalidStepCircuitIO, - /// returned if the supplied input is not of the right length - #[error("InvalidInputLength")] - InvalidInputLength, - /// returned if the supplied witness is not of the right length - #[error("InvalidWitnessLength")] - InvalidWitnessLength, - /// returned if the supplied witness is not a satisfying witness to a given - /// shape and instance - #[error("UnSat")] - UnSat, - /// returned if the supplied witness is not a satisfying witness to a given - /// shape and instance, with error constraint index - #[error("UnSatIndex")] - UnSatIndex(usize), - /// returned when the supplied compressed commitment cannot be decompressed - #[error("DecompressionError")] - DecompressionError, - /// returned if proof verification fails - #[error("ProofVerifyError")] - ProofVerifyError, - /// returned if the provided commitment key is not of sufficient length - #[error("InvalidCommitmentKeyLength")] - InvalidCommitmentKeyLength, - /// returned if the provided number of steps is zero - #[error("InvalidNumSteps")] - InvalidNumSteps, - /// returned if there is an 
error in the proof/verification of a PCS - #[error("PCSError")] - PCSError(#[from] PCSError), - /// returned when an invalid sum-check proof is provided - #[error("InvalidSumcheckProof")] - InvalidSumcheckProof, - /// returned when the initial input to an incremental computation differs - /// from a previously declared arity - #[error("InvalidInitialInputLength")] - InvalidInitialInputLength, - /// returned when the step execution produces an output whose length differs - /// from a previously declared arity - #[error("InvalidStepOutputLength")] - InvalidStepOutputLength, - /// returned when the transcript engine encounters an overflow of the round - /// number - #[error("InternalTranscriptError")] - InternalTranscriptError, - /// returned when the multiset check fails - #[error("InvalidMultisetProof")] - InvalidMultisetProof, - /// returned when the product proof check fails - #[error("InvalidProductProof")] - InvalidProductProof, - /// returned when the consistency with public IO and assignment used fails - #[error("IncorrectWitness")] - IncorrectWitness, - /// return when error during synthesis - #[error("SynthesisError: {0}")] - SynthesisError(String), - /// returned when there is an error creating a digest - #[error("DigestError")] - DigestError, - /// returned when the prover cannot prove the provided statement due to - /// completeness error - #[error("InternalError")] - InternalError, -} - -/// Errors specific to the Polynomial commitment scheme -#[derive(Debug, Eq, PartialEq, Error)] -pub enum PCSError { - /// returned when an invalid PCS evaluation argument is provided - #[error("InvalidPCS")] - InvalidPCS, - /// returned when there is a Zeromorph error - #[error("ZMError")] - ZMError, - /// returned when a length check fails in a PCS - #[error("LengthError")] - LengthError, -} - -impl From for NovaError { - fn from(err: bellpepper_core::SynthesisError) -> Self { - Self::SynthesisError(err.to_string()) - } -} diff --git a/src/fast_serde.rs 
b/src/fast_serde.rs deleted file mode 100644 index 16bebd7..0000000 --- a/src/fast_serde.rs +++ /dev/null @@ -1,100 +0,0 @@ -//! This module implements fast serde for reading and writing -//! key objects requires for proof generation and verification. -//! With WASM in particular, serializing via standard binary serializers -//! like bincode causes a dramatic decrease in performance. This simple -//! serializers parses in bytes very efficiently. -//! -//! In the future, it can be extended to do direct memory access to the -//! javascript runtime. For now it does a single copy of the data into -//! the rust runtime. - -use std::io::{Cursor, Read}; - -use thiserror::Error; - -pub static MAGIC_NUMBER: [u8; 4] = [0x50, 0x4C, 0x55, 0x54]; -pub enum SerdeByteTypes { - AuxParams = 0x01, - UniversalKZGParam = 0x02, - CommitmentKey = 0x03, - ProverParams = 0x04 -} - -#[derive(Debug, Error)] -pub enum SerdeByteError { - #[error("{}", "invalid magic number")] - InvalidMagicNumber, - #[error("{}", "invalid serde type")] - InvalidSerdeType, - #[error("{}", "invalid section count")] - InvalidSectionCount, - #[error("{}", "invalid section type")] - InvalidSectionType, - #[error("{}", "invalid section size")] - InvalidSectionSize, - #[error(transparent)] - IoError(#[from] std::io::Error), - #[error(transparent)] - BincodeError(#[from] Box), - #[error("{}", "g1 decode error")] - G1DecodeError, - #[error("{}", "g2 decode error")] - G2DecodeError, -} - -/// A trait for fast conversions to bytes -pub trait FastSerde: Sized { - fn to_bytes(&self) -> Vec; - fn from_bytes(bytes: &Vec) -> Result; - - fn validate_header( - cursor: &mut Cursor<&Vec>, - expected_type: SerdeByteTypes, - expected_sections: u8, - ) -> Result<(), SerdeByteError> { - let mut magic = [0u8; 4]; - cursor.read_exact(&mut magic)?; - if magic != MAGIC_NUMBER { - return Err(SerdeByteError::InvalidMagicNumber); - } - - let mut serde_type = [0u8; 1]; - cursor.read_exact(&mut serde_type)?; - if serde_type[0] != 
expected_type as u8 { - return Err(SerdeByteError::InvalidSerdeType); - } - - let mut num_sections = [0u8; 1]; - cursor.read_exact(&mut num_sections)?; - if num_sections[0] != expected_sections { - return Err(SerdeByteError::InvalidSectionCount); - } - - Ok(()) - } - - fn read_section_bytes( - cursor: &mut Cursor<&Vec>, - expected_type: u8, - ) -> Result, SerdeByteError> { - let mut section_type = [0u8; 1]; - cursor.read_exact(&mut section_type)?; - if section_type[0] != expected_type { - return Err(SerdeByteError::InvalidSectionType); - } - - let mut section_size = [0u8; 4]; - cursor.read_exact(&mut section_size)?; - let size = u32::from_le_bytes(section_size) as usize; - let mut section_data = vec![0u8; size]; - cursor.read_exact(&mut section_data)?; - - Ok(section_data) - } - - fn write_section_bytes(out: &mut Vec, section_type: u8, data: &Vec) { - out.push(section_type); - out.extend_from_slice(&(data.len() as u32).to_le_bytes()); - out.extend_from_slice(data); - } -} diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs deleted file mode 100644 index d189f3f..0000000 --- a/src/gadgets/ecc.rs +++ /dev/null @@ -1,1161 +0,0 @@ -//! This module implements various elliptic curve gadgets -#![allow(non_snake_case)] -use bellpepper::gadgets::{boolean_utils::conditionally_select, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{Field, PrimeField}; - -use crate::{ - gadgets::utils::{ - alloc_num_equals, alloc_one, alloc_zero, conditionally_select2, select_num_or_one, - select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, - select_zero_or_num2, - }, - traits::Group, -}; - -/// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. 
-#[derive(Debug, Clone)] -pub struct AllocatedPoint { - pub(crate) x: AllocatedNum, - pub(crate) y: AllocatedNum, - pub(crate) is_infinity: AllocatedNum, -} - -impl AllocatedPoint { - /// Allocates a new point on the curve using coordinates provided by - /// `coords`. If coords = None, it allocates the default infinity point - pub fn alloc>( - mut cs: CS, - coords: Option<(G::Base, G::Base, bool)>, - ) -> Result { - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.0)) - })?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(coords.map_or(G::Base::ZERO, |c| c.1)) - })?; - let is_infinity = AllocatedNum::alloc(cs.namespace(|| "is_infinity"), || { - Ok(if coords.map_or(true, |c| c.2) { - G::Base::ONE - } else { - G::Base::ZERO - }) - })?; - cs.enforce( - || "is_infinity is bit", - |lc| lc + is_infinity.get_variable(), - |lc| lc + CS::one() - is_infinity.get_variable(), - |lc| lc, - ); - - Ok(Self { x, y, is_infinity }) - } - - /// checks if `self` is on the curve or if it is infinity - pub fn check_on_curve(&self, mut cs: CS) -> Result<(), SynthesisError> - where - CS: ConstraintSystem, - { - // check that (x,y) is on the curve if it is not infinity - // we will check that (1- is_infinity) * y^2 = (1-is_infinity) * (x^3 + Ax + B) - // note that is_infinity is already restricted to be in the set {0, 1} - let y_square = self.y.square(cs.namespace(|| "y_square"))?; - let x_square = self.x.square(cs.namespace(|| "x_square"))?; - let x_cube = self.x.mul(cs.namespace(|| "x_cube"), &x_square)?; - - let rhs = AllocatedNum::alloc(cs.namespace(|| "rhs"), || { - if *self.is_infinity.get_value().get()? == G::Base::ONE { - Ok(G::Base::ZERO) - } else { - Ok(*x_cube.get_value().get()? - + *self.x.get_value().get()? 
* G::group_params().0 - + G::group_params().1) - } - })?; - - cs.enforce( - || "rhs = (1-is_infinity) * (x^3 + Ax + B)", - |lc| { - lc + x_cube.get_variable() - + (G::group_params().0, self.x.get_variable()) - + (G::group_params().1, CS::one()) - }, - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + rhs.get_variable(), - ); - - // check that (1-infinity) * y_square = rhs - cs.enforce( - || "check that y_square * (1 - is_infinity) = rhs", - |lc| lc + y_square.get_variable(), - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + rhs.get_variable(), - ); - - Ok(()) - } - - /// Allocates a default point on the curve, set to the identity point. - pub fn default>(mut cs: CS) -> Self { - let zero = alloc_zero(cs.namespace(|| "zero")); - let one = alloc_one(cs.namespace(|| "one")); - - Self { - x: zero.clone(), - y: zero, - is_infinity: one, - } - } - - /// Returns coordinates associated with the point. - #[allow(unused)] - pub const fn get_coordinates( - &self, - ) -> ( - &AllocatedNum, - &AllocatedNum, - &AllocatedNum, - ) { - (&self.x, &self.y, &self.is_infinity) - } - - /// Negates the provided point - pub fn negate>( - &self, - mut cs: CS, - ) -> Result { - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(-*self.y.get_value().get()?))?; - - cs.enforce( - || "check y = - self.y", - |lc| lc + self.y.get_variable(), - |lc| lc + CS::one(), - |lc| lc - y.get_variable(), - ); - - Ok(Self { - x: self.x.clone(), - y, - is_infinity: self.is_infinity.clone(), - }) - } - - /// Add two points (may be equal) - pub fn add>( - &self, - mut cs: CS, - other: &Self, - ) -> Result { - // Compute boolean equal indicating if self = other - - let equal_x = alloc_num_equals( - cs.namespace(|| "check self.x == other.x"), - &self.x, - &other.x, - )?; - - let equal_y = alloc_num_equals( - cs.namespace(|| "check self.y == other.y"), - &self.y, - &other.y, - )?; - - // Compute the result of the addition and the result of double self - let result_from_add = 
- self.add_internal(cs.namespace(|| "add internal"), other, &equal_x)?; - let result_from_double = self.double(cs.namespace(|| "double"))?; - - // Output: - // If (self == other) { - // return double(self) - // }else { - // if (self.x == other.x){ - // return infinity [negation] - // } else { - // return add(self, other) - // } - // } - let result_for_equal_x = Self::select_point_or_infinity( - cs.namespace(|| "equal_y ? result_from_double : infinity"), - &result_from_double, - &Boolean::from(equal_y), - )?; - - Self::conditionally_select( - cs.namespace(|| "equal ? result_from_double : result_from_add"), - &result_for_equal_x, - &result_from_add, - &Boolean::from(equal_x), - ) - } - - /// Adds other point to this point and returns the result. Assumes that the - /// two points are different and that both `other.is_infinity` and - /// `this.is_infinity` are bits - pub fn add_internal>( - &self, - mut cs: CS, - other: &Self, - equal_x: &AllocatedBit, - ) -> Result { - //************************************************************************/ - // lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); - //************************************************************************/ - // First compute (other.x - self.x).inverse() - // If either self or other are the infinity point or self.x = other.x then - // compute bogus values Specifically, - // x_diff = self != inf && other != inf && self.x == other.x ? (other.x - - // self.x) : 1 - - // Compute self.is_infinity OR other.is_infinity = - // NOT(NOT(self.is_ifninity) AND NOT(other.is_infinity)) - let at_least_one_inf = AllocatedNum::alloc(cs.namespace(|| "at least one inf"), || { - Ok(G::Base::ONE - - (G::Base::ONE - *self.is_infinity.get_value().get()?) 
- * (G::Base::ONE - *other.is_infinity.get_value().get()?)) - })?; - cs.enforce( - || "1 - at least one inf = (1-self.is_infinity) * (1-other.is_infinity)", - |lc| lc + CS::one() - self.is_infinity.get_variable(), - |lc| lc + CS::one() - other.is_infinity.get_variable(), - |lc| lc + CS::one() - at_least_one_inf.get_variable(), - ); - - // Now compute x_diff_is_actual = at_least_one_inf OR equal_x - let x_diff_is_actual = - AllocatedNum::alloc(cs.namespace(|| "allocate x_diff_is_actual"), || { - Ok(if *equal_x.get_value().get()? { - G::Base::ONE - } else { - *at_least_one_inf.get_value().get()? - }) - })?; - cs.enforce( - || "1 - x_diff_is_actual = (1-equal_x) * (1-at_least_one_inf)", - |lc| lc + CS::one() - at_least_one_inf.get_variable(), - |lc| lc + CS::one() - equal_x.get_variable(), - |lc| lc + CS::one() - x_diff_is_actual.get_variable(), - ); - - // x_diff = 1 if either self.is_infinity or other.is_infinity or self.x = - // other.x else self.x - other.x - let x_diff = select_one_or_diff2( - cs.namespace(|| "Compute x_diff"), - &other.x, - &self.x, - &x_diff_is_actual, - )?; - - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let x_diff_inv = if *x_diff_is_actual.get_value().get()? == G::Base::ONE { - // Set to default - G::Base::ONE - } else { - // Set to the actual inverse - (*other.x.get_value().get()? - *self.x.get_value().get()?) - .invert() - .unwrap() - }; - - Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) * x_diff_inv) - })?; - cs.enforce( - || "Check that lambda is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + x_diff.get_variable(), - |lc| lc + other.y.get_variable() - self.y.get_variable(), - ); - - //************************************************************************/ - // x = lambda * lambda - self.x - other.x; - //************************************************************************/ - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? 
* lambda.get_value().get()? - - *self.x.get_value().get()? - - *other.x.get_value().get()?) - })?; - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), - ); - - //************************************************************************/ - // y = lambda * (self.x - x) - self.y; - //************************************************************************/ - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - //************************************************************************/ - // We only return the computed x, y if neither of the points is infinity and - // self.x != other.y if self.is_infinity return other.clone() - // elif other.is_infinity return self.clone() - // elif self.x == other.x return infinity - // Otherwise return the computed points. - //************************************************************************/ - // Now compute the output x - - let x1 = conditionally_select2( - cs.namespace(|| "x1 = other.is_infinity ? self.x : x"), - &self.x, - &x, - &other.is_infinity, - )?; - - let x = conditionally_select2( - cs.namespace(|| "x = self.is_infinity ? other.x : x1"), - &other.x, - &x1, - &self.is_infinity, - )?; - - let y1 = conditionally_select2( - cs.namespace(|| "y1 = other.is_infinity ? self.y : y"), - &self.y, - &y, - &other.is_infinity, - )?; - - let y = conditionally_select2( - cs.namespace(|| "y = self.is_infinity ? 
other.y : y1"), - &other.y, - &y1, - &self.is_infinity, - )?; - - let is_infinity1 = select_num_or_zero2( - cs.namespace(|| "is_infinity1 = other.is_infinity ? self.is_infinity : 0"), - &self.is_infinity, - &other.is_infinity, - )?; - - let is_infinity = conditionally_select2( - cs.namespace(|| "is_infinity = self.is_infinity ? other.is_infinity : is_infinity1"), - &other.is_infinity, - &is_infinity1, - &self.is_infinity, - )?; - - Ok(Self { x, y, is_infinity }) - } - - /// Doubles the supplied point. - pub fn double>( - &self, - mut cs: CS, - ) -> Result { - //*************************************************************/ - // lambda = (G::Base::from(3) * self.x * self.x + G::GG::A()) - // * (G::Base::from(2)) * self.y).invert().unwrap(); - // ********************************************************** - - // Compute tmp = (G::Base::ONE + G::Base::ONE)* self.y ? self != inf : 1 - let tmp_actual = AllocatedNum::alloc(cs.namespace(|| "tmp_actual"), || { - Ok(*self.y.get_value().get()? + *self.y.get_value().get()?) - })?; - cs.enforce( - || "check tmp_actual", - |lc| lc + CS::one() + CS::one(), - |lc| lc + self.y.get_variable(), - |lc| lc + tmp_actual.get_variable(), - ); - - let tmp = select_one_or_num2(cs.namespace(|| "tmp"), &tmp_actual, &self.is_infinity)?; - - // Now compute lambda as (G::Base::from(3) * self.x * self.x + G::GG::A()) * - // tmp_inv - - let prod_1 = AllocatedNum::alloc(cs.namespace(|| "alloc prod 1"), || { - Ok(G::Base::from(3) * self.x.get_value().get()? * self.x.get_value().get()?) - })?; - cs.enforce( - || "Check prod 1", - |lc| lc + (G::Base::from(3), self.x.get_variable()), - |lc| lc + self.x.get_variable(), - |lc| lc + prod_1.get_variable(), - ); - - let lambda = AllocatedNum::alloc(cs.namespace(|| "alloc lambda"), || { - let tmp_inv = if *self.is_infinity.get_value().get()? 
== G::Base::ONE { - // Return default value 1 - G::Base::ONE - } else { - // Return the actual inverse - (*tmp.get_value().get()?).invert().unwrap() - }; - - Ok(tmp_inv * (*prod_1.get_value().get()? + G::group_params().0)) - })?; - - cs.enforce( - || "Check lambda", - |lc| lc + tmp.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + prod_1.get_variable() + (G::group_params().0, CS::one()), - ); - - // ********************************************************** - // x = lambda * lambda - self.x - self.x; - // ********************************************************** - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok( - ((*lambda.get_value().get()?) * (*lambda.get_value().get()?)) - - *self.x.get_value().get()? - - self.x.get_value().get()?, - ) - })?; - cs.enforce( - || "Check x", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + self.x.get_variable(), - ); - - // ********************************************************** - // y = lambda * (self.x - x) - self.y; - // ********************************************************** - let y = - AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok((*lambda.get_value().get()?) - * (*self.x.get_value().get()? - x.get_value().get()?) - - self.y.get_value().get()?) 
- })?; - cs.enforce( - || "Check y", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - // ********************************************************** - // Only return the computed x and y if the point is not infinity - // ********************************************************** - // x - let x = select_zero_or_num2(cs.namespace(|| "final x"), &x, &self.is_infinity)?; - - // y - let y = select_zero_or_num2(cs.namespace(|| "final y"), &y, &self.is_infinity)?; - - // is_infinity - let is_infinity = self.is_infinity.clone(); - - Ok(Self { x, y, is_infinity }) - } - - /// A gadget for scalar multiplication, optimized to use incomplete addition - /// law. The optimization here is analogous to , - /// except we use complete addition law over affine coordinates instead of - /// projective coordinates for the tail bits - pub fn scalar_mul>( - &self, - mut cs: CS, - scalar_bits: &[AllocatedBit], - ) -> Result { - let split_len = core::cmp::min(scalar_bits.len(), (G::Base::NUM_BITS - 2) as usize); - let (incomplete_bits, complete_bits) = scalar_bits.split_at(split_len); - - // we convert AllocatedPoint into AllocatedPointNonInfinity; we deal with the - // case where self.is_infinity = 1 below - let mut p = AllocatedPointNonInfinity::from_allocated_point(self); - - // we assume the first bit to be 1, so we must initialize acc to self and double - // it we remove this assumption below - let mut acc = p; - p = acc.double_incomplete(cs.namespace(|| "double"))?; - - // perform the double-and-add loop to compute the scalar mul using incomplete - // addition law - for (i, bit) in incomplete_bits.iter().enumerate().skip(1) { - let temp = acc.add_incomplete(cs.namespace(|| format!("add {i}")), &p)?; - acc = AllocatedPointNonInfinity::conditionally_select( - cs.namespace(|| format!("acc_iteration_{i}")), - &temp, - &acc, - &Boolean::from(bit.clone()), - )?; - - p = 
p.double_incomplete(cs.namespace(|| format!("double {i}")))?; - } - - // convert back to AllocatedPoint - let res = { - // we set acc.is_infinity = self.is_infinity - let acc = acc.to_allocated_point(&self.is_infinity); - - // we remove the initial slack if bits[0] is as not as assumed (i.e., it is not - // 1) - let acc_minus_initial = { - let neg = self.negate(cs.namespace(|| "negate"))?; - acc.add(cs.namespace(|| "res minus self"), &neg) - }?; - - Self::conditionally_select( - cs.namespace(|| "remove slack if necessary"), - &acc, - &acc_minus_initial, - &Boolean::from(scalar_bits[0].clone()), - )? - }; - - // when self.is_infinity = 1, return the default point, else return res - // we already set res.is_infinity to be self.is_infinity, so we do not need to - // set it here - let default = Self::default(cs.namespace(|| "default")); - let x = conditionally_select2( - cs.namespace(|| "check if self.is_infinity is zero (x)"), - &default.x, - &res.x, - &self.is_infinity, - )?; - - let y = conditionally_select2( - cs.namespace(|| "check if self.is_infinity is zero (y)"), - &default.y, - &res.y, - &self.is_infinity, - )?; - - // we now perform the remaining scalar mul using complete addition law - let mut acc = Self { - x, - y, - is_infinity: res.is_infinity, - }; - let mut p_complete = p.to_allocated_point(&self.is_infinity); - - for (i, bit) in complete_bits.iter().enumerate() { - let temp = acc.add(cs.namespace(|| format!("add_complete {i}")), &p_complete)?; - acc = Self::conditionally_select( - cs.namespace(|| format!("acc_complete_iteration_{i}")), - &temp, - &acc, - &Boolean::from(bit.clone()), - )?; - - p_complete = p_complete.double(cs.namespace(|| format!("double_complete {i}")))?; - } - - Ok(acc) - } - - /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( - mut cs: CS, - a: &Self, - b: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; - - let y = 
conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; - - let is_infinity = conditionally_select( - cs.namespace(|| "select is_infinity"), - &a.is_infinity, - &b.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } - - /// If condition outputs a otherwise infinity - pub fn select_point_or_infinity>( - mut cs: CS, - a: &Self, - condition: &Boolean, - ) -> Result { - let x = select_num_or_zero(cs.namespace(|| "select x"), &a.x, condition)?; - - let y = select_num_or_zero(cs.namespace(|| "select y"), &a.y, condition)?; - - let is_infinity = select_num_or_one( - cs.namespace(|| "select is_infinity"), - &a.is_infinity, - condition, - )?; - - Ok(Self { x, y, is_infinity }) - } -} - -#[derive(Clone, Debug)] -/// `AllocatedPoint` but one that is guaranteed to be not infinity -pub struct AllocatedPointNonInfinity { - x: AllocatedNum, - y: AllocatedNum, -} - -impl AllocatedPointNonInfinity { - /// Creates a new `AllocatedPointNonInfinity` from the specified coordinates - #[allow(unused)] - pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { - Self { x, y } - } - - /// Allocates a new point on the curve using coordinates provided by - /// `coords`. 
- #[allow(unused)] - pub fn alloc>( - mut cs: CS, - coords: Option<(G::Base, G::Base)>, - ) -> Result { - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.0)) - })?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - coords.map_or(Err(SynthesisError::AssignmentMissing), |c| Ok(c.1)) - })?; - - Ok(Self { x, y }) - } - - /// Turns an `AllocatedPoint` into an `AllocatedPointNonInfinity` (assumes - /// it is not infinity) - pub fn from_allocated_point(p: &AllocatedPoint) -> Self { - Self { - x: p.x.clone(), - y: p.y.clone(), - } - } - - /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` - pub fn to_allocated_point(&self, is_infinity: &AllocatedNum) -> AllocatedPoint { - AllocatedPoint { - x: self.x.clone(), - y: self.y.clone(), - is_infinity: is_infinity.clone(), - } - } - - /// Returns coordinates associated with the point. - #[allow(unused)] - pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { - (&self.x, &self.y) - } - - /// Add two points assuming self != +/- other - pub fn add_incomplete(&self, mut cs: CS, other: &Self) -> Result - where - CS: ConstraintSystem, - { - // allocate a free variable that an honest prover sets to lambda = - // (y2-y1)/(x2-x1) - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - if *other.x.get_value().get()? == *self.x.get_value().get()? { - Ok(G::Base::ONE) - } else { - Ok((*other.y.get_value().get()? - *self.y.get_value().get()?) - * (*other.x.get_value().get()? - *self.x.get_value().get()?) 
- .invert() - .unwrap()) - } - })?; - cs.enforce( - || "Check that lambda is computed correctly", - |lc| lc + lambda.get_variable(), - |lc| lc + other.x.get_variable() - self.x.get_variable(), - |lc| lc + other.y.get_variable() - self.y.get_variable(), - ); - - //************************************************************************/ - // x = lambda * lambda - self.x - other.x; - //************************************************************************/ - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? * lambda.get_value().get()? - - *self.x.get_value().get()? - - *other.x.get_value().get()?) - })?; - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + self.x.get_variable() + other.x.get_variable(), - ); - - //************************************************************************/ - // y = lambda * (self.x - x) - self.y; - //************************************************************************/ - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - Ok(Self { x, y }) - } - - /// doubles the point; since this is called with a point not at infinity, it - /// is guaranteed to be not infinity - pub fn double_incomplete>( - &self, - mut cs: CS, - ) -> Result { - // lambda = (3 x^2 + a) / 2 * y - - let x_sq = self.x.square(cs.namespace(|| "x_sq"))?; - - let lambda = AllocatedNum::alloc(cs.namespace(|| "lambda"), || { - let n = G::Base::from(3) * x_sq.get_value().get()? 
+ G::group_params().0; - let d = G::Base::from(2) * *self.y.get_value().get()?; - if d == G::Base::ZERO { - Ok(G::Base::ONE) - } else { - Ok(n * d.invert().unwrap()) - } - })?; - cs.enforce( - || "Check that lambda is computed correctly", - |lc| lc + lambda.get_variable(), - |lc| lc + (G::Base::from(2), self.y.get_variable()), - |lc| lc + (G::Base::from(3), x_sq.get_variable()) + (G::group_params().0, CS::one()), - ); - - let x = AllocatedNum::alloc(cs.namespace(|| "x"), || { - Ok(*lambda.get_value().get()? * *lambda.get_value().get()? - - *self.x.get_value().get()? - - *self.x.get_value().get()?) - })?; - - cs.enforce( - || "check that x is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + lambda.get_variable(), - |lc| lc + x.get_variable() + (G::Base::from(2), self.x.get_variable()), - ); - - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok( - *lambda.get_value().get()? * (*self.x.get_value().get()? - *x.get_value().get()?) - - *self.y.get_value().get()?, - ) - })?; - - cs.enforce( - || "Check that y is correct", - |lc| lc + lambda.get_variable(), - |lc| lc + self.x.get_variable() - x.get_variable(), - |lc| lc + y.get_variable() + self.y.get_variable(), - ); - - Ok(Self { x, y }) - } - - /// If condition outputs a otherwise outputs b - pub fn conditionally_select>( - mut cs: CS, - a: &Self, - b: &Self, - condition: &Boolean, - ) -> Result { - let x = conditionally_select(cs.namespace(|| "select x"), &a.x, &b.x, condition)?; - let y = conditionally_select(cs.namespace(|| "select y"), &a.y, &b.y, condition)?; - - Ok(Self { x, y }) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - use ff::{Field, PrimeFieldBits}; - use group::Curve; - use halo2curves::CurveAffine; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - provider::{ - bn256_grumpkin::{bn256, grumpkin}, - Bn256EngineIPA, Bn256EngineKZG, 
GrumpkinEngine, - }, - traits::{snark::default_ck_hint, Engine}, - }; - - #[derive(Debug, Clone)] - pub struct Point { - x: G::Base, - y: G::Base, - is_infinity: bool, - } - - impl Point { - pub fn new(x: G::Base, y: G::Base, is_infinity: bool) -> Self { - Self { x, y, is_infinity } - } - - pub fn random_vartime() -> Self { - loop { - let x = G::Base::random(&mut OsRng); - let y = (x.square() * x + G::group_params().1).sqrt(); - if y.is_some().unwrap_u8() == 1 { - return Self { - x, - y: y.unwrap(), - is_infinity: false, - }; - } - } - } - - /// Add any two points - pub fn add(&self, other: &Self) -> Self { - if self.x == other.x { - // If self == other then call double - if self.y == other.y { - self.double() - } else { - // if self.x == other.x and self.y != other.y then return infinity - Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - } - } - } else { - self.add_internal(other) - } - } - - /// Add two different points - pub fn add_internal(&self, other: &Self) -> Self { - if self.is_infinity { - return other.clone(); - } - - if other.is_infinity { - return self.clone(); - } - - let lambda = (other.y - self.y) * (other.x - self.x).invert().unwrap(); - let x = lambda * lambda - self.x - other.x; - let y = lambda * (self.x - x) - self.y; - Self { - x, - y, - is_infinity: false, - } - } - - pub fn double(&self) -> Self { - if self.is_infinity { - return Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - }; - } - - let lambda = G::Base::from(3) - * self.x - * self.x - * ((G::Base::ONE + G::Base::ONE) * self.y).invert().unwrap(); - let x = lambda * lambda - self.x - self.x; - let y = lambda * (self.x - x) - self.y; - Self { - x, - y, - is_infinity: false, - } - } - - pub fn scalar_mul(&self, scalar: &G::Scalar) -> Self { - let mut res = Self { - x: G::Base::ZERO, - y: G::Base::ZERO, - is_infinity: true, - }; - - let bits = scalar.to_le_bits(); - for i in (0..bits.len()).rev() { - res = res.double(); - if bits[i] { - res = 
self.add(&res); - } - } - res - } - } - - // Allocate a random point. Only used for testing - pub fn alloc_random_point>( - mut cs: CS, - ) -> Result, SynthesisError> { - // get a random point - let p = Point::::random_vartime(); - AllocatedPoint::alloc(cs.namespace(|| "alloc p"), Some((p.x, p.y, p.is_infinity))) - } - - /// Make the point io - pub fn inputize_allocated_point>( - p: &AllocatedPoint, - mut cs: CS, - ) { - let _ = p.x.inputize(cs.namespace(|| "Input point.x")); - let _ = p.y.inputize(cs.namespace(|| "Input point.y")); - let _ = p - .is_infinity - .inputize(cs.namespace(|| "Input point.is_infinity")); - } - - #[test] - fn test_ecc_ops() { - test_ecc_ops_with::::GE>(); - test_ecc_ops_with::::GE>(); - } - - fn test_ecc_ops_with() - where - G: Group, - C: CurveAffine, - { - // perform some curve arithmetic - let a = Point::::random_vartime(); - let b = Point::::random_vartime(); - let c = a.add(&b); - let d = a.double(); - let s = G::Scalar::random(&mut OsRng); - let e = a.scalar_mul(&s); - - // perform the same computation by translating to curve types - let a_curve = C::from_xy( - C::Base::from_repr(a.x.to_repr()).unwrap(), - C::Base::from_repr(a.y.to_repr()).unwrap(), - ) - .unwrap(); - let b_curve = C::from_xy( - C::Base::from_repr(b.x.to_repr()).unwrap(), - C::Base::from_repr(b.y.to_repr()).unwrap(), - ) - .unwrap(); - let c_curve = (a_curve + b_curve).to_affine(); - let d_curve = (a_curve + a_curve).to_affine(); - let e_curve = a_curve - .mul(C::Scalar::from_repr(s.to_repr()).unwrap()) - .to_affine(); - - // transform c, d, and e into curve types - let c_curve_2 = C::from_xy( - C::Base::from_repr(c.x.to_repr()).unwrap(), - C::Base::from_repr(c.y.to_repr()).unwrap(), - ) - .unwrap(); - let d_curve_2 = C::from_xy( - C::Base::from_repr(d.x.to_repr()).unwrap(), - C::Base::from_repr(d.y.to_repr()).unwrap(), - ) - .unwrap(); - let e_curve_2 = C::from_xy( - C::Base::from_repr(e.x.to_repr()).unwrap(), - C::Base::from_repr(e.y.to_repr()).unwrap(), - ) - 
.unwrap(); - - // check that we have the same outputs - assert_eq!(c_curve, c_curve_2); - assert_eq!(d_curve, d_curve_2); - assert_eq!(e_curve, e_curve_2); - } - - fn synthesize_smul(mut cs: CS) -> (AllocatedPoint, AllocatedPoint, G::Scalar) - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - - let s = G::Scalar::random(&mut OsRng); - // Allocate bits for s - let bits: Vec = s - .to_le_bits() - .into_iter() - .enumerate() - .map(|(i, bit)| AllocatedBit::alloc(cs.namespace(|| format!("bit {i}")), Some(bit))) - .collect::, SynthesisError>>() - .unwrap(); - let e = a.scalar_mul(cs.namespace(|| "Scalar Mul"), &bits).unwrap(); - inputize_allocated_point(&e, cs.namespace(|| "inputize e")); - (a, e, s) - } - - #[test] - fn test_ecc_circuit_ops() { - test_ecc_circuit_ops_with::( - &expect!["2738"], - &expect!["2724"], - ); - test_ecc_circuit_ops_with::( - &expect!["2738"], - &expect!["2724"], - ); - } - - fn test_ecc_circuit_ops_with(expected_constraints: &Expect, expected_variables: &Expect) - where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_smul::(cs.namespace(|| "synthesize")); - expected_constraints.assert_eq(&cs.num_constraints().to_string()); - expected_variables.assert_eq(&cs.num_aux().to_string()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e, s) = synthesize_smul::(cs.namespace(|| "synthesize")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - let a_p: Point = Point::new( - a.x.get_value().unwrap(), - a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - 
e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_new = a_p.scalar_mul(&s); - assert!(e_p.x == e_new.x && e_p.y == e_new.y); - // Make sure that this is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - fn synthesize_add_equal(mut cs: CS) -> (AllocatedPoint, AllocatedPoint) - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - let e = a.add(cs.namespace(|| "add a to a"), &a).unwrap(); - inputize_allocated_point(&e, cs.namespace(|| "inputize e")); - (a, e) - } - - #[test] - fn test_ecc_circuit_add_equal() { - test_ecc_circuit_add_equal_with::(); - test_ecc_circuit_add_equal_with::(); - } - - fn test_ecc_circuit_add_equal_with() - where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); - println!("Number of constraints: {}", cs.num_constraints()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let (a, e) = synthesize_add_equal::(cs.namespace(|| "synthesize add equal")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let a_p: Point = Point::new( - a.x.get_value().unwrap(), - a.y.get_value().unwrap(), - a.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - let e_new = a_p.add(&a_p); - assert!(e_p.x == e_new.x && e_p.y == e_new.y); - // Make sure that it is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } - - fn synthesize_add_negation(mut cs: CS) -> AllocatedPoint - where - G: Group, - CS: ConstraintSystem, - { - let a = alloc_random_point(cs.namespace(|| "a")).unwrap(); - 
inputize_allocated_point(&a, cs.namespace(|| "inputize a")); - let b = &mut a.clone(); - b.y = AllocatedNum::alloc(cs.namespace(|| "allocate negation of a"), || { - Ok(G::Base::ZERO) - }) - .unwrap(); - inputize_allocated_point(b, cs.namespace(|| "inputize b")); - let e = a.add(cs.namespace(|| "add a to b"), b).unwrap(); - e - } - - #[test] - fn test_ecc_circuit_add_negation() { - test_ecc_circuit_add_negation_with::( - &expect!["39"], - &expect!["34"], - ); - test_ecc_circuit_add_negation_with::( - &expect!["39"], - &expect!["34"], - ); - } - - fn test_ecc_circuit_add_negation_with( - expected_constraints: &Expect, - expected_variables: &Expect, - ) where - E1: Engine::Scalar>, - E2: Engine::Scalar>, - { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_add_negation::(cs.namespace(|| "synthesize add equal")); - expected_constraints.assert_eq(&cs.num_constraints().to_string()); - expected_variables.assert_eq(&cs.num_aux().to_string()); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - // Then the satisfying assignment - let mut cs = SatisfyingAssignment::::new(); - let e = synthesize_add_negation::(cs.namespace(|| "synthesize add negation")); - let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - let e_p: Point = Point::new( - e.x.get_value().unwrap(), - e.y.get_value().unwrap(), - e.is_infinity.get_value().unwrap() == ::Base::ONE, - ); - assert!(e_p.is_infinity); - // Make sure that it is satisfiable - shape.is_sat(&ck, &inst, &witness).unwrap(); - } -} diff --git a/src/gadgets/mod.rs b/src/gadgets/mod.rs deleted file mode 100644 index d7af674..0000000 --- a/src/gadgets/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! This module implements various gadgets necessary for Nova and applications -//! built with Nova. 
-mod ecc; -pub(crate) use ecc::AllocatedPoint; - -mod nonnative; -pub(crate) use nonnative::{ - bignat::{nat_to_limbs, BigNat}, - util::{f_to_nat, Num}, -}; - -mod r1cs; -pub(crate) use r1cs::{ - conditionally_select_alloc_relaxed_r1cs, - conditionally_select_vec_allocated_relaxed_r1cs_instance, AllocatedR1CSInstance, - AllocatedRelaxedR1CSInstance, -}; - -mod utils; -#[cfg(test)] -pub(crate) use utils::alloc_one; -pub(crate) use utils::{ - alloc_bignat_constant, alloc_num_equals, alloc_scalar_as_base, alloc_zero, - conditionally_select_allocated_bit, conditionally_select_bignat, le_bits_to_num, - scalar_as_base, -}; diff --git a/src/gadgets/nonnative/bignat.rs b/src/gadgets/nonnative/bignat.rs deleted file mode 100644 index 9af34dc..0000000 --- a/src/gadgets/nonnative/bignat.rs +++ /dev/null @@ -1,908 +0,0 @@ -use std::{ - borrow::Borrow, - cmp::{max, min}, - convert::From, -}; - -use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError}; -use ff::PrimeField; -use itertools::Itertools as _; -use num_bigint::BigInt; -use num_traits::cast::ToPrimitive; - -use super::{ - util::{f_to_nat, nat_to_f, Bitvector, Num}, - OptionExt, -}; - -/// Compute the natural number represented by an array of limbs. -/// The limbs are assumed to be based the `limb_width` power of 2. -pub fn limbs_to_nat, I: DoubleEndedIterator>( - limbs: I, - limb_width: usize, -) -> BigInt { - limbs.rev().fold(BigInt::from(0), |mut acc, limb| { - acc <<= limb_width as u32; - acc += f_to_nat(limb.borrow()); - acc - }) -} - -fn int_with_n_ones(n: usize) -> BigInt { - let mut m = BigInt::from(1); - m <<= n as u32; - m -= 1; - m -} - -/// Compute the limbs encoding a natural number. -/// The limbs are assumed to be based the `limb_width` power of 2. 
-pub fn nat_to_limbs( - nat: &BigInt, - limb_width: usize, - n_limbs: usize, -) -> Result, SynthesisError> { - let mask = int_with_n_ones(limb_width); - let mut nat = nat.clone(); - if nat.bits() as usize <= n_limbs * limb_width { - Ok((0..n_limbs) - .map(|_| { - let r = &nat & &mask; - nat >>= limb_width as u32; - nat_to_f(&r).unwrap() - }) - .collect()) - } else { - eprintln!("nat {nat} does not fit in {n_limbs} limbs of width {limb_width}"); - Err(SynthesisError::Unsatisfiable) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct BigNatParams { - pub min_bits: usize, - pub max_word: BigInt, - pub limb_width: usize, - pub n_limbs: usize, -} - -impl BigNatParams { - pub fn new(limb_width: usize, n_limbs: usize) -> Self { - let mut max_word = BigInt::from(1) << limb_width as u32; - max_word -= 1; - Self { - max_word, - n_limbs, - limb_width, - min_bits: 0, - } - } -} - -/// A representation of a large natural number (a member of {0, 1, 2, ... }) -#[derive(Clone)] -pub struct BigNat { - /// The linear combinations which constrain the value of each limb of the - /// number - pub limbs: Vec>, - /// The witness values for each limb (filled at witness-time) - pub limb_values: Option>, - /// The value of the whole number (filled at witness-time) - pub value: Option, - /// Parameters - pub params: BigNatParams, -} - -impl PartialEq for BigNat { - fn eq(&self, other: &Self) -> bool { - self.value == other.value && self.params == other.params - } -} -impl Eq for BigNat {} - -impl From> for Polynomial { - fn from(other: BigNat) -> Self { - Self { - coefficients: other.limbs, - values: other.limb_values, - } - } -} - -impl BigNat { - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. If `max_word` is missing, then it is assumed to - /// be `(2 << limb_width) - 1`. The value is provided by a closure - /// returning limb values. 
- pub fn alloc_from_limbs( - mut cs: CS, - f: F, - max_word: Option, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, SynthesisError>, - { - let values_cell = f(); - let mut value = None; - let mut limb_values = None; - let limbs = (0..n_limbs) - .map(|limb_i| { - cs.alloc( - || format!("limb {limb_i}"), - || match values_cell { - Ok(ref vs) => { - if vs.len() != n_limbs { - eprintln!("Values do not match stated limb count"); - return Err(SynthesisError::Unsatisfiable); - } - if value.is_none() { - value = Some(limbs_to_nat::(vs.iter(), limb_width)); - } - if limb_values.is_none() { - limb_values = Some(vs.clone()); - } - Ok(vs[limb_i]) - } - // Hack b/c SynthesisError and io::Error don't implement Clone - Err(ref e) => Err(SynthesisError::from(std::io::Error::new( - std::io::ErrorKind::Other, - format!("{e}"), - ))), - }, - ) - .map(|v| LinearCombination::zero() + v) - }) - .collect::, _>>()?; - Ok(Self { - value, - limb_values, - limbs, - params: BigNatParams { - min_bits: 0, - n_limbs, - max_word: max_word.unwrap_or_else(|| int_with_n_ones(limb_width)), - limb_width, - }, - }) - } - - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. The `max_word` is guaranteed to be `(2 << - /// limb_width) - 1`. The value is provided by a closure returning a - /// natural number. 
- pub fn alloc_from_nat( - mut cs: CS, - f: F, - limb_width: usize, - n_limbs: usize, - ) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, - { - let all_values_cell = - f().and_then(|v| Ok((nat_to_limbs::(&v, limb_width, n_limbs)?, v))); - let mut value = None; - let mut limb_values = Vec::new(); - let limbs = (0..n_limbs) - .map(|limb_i| { - cs.alloc( - || format!("limb {limb_i}"), - || match all_values_cell { - Ok((ref vs, ref v)) => { - if value.is_none() { - value = Some(v.clone()); - } - limb_values.push(vs[limb_i]); - Ok(vs[limb_i]) - } - // Hack b/c SynthesisError and io::Error don't implement Clone - Err(ref e) => Err(SynthesisError::from(std::io::Error::new( - std::io::ErrorKind::Other, - format!("{e}"), - ))), - }, - ) - .map(|v| LinearCombination::zero() + v) - }) - .collect::, _>>()?; - Ok(Self { - value, - limb_values: (!limb_values.is_empty()).then_some(limb_values), - limbs, - params: BigNatParams::new(limb_width, n_limbs), - }) - } - - /// Allocates a `BigNat` in the circuit with `n_limbs` limbs of width - /// `limb_width` each. The `max_word` is guaranteed to be `(2 << - /// limb_width) - 1`. The value is provided by an allocated number - pub fn from_num>( - mut cs: CS, - n: &Num, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let bignat = Self::alloc_from_nat( - cs.namespace(|| "bignat"), - || { - Ok({ - n.value - .as_ref() - .map(|n| f_to_nat(n)) - .ok_or(SynthesisError::AssignmentMissing)? 
- }) - }, - limb_width, - n_limbs, - )?; - - // check if bignat equals n - // (1) decompose `bignat` into a bitvector `bv` - let bv = bignat.decompose(cs.namespace(|| "bv"))?; - // (2) recompose bits and check if it equals n - n.is_equal(cs.namespace(|| "n"), &bv); - - Ok(bignat) - } - - pub fn as_limbs(&self) -> Vec> { - let mut limbs = Vec::new(); - for (i, lc) in self.limbs.iter().enumerate() { - limbs.push(Num::new( - self.limb_values.as_ref().map(|vs| vs[i]), - lc.clone(), - )); - } - limbs - } - - pub fn assert_well_formed>( - &self, - mut cs: CS, - ) -> Result<(), SynthesisError> { - // swap the option and iterator - let limb_values_split = - (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); - for (i, (limb, limb_value)) in self.limbs.iter().zip_eq(limb_values_split).enumerate() { - Num::new(limb_value, limb.clone()) - .fits_in_bits(cs.namespace(|| format!("{i}")), self.params.limb_width)?; - } - Ok(()) - } - - /// Break `self` up into a bit-vector. - pub fn decompose>( - &self, - mut cs: CS, - ) -> Result, SynthesisError> { - let limb_values_split = - (0..self.limbs.len()).map(|i| self.limb_values.as_ref().map(|vs| vs[i])); - let bitvectors: Vec> = self - .limbs - .iter() - .zip_eq(limb_values_split) - .enumerate() - .map(|(i, (limb, limb_value))| { - Num::new(limb_value, limb.clone()).decompose( - cs.namespace(|| format!("subdecmop {i}")), - self.params.limb_width, - ) - }) - .collect::, _>>()?; - let mut bits = Vec::new(); - let mut values = Vec::new(); - let mut allocations = Vec::new(); - for bv in bitvectors { - bits.extend(bv.bits); - if let Some(vs) = bv.values { - values.extend(vs) - }; - allocations.extend(bv.allocations); - } - let values = (!values.is_empty()).then_some(values); - Ok(Bitvector { - bits, - values, - allocations, - }) - } - - pub fn enforce_limb_width_agreement( - &self, - other: &Self, - location: &str, - ) -> Result { - if self.params.limb_width == other.params.limb_width { - Ok(self.params.limb_width) - } 
else { - eprintln!( - "Limb widths {}, {}, do not agree at {}", - self.params.limb_width, other.params.limb_width, location - ); - Err(SynthesisError::Unsatisfiable) - } - } - - pub fn from_poly(poly: Polynomial, limb_width: usize, max_word: BigInt) -> Self { - Self { - params: BigNatParams { - min_bits: 0, - max_word, - n_limbs: poly.coefficients.len(), - limb_width, - }, - limbs: poly.coefficients, - value: poly - .values - .as_ref() - .map(|limb_values| limbs_to_nat::(limb_values.iter(), limb_width)), - limb_values: poly.values, - } - } - - /// Constrain `self` to be equal to `other`, after carrying both. - pub fn equal_when_carried>( - &self, - mut cs: CS, - other: &Self, - ) -> Result<(), SynthesisError> { - self.enforce_limb_width_agreement(other, "equal_when_carried")?; - - // We'll propagate carries over the first `n` limbs. - let n = min(self.limbs.len(), other.limbs.len()); - let target_base = BigInt::from(1u8) << self.params.limb_width as u32; - let mut accumulated_extra = BigInt::from(0usize); - let max_word = max(&self.params.max_word, &other.params.max_word); - let carry_bits = - (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64).ceil() - + 0.1) as usize; - let mut carry_in = Num::new(Some(Scalar::ZERO), LinearCombination::zero()); - - for i in 0..n { - let carry = Num::alloc(cs.namespace(|| format!("carry value {i}")), || { - Ok(nat_to_f( - &((f_to_nat(&self.limb_values.grab()?[i]) - + f_to_nat(&carry_in.value.unwrap()) - + max_word - - f_to_nat(&other.limb_values.grab()?[i])) - / &target_base), - ) - .unwrap()) - })?; - accumulated_extra += max_word; - - cs.enforce( - || format!("carry {i}"), - |lc| lc, - |lc| lc, - |lc| { - lc + &carry_in.num + &self.limbs[i] - &other.limbs[i] - + (nat_to_f(max_word).unwrap(), CS::one()) - - (nat_to_f(&target_base).unwrap(), &carry.num) - - ( - nat_to_f(&(&accumulated_extra % &target_base)).unwrap(), - CS::one(), - ) - }, - ); - - accumulated_extra /= &target_base; - - if i < n - 1 { - 
carry.fits_in_bits(cs.namespace(|| format!("carry {i} decomp")), carry_bits)?; - } else { - cs.enforce( - || format!("carry {i} is out"), - |lc| lc, - |lc| lc, - |lc| lc + &carry.num - (nat_to_f(&accumulated_extra).unwrap(), CS::one()), - ); - } - carry_in = carry; - } - - for (i, zero_limb) in self.limbs.iter().enumerate().skip(n) { - cs.enforce( - || format!("zero self {i}"), - |lc| lc, - |lc| lc, - |lc| lc + zero_limb, - ); - } - for (i, zero_limb) in other.limbs.iter().enumerate().skip(n) { - cs.enforce( - || format!("zero other {i}"), - |lc| lc, - |lc| lc, - |lc| lc + zero_limb, - ); - } - Ok(()) - } - - /// Constrain `self` to be equal to `other`, after carrying both. - /// Uses regrouping internally to take full advantage of the field size and - /// reduce the amount of carrying. - pub fn equal_when_carried_regroup>( - &self, - mut cs: CS, - other: &Self, - ) -> Result<(), SynthesisError> { - self.enforce_limb_width_agreement(other, "equal_when_carried_regroup")?; - let max_word = max(&self.params.max_word, &other.params.max_word); - let carry_bits = - (((max_word.to_f64().unwrap() * 2.0).log2() - self.params.limb_width as f64).ceil() - + 0.1) as usize; - let limbs_per_group = max( - (Scalar::CAPACITY as usize - carry_bits) / self.params.limb_width, - 1, - ); - - let self_grouped = self.group_limbs(limbs_per_group); - let other_grouped = other.group_limbs(limbs_per_group); - self_grouped.equal_when_carried(cs.namespace(|| "grouped"), &other_grouped) - } - - pub fn add(&self, other: &Self) -> Result { - self.enforce_limb_width_agreement(other, "add")?; - let n_limbs = max(self.params.n_limbs, other.params.n_limbs); - let max_word = &self.params.max_word + &other.params.max_word; - let limbs: Vec> = (0..n_limbs) - .map(|i| match (self.limbs.get(i), other.limbs.get(i)) { - (Some(a), Some(b)) => a.clone() + b, - (Some(a), None) => a.clone(), - (None, Some(b)) => b.clone(), - (None, None) => unreachable!(), - }) - .collect(); - let limb_values: Option> = 
self.limb_values.as_ref().and_then(|x| { - other.limb_values.as_ref().map(|y| { - (0..n_limbs) - .map(|i| match (x.get(i), y.get(i)) { - (Some(a), Some(b)) => { - let mut t = *a; - t.add_assign(b); - t - } - (Some(a), None) | (None, Some(a)) => *a, - (None, None) => unreachable!(), - }) - .collect() - }) - }); - let value = self - .value - .as_ref() - .and_then(|x| other.value.as_ref().map(|y| x + y)); - Ok(Self { - limb_values, - value, - limbs, - params: BigNatParams { - min_bits: max(self.params.min_bits, other.params.min_bits), - n_limbs, - max_word, - limb_width: self.params.limb_width, - }, - }) - } - - /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. - pub fn mult_mod>( - &self, - mut cs: CS, - other: &Self, - modulus: &Self, - ) -> Result<(Self, Self), SynthesisError> { - self.enforce_limb_width_agreement(other, "mult_mod")?; - let limb_width = self.params.limb_width; - let quotient_bits = - (self.n_bits() + other.n_bits()).saturating_sub(modulus.params.min_bits); - let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; - let quotient = Self::alloc_from_nat( - cs.namespace(|| "quotient"), - || { - Ok({ - let mut x = self.value.grab()?.clone(); - x *= other.value.grab()?; - x /= modulus.value.grab()?; - x - }) - }, - self.params.limb_width, - quotient_limbs, - )?; - quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; - let remainder = Self::alloc_from_nat( - cs.namespace(|| "remainder"), - || { - Ok({ - let mut x = self.value.grab()?.clone(); - x *= other.value.grab()?; - x %= modulus.value.grab()?; - x - }) - }, - self.params.limb_width, - modulus.limbs.len(), - )?; - remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; - let a_poly = Polynomial::from(self.clone()); - let b_poly = Polynomial::from(other.clone()); - let mod_poly = Polynomial::from(modulus.clone()); - let q_poly = Polynomial::from(quotient.clone()); - let r_poly = Polynomial::from(remainder.clone()); - - // a * b 
- let left = a_poly.alloc_product(cs.namespace(|| "left"), &b_poly)?; - let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; - // q * m + r - let right = right_product.sum(&r_poly); - - let left_max_word = { - let mut x = BigInt::from(min(self.limbs.len(), other.limbs.len())); - x *= &self.params.max_word; - x *= &other.params.max_word; - x - }; - let right_max_word = { - let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); - x *= "ient.params.max_word; - x *= &modulus.params.max_word; - x += &remainder.params.max_word; - x - }; - - let left_int = Self::from_poly(left, limb_width, left_max_word); - let right_int = Self::from_poly(right, limb_width, right_max_word); - left_int.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; - Ok((quotient, remainder)) - } - - /// Compute a `BigNat` constrained to be equal to `self * other % modulus`. - pub fn red_mod>( - &self, - mut cs: CS, - modulus: &Self, - ) -> Result { - self.enforce_limb_width_agreement(modulus, "red_mod")?; - let limb_width = self.params.limb_width; - let quotient_bits = self.n_bits().saturating_sub(modulus.params.min_bits); - let quotient_limbs = quotient_bits.saturating_sub(1) / limb_width + 1; - let quotient = Self::alloc_from_nat( - cs.namespace(|| "quotient"), - || Ok(self.value.grab()? / modulus.value.grab()?), - self.params.limb_width, - quotient_limbs, - )?; - quotient.assert_well_formed(cs.namespace(|| "quotient rangecheck"))?; - let remainder = Self::alloc_from_nat( - cs.namespace(|| "remainder"), - || Ok(self.value.grab()? 
% modulus.value.grab()?), - self.params.limb_width, - modulus.limbs.len(), - )?; - remainder.assert_well_formed(cs.namespace(|| "remainder rangecheck"))?; - let mod_poly = Polynomial::from(modulus.clone()); - let q_poly = Polynomial::from(quotient.clone()); - let r_poly = Polynomial::from(remainder.clone()); - - // q * m + r - let right_product = q_poly.alloc_product(cs.namespace(|| "right_product"), &mod_poly)?; - let right = right_product.sum(&r_poly); - - let right_max_word = { - let mut x = BigInt::from(min(quotient.limbs.len(), modulus.limbs.len())); - x *= "ient.params.max_word; - x *= &modulus.params.max_word; - x += &remainder.params.max_word; - x - }; - - let right_int = Self::from_poly(right, limb_width, right_max_word); - self.equal_when_carried_regroup(cs.namespace(|| "carry"), &right_int)?; - Ok(remainder) - } - - /// Combines limbs into groups. - pub fn group_limbs(&self, limbs_per_group: usize) -> Self { - let n_groups = (self.limbs.len() - 1) / limbs_per_group + 1; - let limb_values = self.limb_values.as_ref().map(|vs| { - let mut values: Vec = vec![Scalar::ZERO; n_groups]; - let mut shift = Scalar::ONE; - let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { - l = l.double(); - l - }); - for (i, v) in vs.iter().enumerate() { - if i % limbs_per_group == 0 { - shift = Scalar::ONE; - } - let mut a = shift; - a *= v; - values[i / limbs_per_group].add_assign(&a); - shift.mul_assign(&limb_block); - } - values - }); - let limbs = { - let mut limbs: Vec> = - vec![LinearCombination::zero(); n_groups]; - let mut shift = Scalar::ONE; - let limb_block = (0..self.params.limb_width).fold(Scalar::ONE, |mut l, _| { - l = l.double(); - l - }); - for (i, limb) in self.limbs.iter().enumerate() { - if i % limbs_per_group == 0 { - shift = Scalar::ONE; - } - limbs[i / limbs_per_group] = - std::mem::replace(&mut limbs[i / limbs_per_group], LinearCombination::zero()) - + (shift, limb); - shift.mul_assign(&limb_block); - } - limbs - }; - let max_word 
= (0..limbs_per_group).fold(BigInt::from(0u8), |mut acc, i| { - acc.set_bit((i * self.params.limb_width) as u64, true); - acc - }) * &self.params.max_word; - Self { - params: BigNatParams { - min_bits: self.params.min_bits, - limb_width: self.params.limb_width * limbs_per_group, - n_limbs: limbs.len(), - max_word, - }, - limbs, - limb_values, - value: self.value.clone(), - } - } - - pub fn n_bits(&self) -> usize { - assert!(self.params.n_limbs > 0); - self.params.limb_width * (self.params.n_limbs - 1) + self.params.max_word.bits() as usize - } -} - -pub struct Polynomial { - pub coefficients: Vec>, - pub values: Option>, -} - -impl Polynomial { - pub fn alloc_product>( - &self, - mut cs: CS, - other: &Self, - ) -> Result { - let n_product_coeffs = self.coefficients.len() + other.coefficients.len() - 1; - let values = self.values.as_ref().and_then(|self_vs| { - other.values.as_ref().map(|other_vs| { - let mut values: Vec = std::iter::repeat_with(|| Scalar::ZERO) - .take(n_product_coeffs) - .collect(); - for (self_i, self_v) in self_vs.iter().enumerate() { - for (other_i, other_v) in other_vs.iter().enumerate() { - let mut v = *self_v; - v.mul_assign(other_v); - values[self_i + other_i].add_assign(&v); - } - } - values - }) - }); - let coefficients = (0..n_product_coeffs) - .map(|i| { - Ok(LinearCombination::zero() - + cs.alloc(|| format!("prod {i}"), || Ok(values.grab()?[i]))?) 
- }) - .collect::>, SynthesisError>>()?; - let product = Self { - coefficients, - values, - }; - let one = Scalar::ONE; - let mut x = Scalar::ZERO; - for _ in 1..(n_product_coeffs + 1) { - x.add_assign(&one); - cs.enforce( - || format!("pointwise product @ {x:?}"), - |lc| { - let mut i = Scalar::ONE; - self.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - |lc| { - let mut i = Scalar::ONE; - other.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - |lc| { - let mut i = Scalar::ONE; - product.coefficients.iter().fold(lc, |lc, c| { - let r = lc + (i, c); - i.mul_assign(&x); - r - }) - }, - ) - } - Ok(product) - } - - pub fn sum(&self, other: &Self) -> Self { - let n_coeffs = max(self.coefficients.len(), other.coefficients.len()); - let values = self.values.as_ref().and_then(|self_vs| { - other.values.as_ref().map(|other_vs| { - (0..n_coeffs) - .map(|i| { - let mut s = Scalar::ZERO; - if i < self_vs.len() { - s.add_assign(&self_vs[i]); - } - if i < other_vs.len() { - s.add_assign(&other_vs[i]); - } - s - }) - .collect() - }) - }); - let coefficients = (0..n_coeffs) - .map(|i| { - let mut lc = LinearCombination::zero(); - if i < self.coefficients.len() { - lc = lc + &self.coefficients[i]; - } - if i < other.coefficients.len() { - lc = lc + &other.coefficients[i]; - } - lc - }) - .collect(); - Self { - coefficients, - values, - } - } -} - -#[cfg(test)] -mod tests { - use bellpepper_core::{test_cs::TestConstraintSystem, Circuit}; - #[cfg(not(target_arch = "wasm32"))] - use proptest::prelude::*; - - use super::*; - use crate::provider::bn256_grumpkin::bn256::Scalar; - - pub struct PolynomialMultiplier { - pub a: Vec, - pub b: Vec, - } - - impl Circuit for PolynomialMultiplier { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let a = Polynomial { - coefficients: self - .a - .iter() - .enumerate() - .map(|(i, x)| { - Ok(LinearCombination::zero() - + 
cs.alloc(|| format!("coeff_a {i}"), || Ok(*x))?) - }) - .collect::>, SynthesisError>>()?, - values: Some(self.a), - }; - let b = Polynomial { - coefficients: self - .b - .iter() - .enumerate() - .map(|(i, x)| { - Ok(LinearCombination::zero() - + cs.alloc(|| format!("coeff_b {i}"), || Ok(*x))?) - }) - .collect::>, SynthesisError>>()?, - values: Some(self.b), - }; - let _prod = a.alloc_product(cs.namespace(|| "product"), &b)?; - Ok(()) - } - } - - #[test] - fn test_polynomial_multiplier_circuit() { - let mut cs = TestConstraintSystem::::new(); - - let circuit = PolynomialMultiplier { - a: [1, 1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), - b: [1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), - }; - - circuit.synthesize(&mut cs).expect("synthesis failed"); - - if let Some(token) = cs.which_is_unsatisfied() { - eprintln!("Error: {} is unsatisfied", token); - } - } - - #[derive(Debug)] - pub struct BigNatBitDecompInputs { - pub n: BigInt, - } - - pub struct BigNatBitDecompParams { - pub limb_width: usize, - pub n_limbs: usize, - } - - pub struct BigNatBitDecomp { - inputs: Option, - params: BigNatBitDecompParams, - } - - impl Circuit for BigNatBitDecomp { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let n = BigNat::alloc_from_nat( - cs.namespace(|| "n"), - || Ok(self.inputs.grab()?.n.clone()), - self.params.limb_width, - self.params.n_limbs, - )?; - n.decompose(cs.namespace(|| "decomp"))?; - Ok(()) - } - } - - #[cfg(not(target_arch = "wasm32"))] - proptest! { - #![proptest_config(ProptestConfig { - cases: 10, // this test is costlier as max n gets larger - .. 
ProptestConfig::default() - })] - #[test] - fn test_big_nat_can_decompose(n in any::(), limb_width in 40u8..200) { - let n = n as usize; - - let n_limbs = if n == 0 { - 1 - } else { - (n - 1) / limb_width as usize + 1 - }; - - let circuit = BigNatBitDecomp { - inputs: Some(BigNatBitDecompInputs { - n: BigInt::from(n), - }), - params: BigNatBitDecompParams { - limb_width: limb_width as usize, - n_limbs, - }, - }; - let mut cs = TestConstraintSystem::::new(); - circuit.synthesize(&mut cs).expect("synthesis failed"); - prop_assert!(cs.is_satisfied()); - } - } -} diff --git a/src/gadgets/nonnative/mod.rs b/src/gadgets/nonnative/mod.rs deleted file mode 100644 index 93cfc74..0000000 --- a/src/gadgets/nonnative/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! This module implements various gadgets necessary for doing non-native -//! arithmetic Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licenced under MIT - -use bellpepper_core::SynthesisError; -use ff::PrimeField; - -trait OptionExt { - fn grab(&self) -> Result<&T, SynthesisError>; -} - -impl OptionExt for Option { - fn grab(&self) -> Result<&T, SynthesisError> { - self.as_ref().ok_or(SynthesisError::AssignmentMissing) - } -} - -trait BitAccess { - fn get_bit(&self, i: usize) -> Option; -} - -impl BitAccess for Scalar { - fn get_bit(&self, i: usize) -> Option { - if i as u32 >= Scalar::NUM_BITS { - return None; - } - - let (byte_pos, bit_pos) = (i / 8, i % 8); - let byte = self.to_repr().as_ref()[byte_pos]; - let bit = byte >> bit_pos & 1; - Some(bit == 1) - } -} - -pub mod bignat; -pub mod util; diff --git a/src/gadgets/nonnative/util.rs b/src/gadgets/nonnative/util.rs deleted file mode 100644 index 1fcf366..0000000 --- a/src/gadgets/nonnative/util.rs +++ /dev/null @@ -1,293 +0,0 @@ -use std::{ - convert::From, - io::{self, Write}, -}; - -use bellpepper_core::{ - num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError, Variable, -}; -use 
byteorder::WriteBytesExt; -use ff::PrimeField; -use num_bigint::{BigInt, Sign}; - -use super::{BitAccess, OptionExt}; - -#[derive(Clone)] -/// A representation of a bit -pub struct Bit { - /// The linear combination which constrain the value of the bit - pub bit: LinearCombination, - #[allow(unused)] - /// The value of the bit (filled at witness-time) - pub value: Option, -} - -#[derive(Clone)] -/// A representation of a bit-vector -pub struct Bitvector { - /// The linear combination which constrain the values of the bits - pub bits: Vec>, - /// The value of the bits (filled at witness-time) - pub values: Option>, - /// Allocated bit variables - pub allocations: Vec>, -} - -impl Bit { - /// Allocate a variable in the constraint system which can only be a - /// boolean value. - pub fn alloc>( - mut cs: CS, - value: Option, - ) -> Result { - let var = cs.alloc( - || "boolean", - || { - if *value.grab()? { - Ok(Scalar::ONE) - } else { - Ok(Scalar::ZERO) - } - }, - )?; - - // Constrain: (1 - a) * a = 0 - // This constrains a to be either 0 or 1. - cs.enforce( - || "boolean constraint", - |lc| lc + CS::one() - var, - |lc| lc + var, - |lc| lc, - ); - - Ok(Self { - bit: LinearCombination::zero() + var, - value, - }) - } -} - -pub struct Num { - pub num: LinearCombination, - pub value: Option, -} - -impl Num { - pub const fn new(value: Option, num: LinearCombination) -> Self { - Self { value, num } - } - pub fn alloc(mut cs: CS, value: F) -> Result - where - CS: ConstraintSystem, - F: FnOnce() -> Result, - { - let mut new_value = None; - let var = cs.alloc( - || "num", - || { - let tmp = value()?; - - new_value = Some(tmp); - - Ok(tmp) - }, - )?; - - Ok(Self { - value: new_value, - num: LinearCombination::zero() + var, - }) - } - - pub fn fits_in_bits>( - &self, - mut cs: CS, - n_bits: usize, - ) -> Result<(), SynthesisError> { - let v = self.value; - - // Allocate all but the first bit. 
- let bits: Vec = (1..n_bits) - .map(|i| { - cs.alloc( - || format!("bit {i}"), - || { - let r = if *v.grab()?.get_bit(i).grab()? { - Scalar::ONE - } else { - Scalar::ZERO - }; - Ok(r) - }, - ) - }) - .collect::>()?; - - for (i, v) in bits.iter().enumerate() { - cs.enforce( - || format!("{i} is bit"), - |lc| lc + *v, - |lc| lc + CS::one() - *v, - |lc| lc, - ) - } - - // Last bit - cs.enforce( - || "last bit", - |mut lc| { - let mut f = Scalar::ONE; - lc = lc + &self.num; - for v in bits.iter() { - f = f.double(); - lc = lc - (f, *v); - } - lc - }, - |mut lc| { - lc = lc + CS::one(); - let mut f = Scalar::ONE; - lc = lc - &self.num; - for v in bits.iter() { - f = f.double(); - lc = lc + (f, *v); - } - lc - }, - |lc| lc, - ); - Ok(()) - } - - /// Checks if the natural number equals an array of bits. - pub fn is_equal>(&self, mut cs: CS, other: &Bitvector) { - let mut f = Scalar::ONE; - let sum = other - .allocations - .iter() - .fold(LinearCombination::zero(), |lc, bit| { - let l = lc + (f, &bit.bit); - f = f.double(); - l - }); - let sum_lc = LinearCombination::zero() + &self.num - ∑ - cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); - } - - /// Compute the natural number represented by an array of limbs. - /// The limbs are assumed to be based the `limb_width` power of 2. 
- /// Low-index bits are low-order - pub fn decompose>( - &self, - mut cs: CS, - n_bits: usize, - ) -> Result, SynthesisError> { - let values: Option> = self.value.as_ref().map(|v| { - let num = *v; - (0..n_bits).map(|i| num.get_bit(i).unwrap()).collect() - }); - let allocations: Vec> = (0..n_bits) - .map(|bit_i| { - Bit::alloc( - cs.namespace(|| format!("bit{bit_i}")), - values.as_ref().map(|vs| vs[bit_i]), - ) - }) - .collect::, _>>()?; - let mut f = Scalar::ONE; - let sum = allocations - .iter() - .fold(LinearCombination::zero(), |lc, bit| { - let l = lc + (f, &bit.bit); - f = f.double(); - l - }); - let sum_lc = LinearCombination::zero() + &self.num - ∑ - cs.enforce(|| "sum", |lc| lc + &sum_lc, |lc| lc + CS::one(), |lc| lc); - let bits: Vec> = allocations - .iter() - .map(|a| LinearCombination::zero() + &a.bit) - .collect(); - Ok(Bitvector { - allocations, - values, - bits, - }) - } - - pub fn as_allocated_num>( - &self, - mut cs: CS, - ) -> Result, SynthesisError> { - let new = AllocatedNum::alloc(cs.namespace(|| "alloc"), || Ok(*self.value.grab()?))?; - cs.enforce( - || "eq", - |lc| lc, - |lc| lc, - |lc| lc + new.get_variable() - &self.num, - ); - Ok(new) - } -} - -impl From> for Num { - fn from(a: AllocatedNum) -> Self { - Self::new(a.get_value(), LinearCombination::zero() + a.get_variable()) - } -} - -fn write_be(f: &F, mut writer: W) -> io::Result<()> { - for digit in f.to_repr().as_ref().iter().rev() { - writer.write_u8(*digit)?; - } - - Ok(()) -} - -/// Convert a field element to a natural number -pub fn f_to_nat(f: &Scalar) -> BigInt { - let mut s = Vec::new(); - write_be(f, &mut s).unwrap(); - BigInt::from_bytes_le(Sign::Plus, f.to_repr().as_ref()) -} - -/// Convert a natural number to a field element. -/// Returns `None` if the number is too big for the field. 
-pub fn nat_to_f(n: &BigInt) -> Option { - Scalar::from_str_vartime(&format!("{n}")) -} - -#[cfg(test)] -mod tests { - use bitvec::field::BitField as _; - use ff::PrimeFieldBits; - use rand::SeedableRng; - use rand_chacha::ChaCha20Rng; - - // the write_be function above assumes Field::to_repr() outputs a representation - // that's an instance of `AsRef<[u8]>` in lower endian. We test that here, - // as this is not what the I2OSP standard recommends and may change in some - // implementations. - fn test_repr_is_le_with() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - for _i in 0..50 { - let f = F::random(&mut rng); - // This is guaranteed to be in LE - let le_bits = f.to_le_bits(); - let leftmost_u64 = le_bits[..64].load_le::(); - - // This is not - let f_repr = f.to_repr(); - let bytes: [u8; 8] = f_repr.as_ref()[..8].try_into().unwrap(); - let u64_from_repr = u64::from_le_bytes(bytes); - - assert_eq!(leftmost_u64, u64_from_repr); - } - } - - #[test] - fn test_repr_is_le() { - test_repr_is_le_with::(); - test_repr_is_le_with::(); - } -} diff --git a/src/gadgets/r1cs.rs b/src/gadgets/r1cs.rs deleted file mode 100644 index 1f5eb9d..0000000 --- a/src/gadgets/r1cs.rs +++ /dev/null @@ -1,427 +0,0 @@ -//! This module implements various gadgets necessary for folding R1CS types. 
-use bellpepper::gadgets::{ - boolean::Boolean, boolean_utils::conditionally_select, num::AllocatedNum, Assignment, -}; -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools as _; - -use super::nonnative::{ - bignat::BigNat, - util::{f_to_nat, Num}, -}; -use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, - gadgets::{ - ecc::AllocatedPoint, - utils::{ - alloc_bignat_constant, alloc_one, alloc_scalar_as_base, conditionally_select_bignat, - le_bits_to_num, - }, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - traits::{commitment::CommitmentTrait, Engine, Group, ROCircuitTrait, ROConstantsCircuit}, -}; - -/// An Allocated R1CS Instance -#[derive(Clone)] -pub struct AllocatedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) X: [AllocatedNum; N], -} - -impl AllocatedR1CSInstance { - /// Takes the r1cs instance and creates a new allocated r1cs instance - pub fn alloc::Base>>( - mut cs: CS, - u: Option<&R1CSInstance>, - ) -> Result { - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - u.map(|u| u.comm_W.to_coordinates()), - )?; - W.check_on_curve(cs.namespace(|| "check W on curve"))?; - - let X: [AllocatedNum; N] = (0..N) - .map(|idx| { - alloc_scalar_as_base::( - cs.namespace(|| format!("allocating X[{idx}]")), - u.map(|u| u.X[idx]), - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, X }) - } - - /// Absorb the provided instance in the RO - pub fn absorb_in_ro(&self, ro: &mut E::ROCircuit) { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - self.X.iter().for_each(|x| ro.absorb(x)); - } -} - -/// An Allocated Relaxed R1CS Instance -#[derive(Clone)] -pub struct AllocatedRelaxedR1CSInstance { - pub(crate) W: AllocatedPoint, - pub(crate) E: AllocatedPoint, - pub(crate) u: AllocatedNum, - pub(crate) X: [BigNat; N], -} - -impl AllocatedRelaxedR1CSInstance { - /// Allocates the given `RelaxedR1CSInstance` as a witness of the circuit - pub fn alloc::Base>>( - mut cs: CS, - inst: Option<&RelaxedR1CSInstance>, - limb_width: usize, - n_limbs: usize, - ) -> Result { - // We do not need to check that W or E are well-formed (e.g., on the curve) as - // we do a hash check in the Nova augmented circuit, which ensures that - // the relaxed instance came from a prior iteration of Nova. - let W = AllocatedPoint::alloc( - cs.namespace(|| "allocate W"), - inst.map(|inst| inst.comm_W.to_coordinates()), - )?; - - let E = AllocatedPoint::alloc( - cs.namespace(|| "allocate E"), - inst.map(|inst| inst.comm_E.to_coordinates()), - )?; - - // u << |E::Base| despite the fact that u is a scalar. - // So we parse all of its bytes as a E::Base element - let u = - alloc_scalar_as_base::(cs.namespace(|| "allocate u"), inst.map(|inst| inst.u))?; - - // Allocate X. If the input instance is None then allocate components as zero. - let X = (0..N) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocate X[{idx}]")), - || Ok(f_to_nat(&inst.map_or(E::Scalar::ZERO, |inst| inst.X[idx]))), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, E, u, X }) - } - - /// Allocates the hardcoded default `RelaxedR1CSInstance` in the circuit. - /// W = E = 0, u = 0, X0 = X1 = 0 - pub fn default::Base>>( - mut cs: CS, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let W = AllocatedPoint::default(cs.namespace(|| "allocate W")); - let E = W.clone(); - - let u = W.x.clone(); // In the default case, W.x = u = 0 - - // X is allocated and in the honest prover case set to zero - // If the prover is malicious, it can set to arbitrary values, but the resulting - // relaxed R1CS instance with the the checked default values of W, E, and u must - // still be satisfying - - let X = (0..N) - .map(|idx| { - BigNat::alloc_from_nat( - cs.namespace(|| format!("allocate X_default[{idx}]")), - || Ok(f_to_nat(&E::Scalar::ZERO)), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? - .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W, E, u, X }) - } - - /// Allocates the R1CS Instance as a `RelaxedR1CSInstance` in the circuit. - /// E = 0, u = 1 - pub fn from_r1cs_instance::Base>>( - mut cs: CS, - inst: AllocatedR1CSInstance, - limb_width: usize, - n_limbs: usize, - ) -> Result { - let E = AllocatedPoint::default(cs.namespace(|| "allocate default E")); - - let u = alloc_one(cs.namespace(|| "one")); - - let X = inst - .X - .into_iter() - .enumerate() - .map(|(idx, x)| { - BigNat::from_num( - cs.namespace(|| format!("allocate X[{idx}] from relaxed r1cs")), - &Num::from(x), - limb_width, - n_limbs, - ) - }) - .collect::, _>>()? 
- .try_into() - .map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { W: inst.W, E, u, X }) - } - - /// Absorb the provided instance in the RO - pub fn absorb_in_ro::Base>>( - &self, - mut cs: CS, - ro: &mut E::ROCircuit, - ) -> Result<(), SynthesisError> { - ro.absorb(&self.W.x); - ro.absorb(&self.W.y); - ro.absorb(&self.W.is_infinity); - ro.absorb(&self.E.x); - ro.absorb(&self.E.y); - ro.absorb(&self.E.is_infinity); - ro.absorb(&self.u); - - self.X.iter().enumerate().try_for_each(|(idx, X)| { - X.as_limbs().iter().enumerate().try_for_each( - |(i, limb)| -> Result<(), SynthesisError> { - ro.absorb(&limb.as_allocated_num( - cs.namespace(|| format!("convert limb {i} of X_r[{idx}] to num")), - )?); - Ok(()) - }, - ) - })?; - - Ok(()) - } - - /// Folds self with a relaxed r1cs instance and returns the result - pub fn fold_with_r1cs::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, // hash of R1CSShape of F' - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - ro_consts: ROConstantsCircuit, - limb_width: usize, - n_limbs: usize, - ) -> Result { - // Compute r: - let mut ro = E::ROCircuit::new(ro_consts, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + N); - ro.absorb(params); - - // running instance `U` does not need to absorbed since u.X[0] = Hash(params, U, - // i, z0, zi) - u.absorb_in_ro(&mut ro); - - ro.absorb(&T.x); - ro.absorb(&T.y); - ro.absorb(&T.is_infinity); - let r_bits = ro.squeeze(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; - let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; - - // W_fold = self.W + r * u.W - let rW = u.W.scalar_mul(cs.namespace(|| "r * u.W"), &r_bits)?; - let W_fold = self.W.add(cs.namespace(|| "self.W + r * u.W"), &rW)?; - - // E_fold = self.E + r * T - let rT = T.scalar_mul(cs.namespace(|| "r * T"), &r_bits)?; - let E_fold = self.E.add(cs.namespace(|| "self.E + r * T"), &rT)?; - - // u_fold = u_r + r - let u_fold = AllocatedNum::alloc(cs.namespace(|| "u_fold"), || { 
- Ok(*self.u.get_value().get()? + r.get_value().get()?) - })?; - cs.enforce( - || "Check u_fold", - |lc| lc, - |lc| lc, - |lc| lc + u_fold.get_variable() - self.u.get_variable() - r.get_variable(), - ); - - // Fold the IO: - // Analyze r into limbs - let r_bn = BigNat::from_num( - cs.namespace(|| "allocate r_bn"), - &Num::from(r), - limb_width, - n_limbs, - )?; - - // Allocate the order of the non-native field as a constant - let m_bn = alloc_bignat_constant( - cs.namespace(|| "alloc m"), - &E::GE::group_params().2, - limb_width, - n_limbs, - )?; - - let mut X_fold = vec![]; - - for (idx, (X, x)) in self.X.iter().zip_eq(u.X.iter()).enumerate() { - let x_bn = BigNat::from_num( - cs.namespace(|| format!("allocate u.X[{idx}]_bn")), - &Num::from(x.clone()), - limb_width, - n_limbs, - )?; - - let (_, r) = x_bn.mult_mod(cs.namespace(|| format!("r*u.X[{idx}]")), &r_bn, &m_bn)?; - let r_new = X.add(&r)?; - let X_i_fold = - r_new.red_mod(cs.namespace(|| format!("reduce folded X[{idx}]")), &m_bn)?; - X_fold.push(X_i_fold); - } - - let X_fold = X_fold.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - Ok(Self { - W: W_fold, - E: E_fold, - u: u_fold, - X: X_fold, - }) - } - - /// If the condition is true then returns this otherwise it returns the - /// other - pub fn conditionally_select::Base>>( - &self, - cs: CS, - other: &Self, - condition: &Boolean, - ) -> Result { - conditionally_select_alloc_relaxed_r1cs(cs, self, other, condition) - } -} - -/// c = cond ? 
a: b, where a, b: `AllocatedRelaxedR1CSInstance` -pub fn conditionally_select_alloc_relaxed_r1cs< - E: Engine, - CS: ConstraintSystem<::Base>, - const N: usize, ->( - mut cs: CS, - a: &AllocatedRelaxedR1CSInstance, - b: &AllocatedRelaxedR1CSInstance, - condition: &Boolean, -) -> Result, SynthesisError> { - let c_X = - a.X.iter() - .zip_eq(b.X.iter()) - .enumerate() - .map(|(idx, (a, b))| { - conditionally_select_bignat( - cs.namespace(|| format!("X[{idx}] = cond ? a.X[{idx}] : b.X[{idx}]")), - a, - b, - condition, - ) - }) - .collect::, _>>()?; - - let c_X = c_X.try_into().map_err(|err: Vec<_>| { - SynthesisError::IncompatibleLengthVector(format!("{} != {N}", err.len())) - })?; - - let c = AllocatedRelaxedR1CSInstance { - W: conditionally_select_point( - cs.namespace(|| "W = cond ? a.W : b.W"), - &a.W, - &b.W, - condition, - )?, - E: conditionally_select_point( - cs.namespace(|| "E = cond ? a.E : b.E"), - &a.E, - &b.E, - condition, - )?, - u: conditionally_select( - cs.namespace(|| "u = cond ? a.u : b.u"), - &a.u, - &b.u, - condition, - )?, - X: c_X, - }; - Ok(c) -} - -/// c = cond ? a: b, where a, b: `Vec` -pub fn conditionally_select_vec_allocated_relaxed_r1cs_instance< - E: Engine, - CS: ConstraintSystem<::Base>, - const N: usize, ->( - mut cs: CS, - a: &[AllocatedRelaxedR1CSInstance], - b: &[AllocatedRelaxedR1CSInstance], - condition: &Boolean, -) -> Result>, SynthesisError> { - a.iter() - .enumerate() - .zip_eq(b.iter()) - .map(|((i, a), b)| { - a.conditionally_select( - cs.namespace(|| format!("cond ? a[{}]: b[{}]", i, i)), - b, - condition, - ) - }) - .collect::>, _>>() -} - -/// c = cond ? a: b, where a, b: `AllocatedPoint` -pub fn conditionally_select_point>( - mut cs: CS, - a: &AllocatedPoint, - b: &AllocatedPoint, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedPoint { - x: conditionally_select( - cs.namespace(|| "x = cond ? a.x : b.x"), - &a.x, - &b.x, - condition, - )?, - y: conditionally_select( - cs.namespace(|| "y = cond ? 
a.y : b.y"), - &a.y, - &b.y, - condition, - )?, - is_infinity: conditionally_select( - cs.namespace(|| "is_infinity = cond ? a.is_infinity : b.is_infinity"), - &a.is_infinity, - &b.is_infinity, - condition, - )?, - }; - Ok(c) -} diff --git a/src/gadgets/utils.rs b/src/gadgets/utils.rs deleted file mode 100644 index 9eb770e..0000000 --- a/src/gadgets/utils.rs +++ /dev/null @@ -1,404 +0,0 @@ -//! This module implements various low-level gadgets -use bellpepper::gadgets::Assignment; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, LinearCombination, SynthesisError, -}; -use ff::{Field, PrimeField, PrimeFieldBits}; -use num_bigint::BigInt; - -use super::nonnative::bignat::{nat_to_limbs, BigNat}; -use crate::traits::Engine; - -/// Gets as input the little indian representation of a number and spits out the -/// number -pub fn le_bits_to_num( - mut cs: CS, - bits: &[AllocatedBit], -) -> Result, SynthesisError> -where - Scalar: PrimeField + PrimeFieldBits, - CS: ConstraintSystem, -{ - // We loop over the input bits and construct the constraint - // and the field element that corresponds to the result - let mut lc = LinearCombination::zero(); - let mut coeff = Scalar::ONE; - let mut fe = Some(Scalar::ZERO); - for bit in bits.iter() { - lc = lc + (coeff, bit.get_variable()); - fe = bit.get_value().map(|val| { - if val { - fe.unwrap() + coeff - } else { - fe.unwrap() - } - }); - coeff = coeff.double(); - } - let num = AllocatedNum::alloc(cs.namespace(|| "Field element"), || { - fe.ok_or(SynthesisError::AssignmentMissing) - })?; - lc = lc - num.get_variable(); - cs.enforce(|| "compute number from bits", |lc| lc, |lc| lc, |_| lc); - Ok(num) -} - -/// Allocate a variable that is set to zero -pub fn alloc_zero>(mut cs: CS) -> AllocatedNum { - let zero = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ZERO); - cs.enforce( - || "check zero is valid", - |lc| lc, - |lc| lc, - |lc| lc + zero.get_variable(), - ); - 
zero -} - -/// Allocate a variable that is set to one -pub fn alloc_one>(mut cs: CS) -> AllocatedNum { - let one = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc"), || F::ONE); - cs.enforce( - || "check one is valid", - |lc| lc + CS::one(), - |lc| lc + CS::one(), - |lc| lc + one.get_variable(), - ); - - one -} - -/// Allocate a scalar as a base. Only to be used is the scalar fits in base! -pub fn alloc_scalar_as_base( - mut cs: CS, - input: Option, -) -> Result, SynthesisError> -where - E: Engine, - CS: ConstraintSystem<::Base>, -{ - AllocatedNum::alloc(cs.namespace(|| "allocate scalar as base"), || { - let val = scalar_as_base::(input.unwrap_or(E::Scalar::ZERO)); - Ok(val) - }) -} - -/// interpret scalar as base -pub fn scalar_as_base(input: E::Scalar) -> E::Base { - let input_bits = input.to_le_bits(); - let mut mult = E::Base::ONE; - let mut val = E::Base::ZERO; - for bit in input_bits { - if bit { - val += mult; - } - mult = mult + mult; - } - val -} - -/// Allocate bignat a constant -pub fn alloc_bignat_constant>( - mut cs: CS, - val: &BigInt, - limb_width: usize, - n_limbs: usize, -) -> Result, SynthesisError> { - let limbs = nat_to_limbs(val, limb_width, n_limbs).unwrap(); - let bignat = BigNat::alloc_from_limbs( - cs.namespace(|| "alloc bignat"), - || Ok(limbs.clone()), - None, - limb_width, - n_limbs, - )?; - // Now enforce that the limbs are all equal to the constants - (0..n_limbs).for_each(|i| { - cs.enforce( - || format!("check limb {i}"), - |lc| lc + &bignat.limbs[i], - |lc| lc + CS::one(), - |lc| lc + (limbs[i], CS::one()), - ); - }); - Ok(bignat) -} - -/// Check that two numbers are equal and return a bit -pub fn alloc_num_equals>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, -) -> Result { - // Allocate and constrain `r`: result boolean bit. 
- // It equals `true` if `a` equals `b`, `false` otherwise - let r_value = match (a.get_value(), b.get_value()) { - (Some(a), Some(b)) => Some(a == b), - _ => None, - }; - - let r = AllocatedBit::alloc(cs.namespace(|| "r"), r_value)?; - - // Allocate t s.t. t=1 if a == b else 1/(a - b) - - let t = AllocatedNum::alloc(cs.namespace(|| "t"), || { - let a_val = *a.get_value().get()?; - let b_val = *b.get_value().get()?; - Ok(if a_val == b_val { - F::ONE - } else { - (a_val - b_val).invert().unwrap() - }) - })?; - - cs.enforce( - || "t*(a - b) = 1 - r", - |lc| lc + t.get_variable(), - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc + CS::one() - r.get_variable(), - ); - - cs.enforce( - || "r*(a - b) = 0", - |lc| lc + r.get_variable(), - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc, - ); - - Ok(r) -} - -// TODO: Figure out if this can be done better -pub fn conditionally_select_allocated_bit>( - mut cs: CS, - a: &AllocatedBit, - b: &AllocatedBit, - condition: &Boolean, -) -> Result { - let c = AllocatedBit::alloc( - cs.namespace(|| "conditionally select result"), - if condition.get_value().unwrap_or(false) { - a.get_value() - } else { - b.get_value() - }, - )?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - b.get_variable(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable() - b.get_variable(), - ); - - Ok(c) -} -/// If condition return a otherwise b where a and b are `BigNats` -pub fn conditionally_select_bignat>( - mut cs: CS, - a: &BigNat, - b: &BigNat, - condition: &Boolean, -) -> Result, SynthesisError> { - assert!(a.limbs.len() == b.limbs.len()); - let c = BigNat::alloc_from_nat( - cs.namespace(|| "conditional select result"), - || { - if *condition.get_value().get()? 
{ - Ok(a.value.get()?.clone()) - } else { - Ok(b.value.get()?.clone()) - } - }, - a.params.limb_width, - a.params.n_limbs, - )?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - for i in 0..c.limbs.len() { - cs.enforce( - || format!("conditional select constraint {i}"), - |lc| lc + &a.limbs[i] - &b.limbs[i], - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + &c.limbs[i] - &b.limbs[i], - ); - } - Ok(c) -} - -/// Same as the above but Condition is an `AllocatedNum` that needs to be -/// 0 or 1. 1 => True, 0 => False -pub fn conditionally_select2>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(*a.get_value().get()?) - } else { - Ok(*b.get_value().get()?) - } - })?; - - // a * condition + b*(1-condition) = c -> - // a * condition - b*condition = c - b - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - b.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - b.get_variable(), - ); - - Ok(c) -} - -/// If condition set to 0 otherwise a. Condition is an allocated num -pub fn select_zero_or_num2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ZERO) - } else { - Ok(*a.get_value().get()?) - } - })?; - - // a * (1 - condition) = c - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |lc| lc + CS::one() - condition.get_variable(), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to a otherwise 0. 
Condition is an allocated num -pub fn select_num_or_zero2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(*a.get_value().get()?) - } else { - Ok(F::ZERO) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to a otherwise 0 -pub fn select_num_or_zero>( - mut cs: CS, - a: &AllocatedNum, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? { - Ok(*a.get_value().get()?) - } else { - Ok(F::ZERO) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable(), - ); - - Ok(c) -} - -/// If condition set to 1 otherwise a -pub fn select_one_or_num2>( - mut cs: CS, - a: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ONE) - } else { - Ok(*a.get_value().get()?) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + CS::one() - a.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - a.get_variable(), - ); - Ok(c) -} - -/// If condition set to 1 otherwise a - b -pub fn select_one_or_diff2>( - mut cs: CS, - a: &AllocatedNum, - b: &AllocatedNum, - condition: &AllocatedNum, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? == F::ONE { - Ok(F::ONE) - } else { - Ok(*a.get_value().get()? - *b.get_value().get()?) 
- } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + CS::one() - a.get_variable() + b.get_variable(), - |lc| lc + condition.get_variable(), - |lc| lc + c.get_variable() - a.get_variable() + b.get_variable(), - ); - Ok(c) -} - -/// If condition set to a otherwise 1 for boolean conditions -pub fn select_num_or_one>( - mut cs: CS, - a: &AllocatedNum, - condition: &Boolean, -) -> Result, SynthesisError> { - let c = AllocatedNum::alloc(cs.namespace(|| "conditional select result"), || { - if *condition.get_value().get()? { - Ok(*a.get_value().get()?) - } else { - Ok(F::ONE) - } - })?; - - cs.enforce( - || "conditional select constraint", - |lc| lc + a.get_variable() - CS::one(), - |_| condition.lc(CS::one(), F::ONE), - |lc| lc + c.get_variable() - CS::one(), - ); - - Ok(c) -} diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 876578f..0000000 --- a/src/lib.rs +++ /dev/null @@ -1,1669 +0,0 @@ -#![allow(non_snake_case)] - -// private modules -mod bellpepper; -mod circuit; -mod digest; -mod nifs; - -// public modules -pub mod constants; -pub mod errors; -pub mod fast_serde; -pub mod gadgets; -pub mod provider; -pub mod r1cs; -pub mod spartan; -pub mod traits; - -pub mod cyclefold; -pub mod supernova; - -use std::sync::Arc; - -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use circuit::{NovaAugmentedCircuit, NovaAugmentedCircuitInputs, NovaAugmentedCircuitParams}; -use constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}; -use errors::NovaError; -use ff::Field; -use gadgets::scalar_as_base; -use nifs::NIFS; -use once_cell::sync::OnceCell; -use r1cs::{ - CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, - RelaxedR1CSWitness, -}; -use serde::{Deserialize, Serialize}; -use supernova::StepCircuit; -use traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - snark::RelaxedR1CSSNARKTrait, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, 
ROConstantsCircuit, ROTrait, -}; - -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - digest::{DigestComputer, SimpleDigestible}, - r1cs::R1CSResult, -}; - -/// A type that holds parameters for the primary and secondary circuits of Nova -/// and SuperNova -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSWithArity { - F_arity: usize, - r1cs_shape: R1CSShape, -} - -impl SimpleDigestible for R1CSWithArity {} - -impl R1CSWithArity { - /// Create a new `R1CSWithArity` - pub fn new(r1cs_shape: R1CSShape, F_arity: usize) -> Self { - Self { - F_arity, - r1cs_shape, - } - } - - /// Return the [`R1CSWithArity`]' digest. - pub fn digest(&self) -> E::Scalar { - let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); - dc.digest().expect("Failure in computing digest") - } -} - -/// A type that holds public parameters of Nova -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E: CurveCycleEquipped, -{ - F_arity_primary: usize, - F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_circuit_primary: ROConstantsCircuit>, - pub ck_primary: Arc>, - circuit_shape_primary: R1CSWithArity, - ro_consts_secondary: ROConstants>, - ro_consts_circuit_secondary: ROConstantsCircuit, - pub ck_secondary: Arc>>, - circuit_shape_secondary: R1CSWithArity>, - augmented_circuit_params_primary: NovaAugmentedCircuitParams, - augmented_circuit_params_secondary: NovaAugmentedCircuitParams, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Set up builder to create `PublicParams` for a pair of circuits `C1` and - /// `C2`. 
- /// - /// # Note - /// - /// Public parameters set up a number of bases for the homomorphic - /// commitment scheme of Nova. - /// - /// Some final compressing SNARKs, like variants of Spartan, use computation - /// commitments that require larger sizes for these parameters. These - /// SNARKs provide a hint for these values by implementing - /// `RelaxedR1CSSNARKTrait::ck_floor()`, which can be passed to this - /// function. - /// - /// If you're not using such a SNARK, pass - /// `arecibo::traits::snark::default_ck_hint()` instead. - /// - /// # Arguments - /// - /// * `c_primary`: The primary circuit of type `C1`. - /// * `c_secondary`: The secondary circuit of type `C2`. - /// * `ck_hint1`: A `CommitmentKeyHint` for `G1`, which is a function that - /// provides a hint for the number of generators required in the - /// commitment scheme for the primary circuit. - /// * `ck_hint2`: A `CommitmentKeyHint` for `G2`, similar to `ck_hint1`, but - /// for the secondary circuit. - /// - /// # Example - /// - /// ```rust - /// # use arecibo::spartan::ppsnark::RelaxedR1CSSNARK; - /// # use arecibo::provider::ipa_pc::EvaluationEngine; - /// # use arecibo::provider::{PallasEngine, VestaEngine}; - /// # use arecibo::traits::{circuit::TrivialCircuit, Engine, snark::RelaxedR1CSSNARKTrait}; - /// use arecibo::PublicParams; - /// - /// type E1 = PallasEngine; - /// type E2 = VestaEngine; - /// type EE = EvaluationEngine; - /// type SPrime = RelaxedR1CSSNARK>; - /// - /// let circuit1 = TrivialCircuit::<::Scalar>::default(); - /// let circuit2 = TrivialCircuit::<::Scalar>::default(); - /// // Only relevant for a SNARK using computation commitmnets, pass &(|_| 0) - /// // or &*nova_snark::traits::snark::default_ck_hint() otherwise. 
- /// let ck_hint1 = &*SPrime::::ck_floor(); - /// let ck_hint2 = &*SPrime::::ck_floor(); - /// - /// let pp = PublicParams::setup(&circuit1, &circuit2, ck_hint1, ck_hint2).unwrap(); - /// ``` - pub fn setup, C2: StepCircuit< as Engine>::Scalar>>( - c_primary: &C1, - c_secondary: &C2, - ck_hint1: &CommitmentKeyHint, - ck_hint2: &CommitmentKeyHint>, - ) -> Result { - let augmented_circuit_params_primary = - NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let augmented_circuit_params_secondary = - NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - - let ro_consts_primary: ROConstants = ROConstants::::default(); - let ro_consts_secondary: ROConstants> = ROConstants::>::default(); - - let F_arity_primary = c_primary.arity(); - let F_arity_secondary = c_secondary.arity(); - - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - let ro_consts_circuit_secondary: ROConstantsCircuit = - ROConstantsCircuit::::default(); - - // Initialize ck for the primary - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &augmented_circuit_params_primary, - None, - c_primary, - ro_consts_circuit_primary.clone(), - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape_and_key(ck_hint1); - let ck_primary = Arc::new(ck_primary); - - // Initialize ck for the secondary - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &augmented_circuit_params_secondary, - None, - c_secondary, - ro_consts_circuit_secondary.clone(), - ); - let mut cs: ShapeCS> = ShapeCS::new(); - let _ = circuit_secondary.synthesize(&mut cs); - let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); - let ck_secondary = Arc::new(ck_secondary); - - if 
r1cs_shape_primary.num_io != 2 || r1cs_shape_secondary.num_io != 2 { - return Err(NovaError::InvalidStepCircuitIO); - } - - let circuit_shape_primary = R1CSWithArity::new(r1cs_shape_primary, F_arity_primary); - let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); - - Ok(Self { - F_arity_primary, - F_arity_secondary, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - circuit_shape_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_primary, - augmented_circuit_params_secondary, - digest: OnceCell::new(), - }) - } - - /// Retrieve the digest of the public parameters. - pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints in the primary and secondary circuits - pub const fn num_constraints(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_cons, - self.circuit_shape_secondary.r1cs_shape.num_cons, - ) - } - - /// Returns the number of variables in the primary and secondary circuits - pub const fn num_variables(&self) -> (usize, usize) { - ( - self.circuit_shape_primary.r1cs_shape.num_vars, - self.circuit_shape_secondary.r1cs_shape.num_vars, - ) - } -} - -/// A resource buffer for [`RecursiveSNARK`] for storing scratch values that are -/// computed by `prove_step`, which allows the reuse of memory allocations and -/// avoids unnecessary new allocations in the critical section. 
-#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ResourceBuffer { - l_w: Option>, - l_u: Option>, - - ABC_Z_1: R1CSResult, - ABC_Z_2: R1CSResult, - - /// buffer for `commit_T` - T: Vec, -} - -/// A SNARK that proves the correct execution of an incremental computation -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - z0_primary: Vec, - z0_secondary: Vec< as Engine>::Scalar>, - r_W_primary: RelaxedR1CSWitness, - r_U_primary: RelaxedR1CSInstance, - r_W_secondary: RelaxedR1CSWitness>, - r_U_secondary: RelaxedR1CSInstance>, - l_w_secondary: R1CSWitness>, - l_u_secondary: R1CSInstance>, - - /// Buffer for memory needed by the primary fold-step - buffer_primary: ResourceBuffer, - /// Buffer for memory needed by the secondary fold-step - buffer_secondary: ResourceBuffer>, - - i: usize, - zi_primary: Vec, - zi_secondary: Vec< as Engine>::Scalar>, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// Create new instance of recursive SNARK - pub fn new, C2: StepCircuit< as Engine>::Scalar>>( - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result { - if z0_primary.len() != pp.F_arity_primary || z0_secondary.len() != pp.F_arity_secondary { - return Err(NovaError::InvalidInitialInputLength); - } - - let r1cs_primary = &pp.circuit_shape_primary.r1cs_shape; - let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; - - // base case for the primary - let mut cs_primary = SatisfyingAssignment::::new(); - let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::ZERO, - z0_primary.to_vec(), - None, - None, - None, - None, - ); - - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - 
pp.ro_consts_circuit_primary.clone(), - ); - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - let (u_primary, w_primary) = - cs_primary.r1cs_instance_and_witness(r1cs_primary, &pp.ck_primary)?; - - // base case for the secondary - let mut cs_secondary = SatisfyingAssignment::>::new(); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::ZERO, - z0_secondary.to_vec(), - None, - None, - Some(u_primary.clone()), - None, - ); - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - ); - let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; - let (u_secondary, w_secondary) = cs_secondary - .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; - - // IVC proof for the primary circuit - let l_w_primary = w_primary; - let l_u_primary = u_primary; - let r_W_primary = RelaxedR1CSWitness::from_r1cs_witness(r1cs_primary, l_w_primary); - let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( - &*pp.ck_primary, - &pp.circuit_shape_primary.r1cs_shape, - l_u_primary, - ); - - // IVC proof for the secondary circuit - let l_w_secondary = w_secondary; - let l_u_secondary = u_secondary; - let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); - let r_U_secondary = - RelaxedR1CSInstance::>::default(&pp.ck_secondary, r1cs_secondary); - - assert!( - !(zi_primary.len() != pp.F_arity_primary || zi_secondary.len() != pp.F_arity_secondary), - "Invalid step length" - ); - - let zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::::Scalar>, _>>()?; - - let zi_secondary = zi_secondary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect:: as Engine>::Scalar>, _>>()?; - - let buffer_primary = 
ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_primary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_primary.num_cons), - T: r1cs::default_T::(r1cs_primary.num_cons), - }; - - let buffer_secondary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), - T: r1cs::default_T::>(r1cs_secondary.num_cons), - }; - - Ok(Self { - z0_primary: z0_primary.to_vec(), - z0_secondary: z0_secondary.to_vec(), - r_W_primary, - r_U_primary, - r_W_secondary, - r_U_secondary, - l_w_secondary, - l_u_secondary, - - buffer_primary, - buffer_secondary, - i: 0, - zi_primary, - zi_secondary, - }) - } - - /// Inputs of the primary circuits - pub fn z0_primary(&self) -> &Vec { - &self.z0_primary - } - - /// Outputs of the primary circuits - pub fn zi_primary(&self) -> &Vec { - &self.zi_primary - } - - /// Create a new `RecursiveSNARK` (or updates the provided `RecursiveSNARK`) - /// by executing a step of the incremental computation - #[tracing::instrument(skip_all, name = "nova::RecursiveSNARK::prove_step")] - pub fn prove_step< - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, - >( - &mut self, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - ) -> Result<(), NovaError> { - // first step was already done in the constructor - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - // save the inputs before proceeding to the `i+1`th step - let r_U_primary_i = self.r_U_primary.clone(); - let r_U_secondary_i = self.r_U_secondary.clone(); - let l_u_secondary_i = self.l_u_secondary.clone(); - - // fold the secondary circuit's instance - let (nifs_secondary, _) = NIFS::prove_mut( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &mut self.r_U_secondary, - &mut self.r_W_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - &mut self.buffer_secondary.T, - &mut 
self.buffer_secondary.ABC_Z_1, - &mut self.buffer_secondary.ABC_Z_2, - )?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp.circuit_shape_primary.r1cs_shape.num_io + 1, - pp.circuit_shape_primary.r1cs_shape.num_vars, - ); - let inputs_primary: NovaAugmentedCircuitInputs> = NovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::from(self.i as u64), - self.z0_primary.to_vec(), - Some(self.zi_primary.clone()), - Some(r_U_secondary_i), - Some(l_u_secondary_i), - Some(Commitment::>::decompress(&nifs_secondary.comm_T)?), - ); - - let circuit_primary: NovaAugmentedCircuit<'_, Dual, C1> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - ); - - let zi_primary = circuit_primary.synthesize(&mut cs_primary)?; - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp.circuit_shape_primary.r1cs_shape, &pp.ck_primary)?; - - // fold the primary circuit's instance - let (nifs_primary, _) = NIFS::prove_mut( - &*pp.ck_primary, - &pp.ro_consts_primary, - &pp.digest(), - &pp.circuit_shape_primary.r1cs_shape, - &mut self.r_U_primary, - &mut self.r_W_primary, - &l_u_primary, - &l_w_primary, - &mut self.buffer_primary.T, - &mut self.buffer_primary.ABC_Z_1, - &mut self.buffer_primary.ABC_Z_2, - )?; - - let mut cs_secondary = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_secondary.r1cs_shape.num_io + 1, - pp.circuit_shape_secondary.r1cs_shape.num_vars, - ); - let inputs_secondary: NovaAugmentedCircuitInputs = NovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::from(self.i as u64), - self.z0_secondary.to_vec(), - Some(self.zi_secondary.clone()), - Some(r_U_primary_i), - Some(l_u_primary), - Some(Commitment::::decompress(&nifs_primary.comm_T)?), - ); - - let circuit_secondary: NovaAugmentedCircuit<'_, E1, C2> = NovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - 
Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - ); - let zi_secondary = circuit_secondary.synthesize(&mut cs_secondary)?; - - let (l_u_secondary, l_w_secondary) = cs_secondary - .r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary) - .map_err(|_e| NovaError::UnSat)?; - - // update the running instances and witnesses - self.zi_primary = zi_primary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::::Scalar>, _>>()?; - self.zi_secondary = zi_secondary - .iter() - .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect:: as Engine>::Scalar>, _>>()?; - - self.l_u_secondary = l_u_secondary; - self.l_w_secondary = l_w_secondary; - - self.i += 1; - - Ok(()) - } - - /// Verify the correctness of the `RecursiveSNARK` - pub fn verify( - &self, - pp: &PublicParams, - num_steps: usize, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { - // number of steps cannot be zero - let is_num_steps_zero = num_steps == 0; - - // check if the provided proof has executed num_steps - let is_num_steps_not_match = self.i != num_steps; - - // check if the initial inputs match - let is_inputs_not_match = - self.z0_primary != z0_primary || self.z0_secondary != z0_secondary; - - // check if the (relaxed) R1CS instances have two public outputs - let is_instance_has_two_outputs = self.l_u_secondary.X.len() != 2 - || self.r_U_primary.X.len() != 2 - || self.r_U_secondary.X.len() != 2; - - if is_num_steps_zero - || is_num_steps_not_match - || is_inputs_not_match - || is_instance_has_two_outputs - { - return Err(NovaError::ProofVerifyError); - } - - // check if the output hashes in R1CS instances point to the right running - // instances - let (hash_primary, hash_secondary) = { - let mut hasher = as Engine>::RO::new( - pp.ro_consts_secondary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * 
pp.F_arity_primary, - ); - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = ::RO::new( - pp.ro_consts_primary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * pp.F_arity_secondary, - ); - hasher2.absorb(scalar_as_base::(pp.digest())); - hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); - for e in z0_secondary { - hasher2.absorb(*e); - } - for e in &self.zi_secondary { - hasher2.absorb(*e); - } - self.r_U_primary.absorb_in_ro(&mut hasher2); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) - { - return Err(NovaError::ProofVerifyError); - } - - // check the satisfiability of the provided instances - let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( - || { - pp.circuit_shape_primary.r1cs_shape.is_sat_relaxed( - &pp.ck_primary, - &self.r_U_primary, - &self.r_W_primary, - ) - }, - || { - rayon::join( - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( - &pp.ck_secondary, - &self.r_U_secondary, - &self.r_W_secondary, - ) - }, - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat( - &pp.ck_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - ) - }, - ) - }, - ); - - // check the returned res objects - res_r_primary?; - res_r_secondary?; - res_l_secondary?; - - Ok((self.zi_primary.clone(), self.zi_secondary.clone())) - } - - /// Get the outputs after the last step of computation. - pub fn outputs(&self) -> (&[E1::Scalar], &[ as Engine>::Scalar]) { - (&self.zi_primary, &self.zi_secondary) - } - - /// The number of steps which have been executed thus far. 
- pub fn num_steps(&self) -> usize { - self.i - } -} - -/// A type that holds the prover key for `CompressedSNARK` -#[derive(Clone, Debug)] -pub struct ProverKey -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub pk_primary: S1::ProverKey, - pub pk_secondary: S2::ProverKey, -} - -/// A type that holds the verifier key for `CompressedSNARK` -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - F_arity_primary: usize, - F_arity_secondary: usize, - ro_consts_primary: ROConstants, - ro_consts_secondary: ROConstants>, - pp_digest: E1::Scalar, - vk_primary: S1::VerifierKey, - vk_secondary: S2::VerifierKey, -} - -/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedSNARK -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - r_U_primary: RelaxedR1CSInstance, - r_W_snark_primary: S1, - - r_U_secondary: RelaxedR1CSInstance>, - l_u_secondary: R1CSInstance>, - nifs_secondary: NIFS>, - f_W_snark_secondary: S2, - - zn_primary: Vec, - zn_secondary: Vec< as Engine>::Scalar>, -} - -impl CompressedSNARK -where - E1: CurveCycleEquipped, - S1: RelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - /// Creates prover and verifier keys for `CompressedSNARK` - pub fn setup( - pp: &PublicParams, - ) -> Result<(ProverKey, VerifierKey), NovaError> { - let (pk_primary, vk_primary) = - S1::setup(pp.ck_primary.clone(), &pp.circuit_shape_primary.r1cs_shape)?; - let (pk_secondary, vk_secondary) = S2::setup( - pp.ck_secondary.clone(), - &pp.circuit_shape_secondary.r1cs_shape, - )?; - - let pk = ProverKey { - pk_primary, - pk_secondary, - }; - - let vk = VerifierKey { - F_arity_primary: pp.F_arity_primary, - F_arity_secondary: pp.F_arity_secondary, - ro_consts_primary: 
pp.ro_consts_primary.clone(), - ro_consts_secondary: pp.ro_consts_secondary.clone(), - pp_digest: pp.digest(), - vk_primary, - vk_secondary, - }; - - Ok((pk, vk)) - } - - /// Create a new `CompressedSNARK` - pub fn prove( - pp: &PublicParams, - pk: &ProverKey, - recursive_snark: &RecursiveSNARK, - ) -> Result { - // fold the secondary circuit's instance with its running instance - let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = NIFS::prove( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &recursive_snark.r_U_secondary, - &recursive_snark.r_W_secondary, - &recursive_snark.l_u_secondary, - &recursive_snark.l_w_secondary, - )?; - - // create SNARKs proving the knowledge of f_W_primary and f_W_secondary - let (r_W_snark_primary, f_W_snark_secondary) = rayon::join( - || { - S1::prove( - &pp.ck_primary, - &pk.pk_primary, - &pp.circuit_shape_primary.r1cs_shape, - &recursive_snark.r_U_primary, - &recursive_snark.r_W_primary, - ) - }, - || { - S2::prove( - &pp.ck_secondary, - &pk.pk_secondary, - &pp.circuit_shape_secondary.r1cs_shape, - &f_U_secondary, - &f_W_secondary, - ) - }, - ); - - Ok(Self { - r_U_primary: recursive_snark.r_U_primary.clone(), - r_W_snark_primary: r_W_snark_primary?, - - r_U_secondary: recursive_snark.r_U_secondary.clone(), - l_u_secondary: recursive_snark.l_u_secondary.clone(), - nifs_secondary, - f_W_snark_secondary: f_W_snark_secondary?, - - zn_primary: recursive_snark.zi_primary.clone(), - zn_secondary: recursive_snark.zi_secondary.clone(), - }) - } - - /// Verify the correctness of the `CompressedSNARK` - pub fn verify( - &self, - vk: &VerifierKey, - num_steps: usize, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), NovaError> { - // the number of steps cannot be zero - if num_steps == 0 { - return Err(NovaError::ProofVerifyError); - } - - // check if the (relaxed) R1CS instances have two public 
outputs - if self.l_u_secondary.X.len() != 2 - || self.r_U_primary.X.len() != 2 - || self.r_U_secondary.X.len() != 2 - { - return Err(NovaError::ProofVerifyError); - } - - // check if the output hashes in R1CS instances point to the right running - // instances - let (hash_primary, hash_secondary) = { - let mut hasher = as Engine>::RO::new( - vk.ro_consts_secondary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_primary, - ); - hasher.absorb(vk.pp_digest); - hasher.absorb(E1::Scalar::from(num_steps as u64)); - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zn_primary { - hasher.absorb(*e); - } - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = ::RO::new( - vk.ro_consts_primary.clone(), - NUM_FE_WITHOUT_IO_FOR_CRHF + 2 * vk.F_arity_secondary, - ); - hasher2.absorb(scalar_as_base::(vk.pp_digest)); - hasher2.absorb( as Engine>::Scalar::from(num_steps as u64)); - for e in z0_secondary { - hasher2.absorb(*e); - } - for e in &self.zn_secondary { - hasher2.absorb(*e); - } - self.r_U_primary.absorb_in_ro(&mut hasher2); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - if hash_primary != self.l_u_secondary.X[0] - || hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) - { - return Err(NovaError::ProofVerifyError); - } - - // fold the secondary's running instance with the last instance to get a folded - // instance - let f_U_secondary = self.nifs_secondary.verify( - &vk.ro_consts_secondary, - &scalar_as_base::(vk.pp_digest), - &self.r_U_secondary, - &self.l_u_secondary, - )?; - - // check the satisfiability of the folded instances using - // SNARKs proving the knowledge of their satisfying witnesses - let (res_primary, res_secondary) = rayon::join( - || { - self.r_W_snark_primary - .verify(&vk.vk_primary, &self.r_U_primary) - }, - || { - self.f_W_snark_secondary - .verify(&vk.vk_secondary, &f_U_secondary) - }, - ); - - res_primary?; - res_secondary?; - - Ok((self.zn_primary.clone(), 
self.zn_secondary.clone())) - } -} - -/// Compute the circuit digest of a [`StepCircuit`]. -/// -/// Note for callers: This function should be called with its performance -/// characteristics in mind. It will synthesize and digest the full `circuit` -/// given. -pub fn circuit_digest>( - circuit: &C, -) -> E1::Scalar { - let augmented_circuit_params = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit are parameterized by G2 because the type alias uses - // G2::Base = G1::Scalar - let ro_consts_circuit: ROConstantsCircuit> = ROConstantsCircuit::>::default(); - - // Initialize ck for the primary - let augmented_circuit: NovaAugmentedCircuit<'_, Dual, C> = - NovaAugmentedCircuit::new(&augmented_circuit_params, None, circuit, ro_consts_circuit); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = augmented_circuit.synthesize(&mut cs); - cs.r1cs_shape().digest() -} - -pub type CommitmentKey = <::CE as CommitmentEngineTrait>::CommitmentKey; -type Commitment = <::CE as CommitmentEngineTrait>::Commitment; -type CompressedCommitment = <<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait>::CompressedCommitment; -type CE = ::CE; - -// #[cfg(test)] -// mod tests { -// use core::{fmt::Write, marker::PhantomData}; - -// use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, -// SynthesisError}; use expect_test::{expect, Expect}; -// use ff::PrimeField; -// use halo2curves::bn256::Bn256; -// use traits::circuit::TrivialCircuit; - -// use self::traits::CurveCycleEquipped; -// use super::*; -// use crate::{ -// provider::{ -// non_hiding_zeromorph::ZMPCS, Bn256EngineIPA, Bn256EngineKZG, -// Bn256EngineZM, PallasEngine, Secp256k1Engine, -// }, -// traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, -// }; - -// type EE = provider::ipa_pc::EvaluationEngine; -// type S = spartan::snark::RelaxedR1CSSNARK; -// type SPrime = spartan::ppsnark::RelaxedR1CSSNARK; - -// #[derive(Clone, Debug, Default)] -// struct 
CubicCircuit { -// _p: PhantomData, -// } - -// impl StepCircuit for CubicCircuit { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and -// `y` are // respectively the input and output. -// let x = &z[0]; -// let x_sq = x.square(cs.namespace(|| "x_sq"))?; -// let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; -// let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { -// Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + -// F::from(5u64)) })?; - -// cs.enforce( -// || "y = x^3 + x + 5", -// |lc| { -// lc + x_cu.get_variable() -// + x.get_variable() -// + CS::one() -// + CS::one() -// + CS::one() -// + CS::one() -// + CS::one() -// }, -// |lc| lc + CS::one(), -// |lc| lc + y.get_variable(), -// ); - -// Ok(vec![y]) -// } -// } - -// impl CubicCircuit { -// fn output(&self, z: &[F]) -> Vec { -// vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] -// } -// } - -// fn test_pp_digest_with(circuit1: &T1, circuit2: -// &T2, expected: &Expect) where -// E1: CurveCycleEquipped, -// T1: StepCircuit, -// T2: StepCircuit< as Engine>::Scalar>, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests public parameters with a size specifically intended for -// a // spark-compressed SNARK -// let ck_hint1 = &*SPrime::::ck_floor(); -// let ck_hint2 = &*SPrime::, EE2>::ck_floor(); -// let pp = PublicParams::::setup(circuit1, circuit2, ck_hint1, -// ck_hint2).unwrap(); - -// let digest_str = -// pp.digest() -// .to_repr() -// .as_ref() -// .iter() -// .fold(String::new(), |mut output, b| { -// let _ = write!(output, "{b:02x}"); -// output -// }); - -// expected.assert_eq(&digest_str); -// } - -// #[test] -// fn test_pp_digest() { -// 
test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["e5a6a85b77f3fb958b69722a5a21bf656fd21a6b5a012708a4b086b6be6d2b03"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["ec707a8b822baebca114b6e61b238374f9ed358c542dd37ee73febb47832cd01"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["df52de22456157eb056003d4dc580a167ab8ce40a151c9944ea09a6fd0028600"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["b3ad0f4b734c5bd2ab9e83be8ee0cbaaa120e5cd0270b51cb9d7778a33f0b801"], -// ); - -// test_pp_digest_with::, EE<_>>( -// &TrivialCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["e1feca53664212ee750da857c726b2a09bb30b2964f22ea85a19b58c9eaf5701"], -// ); -// test_pp_digest_with::, EE<_>>( -// &CubicCircuit::default(), -// &TrivialCircuit::default(), -// -// &expect!["4ad6b10b6fd24fecba49f08d35bc874a6da9c77735bc0bcf4b78b1914a97e602"], -// ); -// } - -// fn test_ivc_trivial_with() -// where -// E1: CurveCycleEquipped, -// { -// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = -// TrivialCircuit::< as Engine>::Scalar>::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &test_circuit1, -// &test_circuit2, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); -// let num_steps = 1; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::new( -// &pp, -// &test_circuit1, -// &test_circuit2, -// &[::Scalar::ZERO], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// recursive_snark -// .prove_step(&pp, &test_circuit1, &test_circuit2) -// .unwrap(); - -// // verify the recursive SNARK -// recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ZERO], -// 
&[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// #[test] -// fn test_ivc_trivial() { -// test_ivc_trivial_with::(); -// test_ivc_trivial_with::(); -// test_ivc_trivial_with::(); -// } - -// fn test_ivc_nontrivial_with() -// where -// E1: CurveCycleEquipped, -// { -// let circuit_primary = TrivialCircuit::default(); -// let circuit_secondary = CubicCircuit::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &circuit_primary, -// &circuit_secondary, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// for i in 0..num_steps { -// recursive_snark -// .prove_step(&pp, &circuit_primary, &circuit_secondary) -// .unwrap(); - -// // verify the recursive snark at each step of recursion -// recursive_snark -// .verify( -// &pp, -// i + 1, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // sanity: check the claimed output with a direct computation of the -// same assert_eq!(zn_primary, vec![::Scalar::ONE]); -// let mut zn_secondary_direct = vec![ as -// Engine>::Scalar::ZERO]; for _i in 0..num_steps { -// zn_secondary_direct = -// circuit_secondary.clone().output(&zn_secondary_direct); } -// assert_eq!(zn_secondary, zn_secondary_direct); -// assert_eq!( -// zn_secondary, -// vec![ as Engine>::Scalar::from(2460515u64)] -// ); -// } - -// #[test] -// fn test_ivc_nontrivial() { -// test_ivc_nontrivial_with::(); -// test_ivc_nontrivial_with::(); -// test_ivc_nontrivial_with::(); -// } - -// fn 
test_ivc_nontrivial_with_some_compression_with() -// where -// E1: CurveCycleEquipped, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// S1: RelaxedR1CSSNARKTrait, -// S2: RelaxedR1CSSNARKTrait>, -// { -// let circuit_primary = TrivialCircuit::default(); -// let circuit_secondary = CubicCircuit::default(); - -// // produce public parameters, which we'll maybe use with a -// preprocessing // compressed SNARK -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*S1::ck_floor(), -// &*S2::ck_floor(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &circuit_primary, -// &circuit_secondary, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// for _i in 0..num_steps { -// recursive_snark -// .prove_step(&pp, &circuit_primary, &circuit_secondary) -// .unwrap(); -// } - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // sanity: check the claimed output with a direct computation of the -// same assert_eq!(zn_primary, vec![::Scalar::ONE]); -// let mut zn_secondary_direct = vec![ as -// Engine>::Scalar::ZERO]; for _i in 0..num_steps { -// zn_secondary_direct = -// circuit_secondary.clone().output(&zn_secondary_direct); } -// assert_eq!(zn_secondary, zn_secondary_direct); -// assert_eq!( -// zn_secondary, -// vec![ as Engine>::Scalar::from(2460515u64)] -// ); - -// // run the compressed snark -// // produce the prover and verifier keys for compressed snark -// let (pk, vk) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); - -// // produce a compressed SNARK -// let compressed_snark = -// CompressedSNARK::<_, S1, S2>::prove(&pp, &pk, -// &recursive_snark).unwrap(); - -// 
// verify the compressed SNARK -// compressed_snark -// .verify( -// &vk, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); -// } - -// fn test_ivc_nontrivial_with_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// test_ivc_nontrivial_with_some_compression_with::, S<_, -// EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_compression() { -// test_ivc_nontrivial_with_compression_with::, -// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_compression_with::, -// EE<_>>(); test_ivc_nontrivial_with_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// fn test_ivc_nontrivial_with_spark_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// test_ivc_nontrivial_with_some_compression_with::, -// SPrime<_, EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_spark_compression() { -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_spark_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// type BatchedS = spartan::batched::BatchedRelaxedR1CSSNARK; -// type BatchedSPrime = spartan::batched::BatchedRelaxedR1CSSNARK; - -// fn 
test_ivc_nontrivial_with_batched_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests compatibility of the batched workflow with the -// non-batched one test_ivc_nontrivial_with_some_compression_with::, BatchedS<_, EE2>>() } - -// #[test] -// fn test_ivc_nontrivial_with_batched_compression() { -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_compression_with::, EE<_>>( ); -// test_ivc_nontrivial_with_batched_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, -// >(); -// } - -// fn test_ivc_nontrivial_with_batched_spark_compression_with() where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // this tests compatibility of the batched workflow with the -// non-batched one test_ivc_nontrivial_with_some_compression_with::< -// E1, -// BatchedSPrime<_, EE1>, -// BatchedSPrime<_, EE2>, -// >() -// } - -// #[test] -// fn test_ivc_nontrivial_with_batched_spark_compression() { -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::, EE<_>>(); -// test_ivc_nontrivial_with_batched_spark_compression_with::< -// Bn256EngineZM, -// ZMPCS, -// EE<_>, -// >(); -// test_ivc_nontrivial_with_batched_spark_compression_with::< -// Bn256EngineKZG, -// provider::hyperkzg::EvaluationEngine, -// EE<_>, 
-// >(); -// } - -// fn test_ivc_nondet_with_compression_with() -// where -// E1: CurveCycleEquipped, -// EE1: EvaluationEngineTrait, -// EE2: EvaluationEngineTrait>, -// // this is due to the reliance on Abomonation -// ::Repr: Abomonation, -// < as Engine>::Scalar as PrimeField>::Repr: Abomonation, -// { -// // y is a non-deterministic advice representing the fifth root of the -// input at a // step. -// #[derive(Clone, Debug)] -// struct FifthRootCheckingCircuit { -// y: F, -// } - -// impl FifthRootCheckingCircuit { -// fn new(num_steps: usize) -> (Vec, Vec) { -// let mut powers = Vec::new(); -// let rng = &mut rand::rngs::OsRng; -// let mut seed = F::random(rng); -// for _i in 0..num_steps + 1 { -// seed *= seed.clone().square().square(); - -// powers.push(Self { y: seed }); -// } - -// // reverse the powers to get roots -// let roots = powers.into_iter().rev().collect::>(); -// (vec![roots[0].y], roots[1..].to_vec()) -// } -// } - -// impl StepCircuit for FifthRootCheckingCircuit -// where -// F: PrimeField, -// { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; - -// // we allocate a variable and set it to the provided -// non-deterministic advice. 
let y = -// AllocatedNum::alloc_infallible(cs.namespace(|| "y"), || self.y); - -// // We now check if y = x^{1/5} by checking if y^5 = x -// let y_sq = y.square(cs.namespace(|| "y_sq"))?; -// let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; -// let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; - -// cs.enforce( -// || "y^5 = x", -// |lc| lc + y_pow_5.get_variable(), -// |lc| lc + CS::one(), -// |lc| lc + x.get_variable(), -// ); - -// Ok(vec![y]) -// } -// } - -// let circuit_primary = FifthRootCheckingCircuit { -// y: ::Scalar::ZERO, -// }; - -// let circuit_secondary = TrivialCircuit::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &circuit_primary, -// &circuit_secondary, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 3; - -// // produce non-deterministic advice -// let (z0_primary, roots) = FifthRootCheckingCircuit::new(num_steps); -// let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &roots[0], -// &circuit_secondary, -// &z0_primary, -// &z0_secondary, -// ) -// .unwrap(); - -// for circuit_primary in roots.iter().take(num_steps) { -// recursive_snark -// .prove_step(&pp, circuit_primary, &circuit_secondary) -// .unwrap(); -// } - -// // verify the recursive SNARK -// recursive_snark -// .verify(&pp, num_steps, &z0_primary, &z0_secondary) -// .unwrap(); - -// // produce the prover and verifier keys for compressed snark -// let (pk, vk) = CompressedSNARK::<_, S, S<_, -// EE2>>::setup(&pp).unwrap(); - -// // produce a compressed SNARK -// let compressed_snark = -// CompressedSNARK::<_, S, S<_, EE2>>::prove(&pp, &pk, -// &recursive_snark).unwrap(); - -// // verify the compressed SNARK -// compressed_snark -// .verify(&vk, num_steps, &z0_primary, &z0_secondary) -// .unwrap(); -// } - -// #[test] -// fn test_ivc_nondet_with_compression() { -// 
test_ivc_nondet_with_compression_with::, -// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); -// test_ivc_nondet_with_compression_with::, -// EE<_>>(); test_ivc_nondet_with_compression_with::, EE<_>>(); } - -// fn test_ivc_base_with() -// where -// E1: CurveCycleEquipped, -// { -// let test_circuit1 = TrivialCircuit::<::Scalar>::default(); let test_circuit2 = -// CubicCircuit::< as Engine>::Scalar>::default(); - -// // produce public parameters -// let pp = PublicParams::::setup( -// &test_circuit1, -// &test_circuit2, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ) -// .unwrap(); - -// let num_steps = 1; - -// // produce a recursive SNARK -// let mut recursive_snark = RecursiveSNARK::::new( -// &pp, -// &test_circuit1, -// &test_circuit2, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// // produce a recursive SNARK -// recursive_snark -// .prove_step(&pp, &test_circuit1, &test_circuit2) -// .unwrap(); - -// // verify the recursive SNARK -// let (zn_primary, zn_secondary) = recursive_snark -// .verify( -// &pp, -// num_steps, -// &[::Scalar::ONE], -// &[ as Engine>::Scalar::ZERO], -// ) -// .unwrap(); - -// assert_eq!(zn_primary, vec![::Scalar::ONE]); -// assert_eq!(zn_secondary, vec![ as -// Engine>::Scalar::from(5u64)]); } - -// #[test] -// fn test_ivc_base() { -// test_ivc_base_with::(); -// test_ivc_base_with::(); -// test_ivc_base_with::(); -// } - -// fn test_setup_with() { -// #[derive(Clone, Debug, Default)] -// struct CircuitWithInputize { -// _p: PhantomData, -// } - -// impl StepCircuit for CircuitWithInputize { -// fn arity(&self) -> usize { -// 1 -// } - -// fn synthesize>( -// &self, -// cs: &mut CS, -// z: &[AllocatedNum], -// ) -> Result>, SynthesisError> { -// let x = &z[0]; -// // a simplified version of this test would only have one -// input // but beside the Nova Public parameter requirement for -// a num_io = 2, being // probed in this test, we *also* require -// num_io to be even, so // 
negative testing requires at least 4 -// inputs let y = x.square(cs.namespace(|| "x_sq"))?; -// y.inputize(cs.namespace(|| "y"))?; // inputize y -// let y2 = x.square(cs.namespace(|| "x_sq2"))?; -// y2.inputize(cs.namespace(|| "y2"))?; // inputize y2 -// let y3 = x.square(cs.namespace(|| "x_sq3"))?; -// y3.inputize(cs.namespace(|| "y3"))?; // inputize y2 -// let y4 = x.square(cs.namespace(|| "x_sq4"))?; -// y4.inputize(cs.namespace(|| "y4"))?; // inputize y2 -// Ok(vec![y, y2, y3, y4]) -// } -// } - -// // produce public parameters with trivial secondary -// let circuit = CircuitWithInputize::<::Scalar>::default(); let pp = PublicParams::::setup( -// &circuit, -// &TrivialCircuit::default(), -// &*default_ck_hint(), -// &*default_ck_hint(), -// ); -// assert!(pp.is_err()); -// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); - -// // produce public parameters with the trivial primary -// let circuit = CircuitWithInputize::< as -// Engine>::Scalar>::default(); let pp = PublicParams::::setup( -// &TrivialCircuit::default(), -// &circuit, -// &*default_ck_hint(), -// &*default_ck_hint(), -// ); -// assert!(pp.is_err()); -// assert_eq!(pp.err(), Some(NovaError::InvalidStepCircuitIO)); -// } - -// #[test] -// fn test_setup() { -// test_setup_with::(); -// } -// } diff --git a/src/nifs.rs b/src/nifs.rs deleted file mode 100644 index 7b8c387..0000000 --- a/src/nifs.rs +++ /dev/null @@ -1,414 +0,0 @@ -//! 
This module implements a non-interactive folding scheme -#![allow(non_snake_case)] - -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NUM_CHALLENGE_BITS, NUM_FE_FOR_RO, NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD}, - errors::NovaError, - r1cs::{ - R1CSInstance, R1CSResult, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, - }, - scalar_as_base, - traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, - Commitment, CommitmentKey, CompressedCommitment, -}; - -/// A SNARK that holds the proof of a step of an incremental computation -#[allow(clippy::upper_case_acronyms)] -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct NIFS { - pub(crate) comm_T: CompressedCommitment, -} - -impl NIFS { - /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and - /// an R1CS instance-witness tuple `(U2, W2)` with the same structure - /// `shape` and defined with respect to the same `ck`, and outputs - /// a folded Relaxed R1CS instance-witness tuple `(U, W)` of the same shape - /// `shape`, with the guarantee that the folded witness `W` satisfies - /// the folded instance `U` if and only if `W1` satisfies `U1` and `W2` - /// satisfies `U2`. - /// - /// Note that this code is tailored for use with Nova's IVC scheme, which - /// enforces certain requirements between the two instances that are - /// folded. In particular, it requires that `U1` and `U2` are such that - /// the hash of `U1` is stored in the public IO of `U2`. - /// In this particular setting, this means that if `U2` is absorbed in the - /// RO, it implicitly absorbs `U1` as well. So the code below avoids - /// absorbing `U1` in the RO. 
- #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove")] - pub fn prove( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result< - ( - Self, - (RelaxedR1CSInstance, RelaxedR1CSWitness), - E::Scalar, - ), - NovaError, - > { - // Check `U1` and `U2` have the same arity - let io_arity = U1.X.len(); - if io_arity != U2.X.len() { - return Err(NovaError::InvalidInputLength); - } - - // initialize a new RO - let mut ro = E::RO::new( - ro_consts.clone(), - NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD + io_arity, - ); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // compute a commitment to the cross-term - let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - let W = W1.fold(W2, &T, &r)?; - - // return the folded instance and witness - Ok(( - Self { - comm_T: comm_T.compress(), - }, - (U, W), - r, - )) - } - - /// Takes as input a Relaxed R1CS instance-witness tuple `(U1, W1)` and - /// an R1CS instance-witness tuple `(U2, W2)` with the same structure - /// `shape` and defined with respect to the same `ck`, and updates `(U1, - /// W1)` by folding `(U2, W2)` into it with the guarantee that the - /// updated witness `W` satisfies the updated instance `U` if and only - /// if `W1` satisfies `U1` and `W2` satisfies `U2`. 
- #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, level = "trace", name = "NIFS::prove_mut")] - pub fn prove_mut( - ck: &CommitmentKey, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - S: &R1CSShape, - U1: &mut RelaxedR1CSInstance, - W1: &mut RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - T: &mut Vec, - ABC_Z_1: &mut R1CSResult, - ABC_Z_2: &mut R1CSResult, - ) -> Result<(Self, E::Scalar), NovaError> { - // initialize a new RO - let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // compute a commitment to the cross-term - let comm_T = S.commit_T_into(ck, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?; - - // append `comm_T` to the transcript and obtain a challenge - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - U1.fold_mut(U2, &comm_T, &r); - - // fold the witness using `r` and `T` - W1.fold_mut(W2, T, &r)?; - - // return the commitment - Ok(( - Self { - comm_T: comm_T.compress(), - }, - r, - )) - } - - /// Takes as input a relaxed R1CS instance `U1` and R1CS instance `U2` - /// with the same shape and defined with respect to the same parameters, - /// and outputs a folded instance `U` with the same shape, - /// with the guarantee that the folded instance `U` - /// if and only if `U1` and `U2` are satisfiable. 
- pub fn verify( - &self, - ro_consts: &ROConstants, - pp_digest: &E::Scalar, - U1: &RelaxedR1CSInstance, - U2: &R1CSInstance, - ) -> Result, NovaError> { - // initialize a new RO - let mut ro = E::RO::new(ro_consts.clone(), NUM_FE_FOR_RO); - - // append the digest of pp to the transcript - ro.absorb(scalar_as_base::(*pp_digest)); - - // append U2 to transcript, U1 does not need to absorbed since U2.X[0] = - // Hash(params, U1, i, z0, zi) - U2.absorb_in_ro(&mut ro); - - // append `comm_T` to the transcript and obtain a challenge - let comm_T = Commitment::::decompress(&self.comm_T)?; - comm_T.absorb_in_ro(&mut ro); - - // compute a challenge from the RO - let r = ro.squeeze(NUM_CHALLENGE_BITS); - - // fold the instance using `r` and `comm_T` - let U = U1.fold(U2, &comm_T, &r); - - // return the folded instance - Ok(U) - } -} - -#[cfg(test)] -mod tests { - use ::bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; - use ff::{Field, PrimeField}; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - provider::Bn256EngineKZG, - r1cs::commitment_key, - traits::{snark::default_ck_hint, Engine}, - }; - - fn synthesize_tiny_r1cs_bellpepper>( - cs: &mut CS, - x_val: Option, - ) -> Result<(), SynthesisError> { - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. 
- let x = AllocatedNum::alloc_infallible(cs.namespace(|| "x"), || x_val.unwrap()); - let _ = x.inputize(cs.namespace(|| "x is input")); - - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), &x)?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + Scalar::from(5u64)) - })?; - let _ = y.inputize(cs.namespace(|| "y is output")); - - cs.enforce( - || "y = x^3 + x + 5", - |lc| { - lc + x_cu.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - Ok(()) - } - - fn test_tiny_r1cs_bellpepper_with() { - // First create the shape - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, None); - let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); - let ro_consts = <::RO as ROTrait< - ::Base, - ::Scalar, - >>::Constants::default(); - - // Now get the instance and assignment for one instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(5))); - let (U1, W1) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that the first instance is satisfiable - shape.is_sat(&ck, &U1, &W1).unwrap(); - - // Now get the instance and assignment for second instance - let mut cs = SatisfyingAssignment::::new(); - let _ = synthesize_tiny_r1cs_bellpepper(&mut cs, Some(E::Scalar::from(135))); - let (U2, W2) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); - - // Make sure that the second instance is satisfiable - shape.is_sat(&ck, &U2, &W2).unwrap(); - - // execute a sequence of folds - execute_sequence( - &ck, - &ro_consts, - &::Scalar::ZERO, - &shape, - &U1, - &W1, - &U2, - &W2, - ); - } - - #[test] - fn test_tiny_r1cs_bellpepper() { - test_tiny_r1cs_bellpepper_with::(); - } - - fn execute_sequence( - ck: &CommitmentKey, - 
ro_consts: &<::RO as ROTrait<::Base, ::Scalar>>::Constants, - pp_digest: &::Scalar, - shape: &R1CSShape, - U1: &R1CSInstance, - W1: &R1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) { - // produce a default running instance - let mut r_W = RelaxedR1CSWitness::default(shape); - let mut r_U = RelaxedR1CSInstance::default(ck, shape); - - // produce a step SNARK with (W1, U1) as the first incoming witness-instance - // pair - let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U1, W1); - assert!(res.is_ok()); - let (nifs, (_U, W), _) = res.unwrap(); - - // verify the step SNARK with U1 as the first incoming instance - let res = nifs.verify(ro_consts, pp_digest, &r_U, U1); - assert!(res.is_ok()); - let U = res.unwrap(); - - assert_eq!(U, _U); - - // update the running witness and instance - r_W = W; - r_U = U; - - // produce a step SNARK with (W2, U2) as the second incoming witness-instance - // pair - let res = NIFS::prove(ck, ro_consts, pp_digest, shape, &r_U, &r_W, U2, W2); - assert!(res.is_ok()); - let (nifs, (_U, W), _) = res.unwrap(); - - // verify the step SNARK with U1 as the first incoming instance - let res = nifs.verify(ro_consts, pp_digest, &r_U, U2); - assert!(res.is_ok()); - let U = res.unwrap(); - - assert_eq!(U, _U); - - // update the running witness and instance - r_W = W; - r_U = U; - - // check if the running instance is satisfiable - shape.is_sat_relaxed(ck, &r_U, &r_W).unwrap(); - } - - fn test_tiny_r1cs_with() { - let num_vars = 3; - let S = crate::r1cs::tests::tiny_r1cs::(num_vars); - let one = ::ONE; - - // generate generators and ro constants - let ck = commitment_key(&S, &*default_ck_hint()); - let ro_consts = <::RO as ROTrait< - ::Base, - ::Scalar, - >>::Constants::default(); - - let rand_inst_witness_generator = |ck: &CommitmentKey, - I: &E::Scalar| - -> (E::Scalar, R1CSInstance, R1CSWitness) { - let i0 = *I; - - // compute a satisfying (vars, X) tuple - let (O, vars, X) = { - let z0 = i0 * i0; // constraint 0 - let z1 
= i0 * z0; // constraint 1 - let z2 = z1 + i0; // constraint 2 - let i1 = z2 + one + one + one + one + one; // constraint 3 - - // store the witness and IO for the instance - let W = vec![z0, z1, z2]; - let X = vec![i0, i1]; - (i1, W, X) - }; - - let W = { - let res = R1CSWitness::new(&S, vars); - assert!(res.is_ok()); - res.unwrap() - }; - let U = { - let comm_W = W.commit(ck); - let res = R1CSInstance::new(&S, comm_W, X); - assert!(res.is_ok()); - res.unwrap() - }; - - // check that generated instance is satisfiable - S.is_sat(ck, &U, &W).unwrap(); - - (O, U, W) - }; - - let mut csprng: OsRng = OsRng; - let I = E::Scalar::random(&mut csprng); // the first input is picked randomly for the first instance - let (O, U1, W1) = rand_inst_witness_generator(&ck, &I); - let (_O, U2, W2) = rand_inst_witness_generator(&ck, &O); - - // execute a sequence of folds - execute_sequence( - &ck, - &ro_consts, - &::Scalar::ZERO, - &S, - &U1, - &W1, - &U2, - &W2, - ); - } - - #[test] - fn test_tiny_r1cs() { - test_tiny_r1cs_with::(); - } -} diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs deleted file mode 100644 index e7b5a5f..0000000 --- a/src/provider/bn256_grumpkin.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! This module implements the Nova traits for `bn256::Point`, `bn256::Scalar`, -//! `grumpkin::Point`, `grumpkin::Scalar`. 
-use std::io::Read; - -use digest::{ExtendableOutput, Update}; -use ff::{FromUniformBytes, PrimeField}; -use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup}; -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -use grumpkin_msm::{bn256 as bn256_msm, grumpkin as grumpkin_msm}; -// Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves -use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt}; -use num_bigint::BigInt; -use num_traits::Num; -use rayon::prelude::*; -use sha3::Shake256; - -use crate::{ - impl_traits, - provider::{traits::DlogGroup, util::msm::cpu_best_msm}, - traits::{Group, PrimeFieldExt, TranscriptReprTrait}, -}; - -// Thus compile-time assertions checks important assumptions in the memory -// representation of group data that supports the use of Abomonation. -static_assertions::assert_eq_size!(G2Affine, [u64; 16]); - -/// Re-exports that give access to the standard aliases used in the code base, -/// for bn256 -pub mod bn256 { - pub use halo2curves::bn256::{ - Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, - }; -} - -/// Re-exports that give access to the standard aliases used in the code base, -/// for grumpkin -pub mod grumpkin { - pub use halo2curves::grumpkin::{ - Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, - }; -} - -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -impl_traits!( - bn256, - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - bn256_msm -); -#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] -impl_traits!( - bn256, - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47" -); - -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] -impl_traits!( - grumpkin, - 
"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - grumpkin_msm -); -#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] -impl_traits!( - grumpkin, - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" -); - -#[cfg(test)] -mod tests { - use ff::Field; - use rand::thread_rng; - - use crate::provider::{ - bn256_grumpkin::{bn256, grumpkin}, - traits::DlogGroup, - util::msm::cpu_best_msm, - }; - - #[test] - fn test_bn256_msm_correctness() { - let npoints = 1usize << 16; - let points = bn256::Point::from_label(b"test", npoints); - - let mut rng = thread_rng(); - let scalars = (0..npoints) - .map(|_| bn256::Scalar::random(&mut rng)) - .collect::>(); - - let cpu_msm = cpu_best_msm(&points, &scalars); - let gpu_msm = bn256::Point::vartime_multiscalar_mul(&scalars, &points); - - assert_eq!(cpu_msm, gpu_msm); - } - - #[test] - fn test_grumpkin_msm_correctness() { - let npoints = 1usize << 16; - let points = grumpkin::Point::from_label(b"test", npoints); - - let mut rng = thread_rng(); - let scalars = (0..npoints) - .map(|_| grumpkin::Scalar::random(&mut rng)) - .collect::>(); - - let cpu_msm = cpu_best_msm(&points, &scalars); - let gpu_msm = grumpkin::Point::vartime_multiscalar_mul(&scalars, &points); - - assert_eq!(cpu_msm, gpu_msm); - } -} diff --git a/src/provider/hyperkzg.rs b/src/provider/hyperkzg.rs deleted file mode 100644 index a845938..0000000 --- a/src/provider/hyperkzg.rs +++ /dev/null @@ -1,923 +0,0 @@ -//! This module implements Nova's evaluation engine using `HyperKZG`, a -//! KZG-based polynomial commitment for multilinear polynomials HyperKZG is based on the transformation from univariate PCS to multilinear PCS in the Gemini paper (section 2.4.2 in ``). -//! However, there are some key differences: -//! 
(1) HyperKZG works with multilinear polynomials represented in evaluation -//! form (rather than in coefficient form in Gemini's transformation). -//! This means that Spartan's polynomial IOP can use commit to its polynomials -//! as-is without incurring any interpolations or FFTs. (2) HyperKZG is -//! specialized to use KZG as the univariate commitment scheme, so it includes -//! several optimizations (both during the transformation of -//! multilinear-to-univariate claims and within the KZG commitment scheme -//! implementation itself). (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (``). -//! Compared to pure HyperKZG, this optimisation in theory improves prover (at -//! cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM) -#![allow(non_snake_case)] -use core::marker::PhantomData; -use std::sync::Arc; - -use ff::{Field, PrimeFieldBits}; -use group::{prime::PrimeCurveAffine as _, Curve, Group as _}; -use itertools::Itertools as _; -use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rayon::{ - iter::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator, - }, - prelude::*, -}; -use ref_cast::RefCast as _; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - provider::{ - kzg_commitment::{KZGCommitmentEngine, KZGProverKey, KZGVerifierKey, UniversalKZGParam}, - pedersen::Commitment, - traits::DlogGroup, - util::iterators::IndexedParallelIteratorExt as _, - }, - spartan::{math::Math, polys::univariate::UniPoly}, - traits::{ - commitment::{CommitmentEngineTrait, Len}, - evaluation::EvaluationEngineTrait, - Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, - }, -}; - -/// Provides an implementation of a polynomial evaluation argument -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::Fr: Serialize", - deserialize = "E::G1Affine: 
Deserialize<'de>, E::Fr: Deserialize<'de>" -))] -pub struct EvaluationArgument { - comms: Vec, - evals: Vec>, - R_x: Vec, - C_Q: E::G1Affine, - C_H: E::G1Affine, -} - -/// Provides an implementation of a polynomial evaluation engine using KZG -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EvaluationEngine { - _p: PhantomData<(E, NE)>, -} - -// This impl block defines helper functions that are not a part of -// EvaluationEngineTrait, but that we will use to implement the trait methods. -impl EvaluationEngine -where - E: Engine, - NE: NovaEngine>, - E::G1: DlogGroup, - // the following bounds repeat existing, satisfied bounds on associated types of the above - // but are required since the equality constraints we use in the above do not transitively - // carry bounds we should be able to remove most of those constraints when rust supports - // associated_type_bounds - E::Fr: Serialize + DeserializeOwned, - E::G1Affine: Serialize + DeserializeOwned, - E::G1Affine: TranscriptReprTrait, // TODO: this bound on DlogGroup is really unusable! 
- E::G2Affine: Serialize + DeserializeOwned, - E::Fr: PrimeFieldBits + TranscriptReprTrait, - ::Base: TranscriptReprTrait, -{ - fn compute_challenge( - com: &[E::G1Affine], - transcript: &mut impl TranscriptEngineTrait, - ) -> E::Fr { - transcript.absorb(b"c", &com); - transcript.squeeze(b"c").unwrap() - } - - // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, - // (f_i(u_j))_{i=0..k-1,j=0..t-1}) - // It is assumed that both 'C' and 'u' are already absorbed by the transcript - fn get_batch_challenge( - v: &[Vec], - transcript: &mut impl TranscriptEngineTrait, - ) -> E::Fr { - transcript.absorb( - b"v", - &v.iter() - .flatten() - .cloned() - .collect::>() - .as_slice(), - ); - - transcript.squeeze(b"r").unwrap() - } - - fn compute_a(c_q: &E::G1Affine, transcript: &mut impl TranscriptEngineTrait) -> E::Fr { - transcript.absorb(b"C_Q", c_q); - transcript.squeeze(b"a").unwrap() - } - - fn compute_pi_polynomials(hat_P: &[E::Fr], point: &[E::Fr]) -> Vec> { - let mut polys: Vec> = Vec::new(); - polys.push(hat_P.to_vec()); - - for i in 0..point.len() - 1 { - let Pi_len = polys[i].len() / 2; - let mut Pi = vec![E::Fr::ZERO; Pi_len]; - - (0..Pi_len) - .into_par_iter() - .map(|j| { - point[point.len() - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) - + polys[i][2 * j] - }) - .collect_into_vec(&mut Pi); - - polys.push(Pi); - } - - assert_eq!(polys.len(), hat_P.len().log_2()); - - polys - } - - fn compute_commitments( - ck: &UniversalKZGParam, - _C: &Commitment, - polys: &[Vec], - ) -> Vec { - let comms: Vec = (1..polys.len()) - .into_par_iter() - .map(|i| >::commit(ck, &polys[i]).comm) - .collect(); - - let mut comms_affine: Vec = vec![E::G1Affine::identity(); comms.len()]; - NE::GE::batch_normalize(&comms, &mut comms_affine); - comms_affine - } - - fn compute_evals(polys: &[Vec], u: &[E::Fr]) -> Vec> { - let mut v = vec![vec!(E::Fr::ZERO; polys.len()); u.len()]; - v.par_iter_mut().enumerate().for_each(|(i, v_i)| { - // for each point u - 
v_i.par_iter_mut().zip_eq(polys).for_each(|(v_ij, f)| { - // for each poly f (except the last one - since it is constant) - *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); - }); - }); - v - } - - fn compute_k_polynomial( - batched_Pi: &UniPoly, - Q_x: &UniPoly, - D: &UniPoly, - R_x: &UniPoly, - a: E::Fr, - ) -> UniPoly { - let mut tmp = Q_x.clone(); - tmp *= &D.evaluate(&a); - tmp[0] += &R_x.evaluate(&a); - let mut K_x = batched_Pi.clone(); - K_x -= &tmp; - K_x - } -} - -impl EvaluationEngineTrait for EvaluationEngine -where - E: MultiMillerLoop, - NE: NovaEngine>, - E::Fr: Serialize + DeserializeOwned, - E::G1Affine: Serialize + DeserializeOwned, - E::G2Affine: Serialize + DeserializeOwned, - E::G1: DlogGroup, - ::Base: TranscriptReprTrait, /* Note: due to the move of the bound - * TranscriptReprTrait on G::Base - * from Group to Engine */ - E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional - E::Fr: TranscriptReprTrait, - E::G1Affine: TranscriptReprTrait, -{ - type EvaluationArgument = EvaluationArgument; - type ProverKey = KZGProverKey; - type VerifierKey = KZGVerifierKey; - - fn setup(ck: Arc>) -> (Self::ProverKey, Self::VerifierKey) { - let len = ck.length() - 1; - UniversalKZGParam::trim(ck, len) - } - - fn prove( - ck: &UniversalKZGParam, - _pk: &Self::ProverKey, - transcript: &mut ::TE, - _C: &Commitment, - hat_P: &[E::Fr], - point: &[E::Fr], - _eval: &E::Fr, - ) -> Result { - let x: Vec = point.to_vec(); - let ell = x.len(); - let n = hat_P.len(); - assert_eq!(n, 1 << ell); // Below we assume that n is a power of two - - // Phase 1 -- create commitments com_1, ..., com_\ell - // We do not compute final Pi (and its commitment as well since it is already - // committed according to EvaluationEngineTrait API) as it is constant and - // equals to 'eval' also known to verifier, so can be derived on its - // side as well - let polys = Self::compute_pi_polynomials(hat_P, point); - let comms = Self::compute_commitments(ck, _C, &polys); - 
- // Phase 2 - let r = Self::compute_challenge(&comms, transcript); - let u = vec![r, -r, r * r]; - let evals = Self::compute_evals(&polys, &u); - - // Phase 3 - // Compute B(x) = f_0(x) + q * f_1(x) + ... + q^(k-1) * f_{k-1}(x) - let q = Self::get_batch_challenge(&evals, transcript); - let batched_Pi: UniPoly = polys.into_par_iter().map(UniPoly::new).rlc(&q); - - // Q(x), R(x) = P(x) / D(x), where D(x) = (x - r) * (x + r) * (x - r^2) = 1 * - // x^3 - r^2 * x^2 - r^2 * x + r^4 - let D = UniPoly::new(vec![u[2] * u[2], -u[2], -u[2], E::Fr::from(1)]); - let (Q_x, R_x) = batched_Pi.divide_with_q_and_r(&D).unwrap(); - - let C_Q = >::commit(ck, &Q_x.coeffs) - .comm - .to_affine(); - - let a = Self::compute_a(&C_Q, transcript); - - // K(x) = P(x) - Q(x) * D(a) - R(a), note that R(a) should be subtracted from a - // free term of polynomial - let K_x = Self::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - // TODO: since this is a usual KZG10 we should use it as utility instead - let h = K_x.divide_minus_u(a); - let C_H = >::commit(ck, &h.coeffs) - .comm - .to_affine(); - - Ok(EvaluationArgument:: { - comms, - evals, - R_x: R_x.coeffs, - C_Q, - C_H, - }) - } - - /// A method to verify purported evaluations of a batch of polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut ::TE, - C: &Commitment, - point: &[E::Fr], - P_of_x: &E::Fr, - pi: &Self::EvaluationArgument, - ) -> Result<(), NovaError> { - let r = Self::compute_challenge(&pi.comms, transcript); - let u = [r, -r, r * r]; - - if pi.evals.len() != u.len() { - return Err(NovaError::ProofVerifyError); - } - if pi.R_x.len() != u.len() { - return Err(NovaError::ProofVerifyError); - } - - let mut comms = pi.comms.to_vec(); - comms.insert(0, C.comm.to_affine()); - - let q = Self::get_batch_challenge(&pi.evals, transcript); - let R_x = UniPoly::new(pi.R_x.clone()); - - let verification_failed = pi.evals.iter().zip_eq(u.iter()).any(|(evals_i, u_i)| { - // here we check correlation between R polynomial 
and batched evals, e.g.: - // 1) R(r) == eval at r - // 2) R(-r) == eval at -r - // 3) R(r^2) == eval at r^2 - let batched_eval = UniPoly::ref_cast(evals_i).evaluate(&q); - batched_eval != R_x.evaluate(u_i) - }); - if verification_failed { - return Err(NovaError::ProofVerifyError); - } - - // here we check that Pi polynomials were correctly constructed by the prover, - // using 'r' as a random point, e.g: P_i_even = P_i(r) + P_i(-r) * 1/2 - // P_i_odd = P_i(r) - P_i(-r) * 1/2*r - // P_i+1(r^2) == (1 - point_i) * P_i_even + point_i * P_i_odd -> should hold, - // according to Gemini transformation - let mut point = point.to_vec(); - point.reverse(); - - let r_mul_2 = E::Fr::from(2) * r; - #[allow(clippy::disallowed_methods)] - let verification_failed = pi.evals[0] - .par_iter() - .chain(&[*P_of_x]) - .zip_eq(pi.evals[1].par_iter().chain(&[*P_of_x])) - .zip(pi.evals[2][1..].par_iter().chain(&[*P_of_x])) - .enumerate() - .any(|(index, ((eval_r, eval_minus_r), eval_r_squared))| { - // some optimisation to avoid using expensive inversions: - // P_i+1(r^2) == (1 - point_i) * (P_i(r) + P_i(-r)) * 1/2 + point_i * (P_i(r) - - // P_i(-r)) * 1/2 * r is equivalent to: - // 2 * r * P_i+1(r^2) == r * (1 - point_i) * (P_i(r) + P_i(-r)) + point_i * - // (P_i(r) - P_i(-r)) - - let even = *eval_r + eval_minus_r; - let odd = *eval_r - eval_minus_r; - let right = r * ((E::Fr::ONE - point[index]) * even) + (point[index] * odd); - let left = *eval_r_squared * r_mul_2; - left != right - }); - - if verification_failed { - return Err(NovaError::ProofVerifyError); - } - - let C_P: E::G1 = comms.par_iter().map(|comm| comm.to_curve()).rlc(&q); - let C_Q = pi.C_Q; - let C_H = pi.C_H; - let r_squared = u[2]; - - // D = (x - r) * (x + r) * (x - r^2) = 1 * x^3 - r^2 * x^2 - r^2 * x + r^4 - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - E::Fr::from(1), - ]); - - let a = Self::compute_a(&C_Q, transcript); - - let C_K = C_P - (C_Q * D.evaluate(&a) + vk.g * 
R_x.evaluate(&a)); - - let pairing_inputs: Vec<(E::G1Affine, E::G2Prepared)> = vec![ - (C_H, vk.beta_h.into()), - ((C_H * (-a) - C_K).to_affine(), vk.h.into()), - ]; - - #[allow(clippy::map_identity)] - let pairing_input_refs = pairing_inputs - .iter() - .map(|(a, b)| (a, b)) - .collect::>(); - - let pairing_result = - E::multi_miller_loop(pairing_input_refs.as_slice()).final_exponentiation(); - let successful: bool = pairing_result.is_identity().into(); - if !successful { - return Err(NovaError::ProofVerifyError); - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use bincode::Options; - use expect_test::expect; - use halo2curves::bn256::G1; - use itertools::Itertools; - - use super::*; - use crate::{ - provider::{ - keccak::Keccak256Transcript, - util::{ - iterators::DoubleEndedIteratorExt as _, test_utils::prove_verify_from_num_vars, - }, - }, - spartan::powers, - traits::TranscriptEngineTrait, - zip_with, CommitmentEngineTrait, CommitmentKey, - }; - - type E = halo2curves::bn256::Bn256; - type NE = crate::provider::Bn256EngineKZG; - type Fr = ::Scalar; - - fn test_commitment_to_k_polynomial_correctness( - ck: &CommitmentKey, - C: &Commitment, - poly: &[Fr], - point: &[Fr], - _eval: &Fr, - ) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let mut comms = EvaluationEngine::::compute_commitments(ck, C, &polys); - comms.insert(0, C.comm.to_affine()); - - let q = Fr::from(8165763); - let q_powers = batch_challenge_powers(q, polys.len()); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(1354678); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let a = Fr::from(938576); - - let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - let mut C_P = G1::identity(); - 
q_powers.iter().zip_eq(comms.iter()).for_each(|(q_i, C_i)| { - C_P += *C_i * q_i; - }); - - let C_Q = <::CE as CommitmentEngineTrait< - NE, - >>::commit(ck, &Q_x.coeffs) - .comm - .to_affine(); - - // Check that Cp - Cq * D(a) - g1 * R(a) == MSM(ck, K(x)) - let C_K = C_P - C_Q * D.evaluate(&a) - ck.powers_of_g[0] * R_x.evaluate(&a); - - let C_K_expected = - <::CE as CommitmentEngineTrait>::commit( - ck, - &K_x.coeffs, - ) - .comm - .to_affine(); - - assert_eq!(C_K_expected, C_K.to_affine()); - } - - fn test_k_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let q = Fr::from(8165763); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(56263); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let a = Fr::from(190837645); - - let K_x = EvaluationEngine::::compute_k_polynomial(&batched_Pi, &Q_x, &D, &R_x, a); - - assert_eq!(Fr::from(0), K_x.evaluate(&a)); - } - - fn test_d_polynomial_correctness(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - let q = Fr::from(8165763); - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let r = Fr::from(2895776832); - let r_squared = r * r; - - let divident = batched_Pi.clone(); - // D(x) = (x - r) * (x + r) * (x - r^2) - let D = UniPoly::new(vec![ - r_squared * r_squared, - -r_squared, - -r_squared, - Fr::from(1), - ]); - let (Q_x, R_x) = divident.divide_with_q_and_r(&D).unwrap(); - - let evaluation_scalar = Fr::from(182746); - assert_eq!( - batched_Pi.evaluate(&evaluation_scalar), - D.evaluate(&evaluation_scalar) * Q_x.evaluate(&evaluation_scalar) - + R_x.evaluate(&evaluation_scalar) - ); - - // Check that Q(x) = (P(x) - 
R(x)) / D(x) - let mut P_x = batched_Pi.clone(); - let minus_R_x = UniPoly::new( - R_x.clone() - .coeffs - .into_iter() - .map(|coeff| -coeff) - .collect::>(), - ); - P_x += &minus_R_x; - - let divident = P_x.clone(); - let (Q_x_recomputed, _) = divident.divide_with_q_and_r(&D).unwrap(); - - assert_eq!(Q_x, Q_x_recomputed); - } - - fn test_batching_property_on_evaluation(poly: &[Fr], point: &[Fr], _eval: &Fr) { - let polys = EvaluationEngine::::compute_pi_polynomials(poly, point); - - let q = Fr::from(97652); - let u = [Fr::from(10), Fr::from(20), Fr::from(50)]; - - let batched_Pi: UniPoly = polys.clone().into_iter().map(UniPoly::new).rlc(&q); - - let q_powers = batch_challenge_powers(q, polys.len()); - for evaluation_scalar in u.iter() { - let evals = polys - .clone() - .into_iter() - .map(|poly| UniPoly::new(poly).evaluate(evaluation_scalar)) - .collect::>(); - - let expected = zip_with!((evals.iter(), q_powers.iter()), |eval, q| eval * q) - .collect::>() - .into_iter() - .sum::(); - - let actual = batched_Pi.evaluate(evaluation_scalar); - assert_eq!(expected, actual); - } - } - - #[test] - fn test_hyperkzg_shplonk_unit_tests() { - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - - // eval = 57 - let eval = Fr::from(57); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", poly.len()); - - let ck = Arc::new(ck); - let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); - - test_batching_property_on_evaluation(&poly, &point, &eval); - test_d_polynomial_correctness(&poly, &point, &eval); - test_k_polynomial_correctness(&poly, &point, &eval); - test_commitment_to_k_polynomial_correctness(&ck, &C, &poly, &point, &eval); - } - - #[test] - fn test_hyperkzg_shplonk_pcs() { - let n = 8; - - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - 
Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - - // eval = 57 - let eval = Fr::from(57); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C: Commitment = KZGCommitmentEngine::commit(&ck, &poly); - - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - &poly, - &point, - &eval, - ) - .unwrap(); - - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) - .unwrap(); - } - - #[test] - fn test_hyperkzg_shplonk_pcs_negative() { - let n = 8; - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - // eval = 57 - let eval = Fr::from(57); - - // eval = 57 - let eval1 = Fr::from(56); // wrong eval - test_negative_inner(n, &poly, &point, &eval1); - - // point = [4,3,8] - let point1 = vec![Fr::from(4), Fr::from(3), Fr::from(7)]; // wrong point - test_negative_inner(n, &poly, &point1, &eval); - - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly1 = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(200), - Fr::from(100), - ]; // wrong poly - test_negative_inner(n, &poly1, &point, &eval); - } - - fn test_negative_inner(n: usize, poly: &[Fr], point: &[Fr], eval: &Fr) { - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - 
EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C: Commitment = KZGCommitmentEngine::commit(&ck, poly); - - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - poly, - point, - eval, - ) - .unwrap(); - - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript, - &C, - point, - eval, - &proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_pcs_negative_wrong_commitment() { - let n = 8; - // poly = [1, 2, 1, 4, 1, 2, 1, 4] - let poly = vec![ - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - Fr::ONE, - Fr::from(2), - Fr::from(1), - Fr::from(4), - ]; - // point = [4,3,8] - let point = vec![Fr::from(4), Fr::from(3), Fr::from(8)]; - // eval = 57 - let eval = Fr::from(57); - - // altered_poly = [85, 84, 83, 82, 81, 80, 79, 78] - let altered_poly = vec![ - Fr::from(85), - Fr::from(84), - Fr::from(83), - Fr::from(82), - Fr::from(81), - Fr::from(80), - Fr::from(79), - Fr::from(78), - ]; - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - - let C1: Commitment = KZGCommitmentEngine::commit(&ck, &poly); // correct commitment - let C2: Commitment = KZGCommitmentEngine::commit(&ck, &altered_poly); // wrong commitment - - test_negative_inner_commitment(&poly, &point, &eval, &ck, &C1, &C2); // here we check detection when proof and commitment do not correspond - test_negative_inner_commitment(&poly, &point, &eval, &ck, &C2, &C2); // here we check detection when proof was built with wrong commitment - } - - fn test_negative_inner_commitment( - poly: &[Fr], - point: &[Fr], - eval: &Fr, - ck: &CommitmentKey, - C_prover: &Commitment, - C_verifier: &Commitment, - ) { - let ck = Arc::new(ck.clone()); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - let mut prover_transcript = 
Keccak256Transcript::new(b"TestEval"); - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - C_prover, - poly, - point, - eval, - ) - .unwrap(); - - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript, - C_verifier, - point, - eval, - &proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_eval() { - // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 - let n = 4; - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] - let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; - - let C = as CommitmentEngineTrait>::commit(&ck, &poly); - - let test_inner = |point: Vec, eval: Fr| -> Result<(), NovaError> { - let mut tr = Keccak256Transcript::::new(b"TestEval"); - let proof = - EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval) - .unwrap(); - let mut tr = Keccak256Transcript::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut tr, &C, &point, &eval, &proof) - }; - - // Call the prover with a (point, eval) pair. 
- // The prover does not recompute so it may produce a proof, but it should not - // verify - let point = vec![Fr::from(0), Fr::from(0)]; - let eval = Fr::ONE; - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(0), Fr::from(1)]; - let eval = Fr::from(2); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(1), Fr::from(1)]; - let eval = Fr::from(4); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(0), Fr::from(2)]; - let eval = Fr::from(3); - test_inner(point, eval).unwrap(); - - let point = vec![Fr::from(2), Fr::from(2)]; - let eval = Fr::from(9); - test_inner(point, eval).unwrap(); - - // Try a couple incorrect evaluations and expect failure - let point = vec![Fr::from(2), Fr::from(2)]; - let eval = Fr::from(50); - assert!(test_inner(point, eval).is_err()); - - let point = vec![Fr::from(0), Fr::from(2)]; - let eval = Fr::from(4); - assert!(test_inner(point, eval).is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_transcript_correctness() { - let n = 4; - - // poly = [1, 2, 1, 4] - let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)]; - - // point = [4,3] - let point = vec![Fr::from(4), Fr::from(3)]; - - // eval = 28 - let eval = Fr::from(28); - - let ck: CommitmentKey = - as CommitmentEngineTrait>::setup(b"test", n); - let ck = Arc::new(ck); - let (pk, vk): (KZGProverKey, KZGVerifierKey) = - EvaluationEngine::::setup(ck.clone()); - - // make a commitment - let C = KZGCommitmentEngine::commit(&ck, &poly); - - // prove an evaluation - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof = EvaluationEngine::::prove( - &ck, - &pk, - &mut prover_transcript, - &C, - &poly, - &point, - &eval, - ) - .unwrap(); - let post_c_p = prover_transcript.squeeze(b"c").unwrap(); - - // verify the evaluation - let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); - EvaluationEngine::::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof) - .unwrap(); - let post_c_v = 
verifier_transcript.squeeze(b"c").unwrap(); - - // check if the prover transcript and verifier transcript are kept in the - // same state - assert_eq!(post_c_p, post_c_v); - - let proof_bytes = bincode::DefaultOptions::new() - .with_big_endian() - .with_fixint_encoding() - .serialize(&proof) - .unwrap(); - expect!["432"].assert_eq(&proof_bytes.len().to_string()); - - // Change the proof and expect verification to fail - let mut bad_proof = proof.clone(); - bad_proof.comms[0] = (bad_proof.comms[0] + bad_proof.comms[0] * Fr::from(123)).to_affine(); - let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); - assert!(EvaluationEngine::::verify( - &vk, - &mut verifier_transcript2, - &C, - &point, - &eval, - &bad_proof - ) - .is_err()); - } - - #[test] - fn test_hyperkzg_shplonk_more() { - // test the hyperkzg prover and verifier with random instances (derived from a - // seed) - for num_vars in [4, 5, 6] { - prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); - } - } - - /// Compute powers of q : (1, q, q^2, ..., q^(k-1)) - fn batch_challenge_powers(q: Fr, k: usize) -> Vec { - powers(&q, k) - } -} diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs deleted file mode 100644 index 9ae85be..0000000 --- a/src/provider/ipa_pc.rs +++ /dev/null @@ -1,394 +0,0 @@ -//! This module implements `EvaluationEngine` using an IPA-based polynomial -//! 
commitment scheme -use core::iter; -use std::{marker::PhantomData, sync::Arc}; - -use ff::Field; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::SimpleDigestible, - errors::{NovaError, PCSError}, - provider::{pedersen::CommitmentKeyExtTrait, traits::DlogGroup, util::field::batch_invert}, - spartan::polys::eq::EqPolynomial, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - evaluation::EvaluationEngineTrait, - Engine, TranscriptEngineTrait, TranscriptReprTrait, - }, - zip_with, Commitment, CommitmentKey, CompressedCommitment, CE, -}; - -/// Provides an implementation of the prover key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey { - pub ck_s: CommitmentKey, -} - -/// Provides an implementation of the verifier key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey { - pub ck_v: Arc>, - pub ck_s: CommitmentKey, -} - -impl SimpleDigestible for VerifierKey {} - -/// Provides an implementation of a polynomial evaluation engine using IPA -#[derive(Clone, Debug)] -pub struct EvaluationEngine { - _p: PhantomData, -} - -impl EvaluationEngineTrait for EvaluationEngine -where - E: Engine, - E::GE: DlogGroup, - CommitmentKey: CommitmentKeyExtTrait, -{ - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - type EvaluationArgument = InnerProductArgument; - - fn setup( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - ) -> (Self::ProverKey, Self::VerifierKey) { - let ck_c = E::CE::setup(b"ipa", 1); - - let pk = ProverKey { ck_s: ck_c.clone() }; - let vk = VerifierKey { - ck_v: ck.clone(), - ck_s: ck_c, - }; - - (pk, vk) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - transcript: &mut E::TE, - comm: &Commitment, - poly: &[E::Scalar], - point: &[E::Scalar], - eval: &E::Scalar, - ) -> Result { - let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); - let w = 
InnerProductWitness::new(poly); - - InnerProductArgument::prove(ck.clone(), pk.ck_s.clone(), &u, &w, transcript) - } - - /// A method to verify purported evaluations of a batch of polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut E::TE, - comm: &Commitment, - point: &[E::Scalar], - eval: &E::Scalar, - arg: &Self::EvaluationArgument, - ) -> Result<(), NovaError> { - let u = InnerProductInstance::new(comm, &EqPolynomial::evals_from_points(point), eval); - - arg.verify(&vk.ck_v, vk.ck_s.clone(), 1 << point.len(), &u, transcript)?; - - Ok(()) - } -} - -fn inner_product(a: &[T], b: &[T]) -> T { - zip_with!(par_iter, (a, b), |x, y| *x * y).sum() -} - -/// An inner product instance consists of a commitment to a vector `a` and -/// another vector `b` and the claim that c = . -struct InnerProductInstance { - comm_a_vec: Commitment, - b_vec: Vec, - c: E::Scalar, -} - -impl InnerProductInstance -where - E: Engine, - E::GE: DlogGroup, -{ - fn new(comm_a_vec: &Commitment, b_vec: &[E::Scalar], c: &E::Scalar) -> Self { - Self { - comm_a_vec: *comm_a_vec, - b_vec: b_vec.to_vec(), - c: *c, - } - } -} - -impl TranscriptReprTrait for InnerProductInstance { - fn to_transcript_bytes(&self) -> Vec { - // we do not need to include self.b_vec as in our context it is produced from - // the transcript - [ - self.comm_a_vec.to_transcript_bytes(), - self.c.to_transcript_bytes(), - ] - .concat() - } -} - -struct InnerProductWitness { - a_vec: Vec, -} - -impl InnerProductWitness { - fn new(a_vec: &[E::Scalar]) -> Self { - Self { - a_vec: a_vec.to_vec(), - } - } -} - -/// An inner product argument -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct InnerProductArgument { - pub(in crate::provider) L_vec: Vec>, - pub(in crate::provider) R_vec: Vec>, - pub(in crate::provider) a_hat: E::Scalar, -} - -impl InnerProductArgument -where - E: Engine, - E::GE: DlogGroup, - CommitmentKey: CommitmentKeyExtTrait, -{ - const fn protocol_name() -> &'static 
[u8] { - b"IPA" - } - - fn prove( - ck: CommitmentKey, - mut ck_c: CommitmentKey, - U: &InnerProductInstance, - W: &InnerProductWitness, - transcript: &mut E::TE, - ) -> Result { - transcript.dom_sep(Self::protocol_name()); - - let (ck, _) = ck.split_at(U.b_vec.len()); - - if U.b_vec.len() != W.a_vec.len() { - return Err(NovaError::InvalidInputLength); - } - - // absorb the instance in the transcript - transcript.absorb(b"U", U); - - // sample a random base for committing to the inner product - let r = transcript.squeeze(b"r")?; - ck_c.scale(&r); - - // a closure that executes a step of the recursive inner product argument - let prove_inner = |a_vec: &[E::Scalar], - b_vec: &[E::Scalar], - ck: CommitmentKey, - transcript: &mut E::TE| - -> Result< - ( - CompressedCommitment, - CompressedCommitment, - Vec, - Vec, - CommitmentKey, - ), - NovaError, - > { - let n = a_vec.len(); - let (ck_L, ck_R) = ck.split_at(n / 2); - - let c_L = inner_product(&a_vec[0..n / 2], &b_vec[n / 2..n]); - let c_R = inner_product(&a_vec[n / 2..n], &b_vec[0..n / 2]); - - let L = CE::::commit( - &ck_R.combine(&ck_c), - &a_vec[0..n / 2] - .iter() - .chain(iter::once(&c_L)) - .copied() - .collect::>(), - ) - .compress(); - let R = CE::::commit( - &ck_L.combine(&ck_c), - &a_vec[n / 2..n] - .iter() - .chain(iter::once(&c_R)) - .copied() - .collect::>(), - ) - .compress(); - - transcript.absorb(b"L", &L); - transcript.absorb(b"R", &R); - - let r = transcript.squeeze(b"r")?; - let r_inverse = r.invert().unwrap(); - - // fold the left half and the right half - let a_vec_folded = zip_with!( - (a_vec[0..n / 2].par_iter(), a_vec[n / 2..n].par_iter()), - |a_L, a_R| *a_L * r + r_inverse * *a_R - ) - .collect::>(); - - let b_vec_folded = zip_with!( - (b_vec[0..n / 2].par_iter(), b_vec[n / 2..n].par_iter()), - |b_L, b_R| *b_L * r_inverse + r * *b_R - ) - .collect::>(); - - let ck_folded = CommitmentKeyExtTrait::fold(&ck_L, &ck_R, &r_inverse, &r); - - Ok((L, R, a_vec_folded, b_vec_folded, ck_folded)) - }; - - 
// two vectors to hold the logarithmic number of group elements - let mut L_vec: Vec> = Vec::new(); - let mut R_vec: Vec> = Vec::new(); - - // we create mutable copies of vectors and generators - let mut a_vec = W.a_vec.to_vec(); - let mut b_vec = U.b_vec.to_vec(); - let mut ck = ck; - for _i in 0..usize::try_from(U.b_vec.len().ilog2()).unwrap() { - let (L, R, a_vec_folded, b_vec_folded, ck_folded) = - prove_inner(&a_vec, &b_vec, ck, transcript)?; - L_vec.push(L); - R_vec.push(R); - - a_vec = a_vec_folded; - b_vec = b_vec_folded; - ck = ck_folded; - } - - Ok(Self { - L_vec, - R_vec, - a_hat: a_vec[0], - }) - } - - fn verify( - &self, - ck: &CommitmentKey, - mut ck_c: CommitmentKey, - n: usize, - U: &InnerProductInstance, - transcript: &mut E::TE, - ) -> Result<(), NovaError> { - let (ck, _) = ck.clone().split_at(U.b_vec.len()); - - transcript.dom_sep(Self::protocol_name()); - if U.b_vec.len() != n - || n != (1 << self.L_vec.len()) - || self.L_vec.len() != self.R_vec.len() - || self.L_vec.len() >= 32 - { - return Err(NovaError::InvalidInputLength); - } - - // absorb the instance in the transcript - transcript.absorb(b"U", U); - - // sample a random base for committing to the inner product - let r = transcript.squeeze(b"r")?; - ck_c.scale(&r); - - let P = U.comm_a_vec + CE::::commit(&ck_c, &[U.c]); - - // compute a vector of public coins using self.L_vec and self.R_vec - let r = (0..self.L_vec.len()) - .map(|i| { - transcript.absorb(b"L", &self.L_vec[i]); - transcript.absorb(b"R", &self.R_vec[i]); - transcript.squeeze(b"r") - }) - .collect::, NovaError>>()?; - - // precompute scalars necessary for verification - let r_square: Vec = (0..self.L_vec.len()) - .into_par_iter() - .map(|i| r[i] * r[i]) - .collect(); - let r_inverse = batch_invert(r.clone())?; - let r_inverse_square: Vec = (0..self.L_vec.len()) - .into_par_iter() - .map(|i| r_inverse[i] * r_inverse[i]) - .collect(); - - // compute the vector with the tensor structure - let s = { - let mut s = 
vec![E::Scalar::ZERO; n]; - s[0] = { - let mut v = E::Scalar::ONE; - for r_inverse_i in r_inverse { - v *= r_inverse_i; - } - v - }; - for i in 1..n { - let pos_in_r = (31 - (i as u32).leading_zeros()) as usize; - s[i] = s[i - (1 << pos_in_r)] * r_square[(self.L_vec.len() - 1) - pos_in_r]; - } - s - }; - - let ck_hat = { - let c = CE::::commit(&ck, &s).compress(); - CommitmentKey::::reinterpret_commitments_as_ck(&[c])? - }; - - let b_hat = inner_product(&U.b_vec, &s); - - let P_hat = { - let ck_folded = { - let ck_L = CommitmentKey::::reinterpret_commitments_as_ck(&self.L_vec)?; - let ck_R = CommitmentKey::::reinterpret_commitments_as_ck(&self.R_vec)?; - let ck_P = CommitmentKey::::reinterpret_commitments_as_ck(&[P.compress()])?; - ck_L.combine(&ck_R).combine(&ck_P) - }; - - CE::::commit( - &ck_folded, - &r_square - .iter() - .chain(r_inverse_square.iter()) - .chain(iter::once(&E::Scalar::ONE)) - .copied() - .collect::>(), - ) - }; - - if P_hat == CE::::commit(&ck_hat.combine(&ck_c), &[self.a_hat, self.a_hat * b_hat]) { - Ok(()) - } else { - Err(NovaError::PCSError(PCSError::InvalidPCS)) - } - } -} - -#[cfg(test)] -mod test { - use crate::provider::{ - ipa_pc::EvaluationEngine, util::test_utils::prove_verify_from_num_vars, GrumpkinEngine, - }; - - #[test] - fn test_multiple_polynomial_size() { - for num_vars in [4, 5, 6] { - prove_verify_from_num_vars::<_, EvaluationEngine>(num_vars); - } - } -} diff --git a/src/provider/keccak.rs b/src/provider/keccak.rs deleted file mode 100644 index cd63658..0000000 --- a/src/provider/keccak.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! This module provides an implementation of `TranscriptEngineTrait` using -//! 
keccak256 -use core::marker::PhantomData; - -use sha3::{Digest, Keccak256}; - -use crate::{ - errors::NovaError, - traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, -}; - -const PERSONA_TAG: &[u8] = b"NoTR"; -const DOM_SEP_TAG: &[u8] = b"NoDS"; -const KECCAK256_STATE_SIZE: usize = 64; -const KECCAK256_PREFIX_CHALLENGE_LO: u8 = 0; -const KECCAK256_PREFIX_CHALLENGE_HI: u8 = 1; - -/// Provides an implementation of `TranscriptEngine` -#[derive(Debug)] -pub struct Keccak256Transcript { - round: u16, - state: [u8; KECCAK256_STATE_SIZE], - transcript: Keccak256, - _p: PhantomData, -} - -fn compute_updated_state(keccak_instance: Keccak256, input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { - let mut updated_instance = keccak_instance; - updated_instance.update(input); - - let input_lo = &[KECCAK256_PREFIX_CHALLENGE_LO]; - let input_hi = &[KECCAK256_PREFIX_CHALLENGE_HI]; - - let mut hasher_lo = updated_instance.clone(); - let mut hasher_hi = updated_instance; - - hasher_lo.update(input_lo); - hasher_hi.update(input_hi); - - let output_lo = hasher_lo.finalize(); - let output_hi = hasher_hi.finalize(); - - [output_lo, output_hi] - .concat() - .as_slice() - .try_into() - .unwrap() -} - -impl TranscriptEngineTrait for Keccak256Transcript { - fn new(label: &'static [u8]) -> Self { - let keccak_instance = Keccak256::new(); - let input = [PERSONA_TAG, label].concat(); - let output = compute_updated_state(keccak_instance.clone(), &input); - - Self { - round: 0u16, - state: output, - transcript: keccak_instance, - _p: PhantomData, - } - } - - fn squeeze(&mut self, label: &'static [u8]) -> Result { - // we gather the full input from the round, preceded by the current state of the - // transcript - let input = [ - DOM_SEP_TAG, - self.round.to_le_bytes().as_ref(), - self.state.as_ref(), - label, - ] - .concat(); - let output = compute_updated_state(self.transcript.clone(), &input); - - // update state - self.round = { - self.round - .checked_add(1) - 
.ok_or(NovaError::InternalTranscriptError)? - }; - self.state.copy_from_slice(&output); - self.transcript = Keccak256::new(); - - // squeeze out a challenge - Ok(E::Scalar::from_uniform(&output)) - } - - fn absorb>(&mut self, label: &'static [u8], o: &T) { - self.transcript.update(label); - self.transcript.update(&o.to_transcript_bytes()); - } - - fn dom_sep(&mut self, bytes: &'static [u8]) { - self.transcript.update(DOM_SEP_TAG); - self.transcript.update(bytes); - } -} - -#[cfg(test)] -mod tests { - use ff::PrimeField; - use rand::Rng; - use sha3::{Digest, Keccak256}; - - use crate::{ - provider::{keccak::Keccak256Transcript, Bn256EngineKZG, GrumpkinEngine}, - traits::{Engine, PrimeFieldExt, TranscriptEngineTrait, TranscriptReprTrait}, - }; - - fn test_keccak_transcript_with( - expected_h1: &'static str, - expected_h2: &'static str, - ) { - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(b"test"); - - // two scalars - let s1 = ::Scalar::from(2u64); - let s2 = ::Scalar::from(5u64); - - // add the scalars to the transcript - transcript.absorb(b"s1", &s1); - transcript.absorb(b"s2", &s2); - - // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); - assert_eq!(hex::encode(c1.to_repr().as_ref()), expected_h1); - - // a scalar - let s3 = ::Scalar::from(128u64); - - // add the scalar to the transcript - transcript.absorb(b"s3", &s3); - - // make a challenge - let c2: ::Scalar = transcript.squeeze(b"c2").unwrap(); - assert_eq!(hex::encode(c2.to_repr().as_ref()), expected_h2); - } - - #[test] - fn test_keccak_transcript() { - test_keccak_transcript_with::( - "9fb71e3b74bfd0b60d97349849b895595779a240b92a6fae86bd2812692b6b0e", - "bfd4c50b7d6317e9267d5d65c985eb455a3561129c0b3beef79bfc8461a84f18", - ); - } - - #[test] - fn test_keccak_example() { - let mut hasher = Keccak256::new(); - hasher.update(0xffffffff_u32.to_le_bytes()); - let output: [u8; 32] = hasher.finalize().into(); - assert_eq!( - hex::encode(output), - 
"29045a592007d0c246ef02c2223570da9522d0cf0f73282c79a1bc8f0bb2c238" - ); - } - - use super::{ - DOM_SEP_TAG, KECCAK256_PREFIX_CHALLENGE_HI, KECCAK256_PREFIX_CHALLENGE_LO, - KECCAK256_STATE_SIZE, PERSONA_TAG, - }; - - fn compute_updated_state_for_testing(input: &[u8]) -> [u8; KECCAK256_STATE_SIZE] { - let input_lo = [input, &[KECCAK256_PREFIX_CHALLENGE_LO]].concat(); - let input_hi = [input, &[KECCAK256_PREFIX_CHALLENGE_HI]].concat(); - - let mut hasher_lo = Keccak256::new(); - let mut hasher_hi = Keccak256::new(); - - hasher_lo.update(&input_lo); - hasher_hi.update(&input_hi); - - let output_lo = hasher_lo.finalize(); - let output_hi = hasher_hi.finalize(); - - [output_lo, output_hi] - .concat() - .as_slice() - .try_into() - .unwrap() - } - - fn squeeze_for_testing( - transcript: &[u8], - round: u16, - state: [u8; KECCAK256_STATE_SIZE], - label: &'static [u8], - ) -> [u8; 64] { - let input = [ - transcript, - DOM_SEP_TAG, - round.to_le_bytes().as_ref(), - state.as_ref(), - label, - ] - .concat(); - compute_updated_state_for_testing(&input) - } - - // This test is meant to ensure compatibility between the incremental way of - // computing the transcript above, and the former, which materialized the - // entirety of the input vector before calling Keccak256 on it. 
- fn test_keccak_transcript_incremental_vs_explicit_with() { - let test_label = b"test"; - let mut transcript: Keccak256Transcript = Keccak256Transcript::new(test_label); - let mut rng = rand::thread_rng(); - - // ten scalars - let scalars = std::iter::from_fn(|| Some(::Scalar::from(rng.gen::()))) - .take(10) - .collect::>(); - - // add the scalars to the transcripts, - let mut manual_transcript: Vec = vec![]; - let labels = [ - b"s1", b"s2", b"s3", b"s4", b"s5", b"s6", b"s7", b"s8", b"s9", b"s0", - ]; - - for i in 0..10 { - transcript.absorb(&labels[i][..], &scalars[i]); - manual_transcript.extend(labels[i]); - manual_transcript.extend(scalars[i].to_transcript_bytes()); - } - - // compute the initial state - let input = [PERSONA_TAG, test_label].concat(); - let initial_state = compute_updated_state_for_testing(&input); - - // make a challenge - let c1: ::Scalar = transcript.squeeze(b"c1").unwrap(); - - let c1_bytes = squeeze_for_testing(&manual_transcript[..], 0u16, initial_state, b"c1"); - let to_hex = |g: E::Scalar| hex::encode(g.to_repr().as_ref()); - assert_eq!(to_hex(c1), to_hex(E::Scalar::from_uniform(&c1_bytes))); - } - - #[test] - fn test_keccak_transcript_incremental_vs_explicit() { - // test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - // test_keccak_transcript_incremental_vs_explicit_with::(); - } -} diff --git a/src/provider/kzg_commitment.rs b/src/provider/kzg_commitment.rs deleted file mode 100644 index e94a166..0000000 --- a/src/provider/kzg_commitment.rs +++ /dev/null @@ -1,349 +0,0 @@ -//! 
Commitment engine for KZG commitments - -use std::{io::Cursor, marker::PhantomData, sync::Arc}; - -use ff::{Field, PrimeField, PrimeFieldBits}; -use group::{prime::PrimeCurveAffine, Curve, Group as _}; -use halo2curves::serde::SerdeObject; -use pairing::Engine; -use rand::rngs::StdRng; -use rand_core::{CryptoRng, RngCore, SeedableRng}; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::SimpleDigestible, - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, - provider::{pedersen::Commitment, traits::DlogGroup, util::fb_msm}, - traits::{ - commitment::{CommitmentEngineTrait, Len}, - Engine as NovaEngine, Group, TranscriptReprTrait, - }, -}; - -/// `UniversalParams` are the universal parameters for the KZG10 scheme. -#[derive(Debug, Clone, Eq, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" -))] -pub struct UniversalKZGParam { - /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to - /// `degree`. - pub powers_of_g: Vec, - /// Group elements of the form `{ β^i H }`, where `i` ranges from 0 to - /// `degree`. - pub powers_of_h: Vec, -} - -impl PartialEq for UniversalKZGParam { - fn eq(&self, other: &Self) -> bool { - self.powers_of_g == other.powers_of_g && self.powers_of_h == other.powers_of_h - } -} -// for the purpose of the Len trait, we count commitment bases, i.e. 
G1 elements -impl Len for UniversalKZGParam { - fn length(&self) -> usize { - self.powers_of_g.len() - } -} - -/// `UnivariateProverKey` is used to generate a proof -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" -))] -pub struct KZGProverKey { - /// generators from the universal parameters - uv_params: Arc>, - /// offset at which we start reading into the SRS - offset: usize, - /// maximum supported size - supported_size: usize, -} - -impl KZGProverKey { - pub(in crate::provider) fn new( - uv_params: Arc>, - offset: usize, - supported_size: usize, - ) -> Self { - assert!( - uv_params.max_degree() >= offset + supported_size, - "not enough bases (req: {} from offset {}) in the UVKZGParams (length: {})", - supported_size, - offset, - uv_params.max_degree() - ); - Self { - uv_params, - offset, - supported_size, - } - } - - pub fn powers_of_g(&self) -> &[E::G1Affine] { - &self.uv_params.powers_of_g[self.offset..self.offset + self.supported_size] - } -} - -/// `UVKZGVerifierKey` is used to check evaluation proofs for a given -/// commitment. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -#[serde(bound(serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize",))] -pub struct KZGVerifierKey { - /// The generator of G1. - pub g: E::G1Affine, - /// The generator of G2. - pub h: E::G2Affine, - /// β times the above generator of G2. 
- pub beta_h: E::G2Affine, -} - -impl SimpleDigestible for KZGVerifierKey -where - E::G1Affine: Serialize, - E::G2Affine: Serialize, -{ -} - -impl UniversalKZGParam { - /// Returns the maximum supported degree - pub fn max_degree(&self) -> usize { - self.powers_of_g.len() - } - - /// Trim the universal parameters to specialize the public parameters - /// for univariate polynomials to the given `supported_size`, and - /// returns prover key and verifier key. `supported_size` should - /// be in range `1..params.len()` - /// - /// # Panics - /// If `supported_size` is greater than `self.max_degree()`, or - /// `self.max_degree()` is zero. - pub fn trim(ukzg: Arc, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { - assert!(ukzg.max_degree() > 0, "max_degree is zero"); - let g = ukzg.powers_of_g[0]; - let h = ukzg.powers_of_h[0]; - let beta_h = ukzg.powers_of_h[1]; - let pk = KZGProverKey::new(ukzg, 0, supported_size + 1); - let vk = KZGVerifierKey { g, h, beta_h }; - (pk, vk) - } -} - -impl FastSerde for UniversalKZGParam -where - E::G1Affine: SerdeObject, - E::G2Affine: SerdeObject, -{ - /// Byte format: - /// - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: UniversalKZGParam (u8) - /// [5] - Number of sections (u8 = 2) - /// [6] - Section 1 type: powers_of_g (u8) - /// [7..11] - Section 1 size (u32) - /// [11..] 
- Section 1 data - /// [...+1] - Section 2 type: powers_of_h (u8) - /// [...+5] - Section 2 size (u32) - /// [...end] - Section 2 data - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::UniversalKZGParam as u8); - out.push(2); // num_sections - - Self::write_section_bytes( - &mut out, - 1, - &self - .powers_of_g - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - Self::write_section_bytes( - &mut out, - 2, - &self - .powers_of_h - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - Self::validate_header(&mut cursor, SerdeByteTypes::UniversalKZGParam, 2)?; - - // Read sections of points - let powers_of_g = Self::read_section_bytes(&mut cursor, 1)? - .chunks(E::G1Affine::identity().to_raw_bytes().len()) - .map(|bytes| E::G1Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G1DecodeError)) - .collect::, _>>()?; - - let powers_of_h = Self::read_section_bytes(&mut cursor, 2)? - .chunks(E::G2Affine::identity().to_raw_bytes().len()) - .map(|bytes| E::G2Affine::from_raw_bytes(bytes).ok_or(SerdeByteError::G2DecodeError)) - .collect::, _>>()?; - - Ok(Self { - powers_of_g, - powers_of_h, - }) - } -} - -impl UniversalKZGParam -where - E::Fr: PrimeFieldBits, -{ - /// Build SRS for testing. - /// WARNING: THIS FUNCTION IS FOR TESTING PURPOSE ONLY. - /// THE OUTPUT SRS SHOULD NOT BE USED IN PRODUCTION. 
- pub fn gen_srs_for_testing(mut rng: &mut R, max_degree: usize) -> Self { - let beta = E::Fr::random(&mut rng); - let g = E::G1::random(&mut rng); - let h = E::G2::random(rng); - - let nz_powers_of_beta = (0..=max_degree) - .scan(beta, |acc, _| { - let val = *acc; - *acc *= beta; - Some(val) - }) - .collect::>(); - - let window_size = fb_msm::get_mul_window_size(max_degree); - let scalar_bits = E::Fr::NUM_BITS as usize; - - let (powers_of_g_projective, powers_of_h_projective) = rayon::join( - || { - let g_table = fb_msm::get_window_table(scalar_bits, window_size, g); - fb_msm::multi_scalar_mul::( - scalar_bits, - window_size, - &g_table, - &nz_powers_of_beta, - ) - }, - || { - let h_table = fb_msm::get_window_table(scalar_bits, window_size, h); - fb_msm::multi_scalar_mul::( - scalar_bits, - window_size, - &h_table, - &nz_powers_of_beta, - ) - }, - ); - - let mut powers_of_g = vec![E::G1Affine::identity(); powers_of_g_projective.len()]; - let mut powers_of_h = vec![E::G2Affine::identity(); powers_of_h_projective.len()]; - - rayon::join( - || E::G1::batch_normalize(&powers_of_g_projective, &mut powers_of_g), - || E::G2::batch_normalize(&powers_of_h_projective, &mut powers_of_h), - ); - - Self { - powers_of_g, - powers_of_h, - } - } -} - -/// Commitments -#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound( - serialize = "E::G1Affine: Serialize", - deserialize = "E::G1Affine: Deserialize<'de>" -))] -pub struct UVKZGCommitment( - /// the actual commitment is an affine point. - pub E::G1Affine, -); - -impl TranscriptReprTrait for UVKZGCommitment -where - E::G1: DlogGroup, - // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine - ::Base: TranscriptReprTrait, -{ - fn to_transcript_bytes(&self) -> Vec { - // TODO: avoid the round-trip through the group (to_curve .. 
to_coordinates) - let (x, y, is_infinity) = self.0.to_curve().to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct KZGCommitmentEngine { - _p: PhantomData, -} - -impl> CommitmentEngineTrait - for KZGCommitmentEngine -where - E::G1: DlogGroup, - E::G1Affine: Serialize + for<'de> Deserialize<'de>, - E::G2Affine: Serialize + for<'de> Deserialize<'de>, - E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional -{ - type CommitmentKey = UniversalKZGParam; - type Commitment = Commitment; - - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { - // TODO: this is just for testing, replace by grabbing from a real setup for - // production - let mut bytes = [0u8; 32]; - let len = label.len().min(32); - bytes[..len].copy_from_slice(&label[..len]); - let rng = &mut StdRng::from_seed(bytes); - UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) - } - - fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { - assert!(ck.length() >= v.len()); - Commitment { - comm: E::G1::vartime_multiscalar_mul(v, &ck.powers_of_g[..v.len()]), - } - } -} - -impl> From> - for UVKZGCommitment -where - E::G1: Group, -{ - fn from(c: Commitment) -> Self { - Self(c.comm.to_affine()) - } -} - -impl> From> - for Commitment -where - E::G1: Group, -{ - fn from(c: UVKZGCommitment) -> Self { - Self { - comm: c.0.to_curve(), - } - } -} diff --git a/src/provider/mod.rs b/src/provider/mod.rs deleted file mode 100644 index 23ae8d9..0000000 --- a/src/provider/mod.rs +++ /dev/null @@ -1,176 +0,0 @@ -//! This module implements Nova's traits using the following several different -//! 
combinations - -// public modules to be used as an evaluation engine with Spartan -pub mod hyperkzg; -pub mod ipa_pc; - -// crate-public modules, made crate-public mostly for tests -pub(crate) mod bn256_grumpkin; -mod pedersen; -pub(crate) mod poseidon; -pub(crate) mod traits; -// a non-hiding variant of kzg -mod kzg_commitment; -pub(crate) mod util; - -// crate-private modules -mod keccak; -mod tests; - -use halo2curves::bn256::Bn256; - -use self::kzg_commitment::KZGCommitmentEngine; -use crate::{ - provider::{ - bn256_grumpkin::{bn256, grumpkin}, - keccak::Keccak256Transcript, - pedersen::CommitmentEngine as PedersenCommitmentEngine, - poseidon::{PoseidonRO, PoseidonROCircuit}, - }, - traits::{CurveCycleEquipped, Engine}, -}; - -/// An implementation of the Nova `Engine` trait with Grumpkin curve and -/// Pedersen commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct GrumpkinEngine; - -/// An implementation of the Nova `Engine` trait with BN254 curve and Pedersen -/// commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineIPA; - -impl Engine for Bn256EngineIPA { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = PedersenCommitmentEngine; -} - -impl Engine for GrumpkinEngine { - type Base = grumpkin::Base; - type Scalar = grumpkin::Scalar; - type GE = grumpkin::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = PedersenCommitmentEngine; -} - -/// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph -/// commitment scheme -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineZM; - -impl Engine for Bn256EngineZM { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = 
Keccak256Transcript; - type CE = KZGCommitmentEngine; -} -/// An implementation of Nova traits with HyperKZG over the BN256 curve -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineKZG; - -impl Engine for Bn256EngineKZG { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - type GE = bn256::Point; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = KZGCommitmentEngine; -} - -impl CurveCycleEquipped for Bn256EngineIPA { - type Secondary = GrumpkinEngine; -} - -impl CurveCycleEquipped for Bn256EngineKZG { - type Secondary = GrumpkinEngine; -} - -impl CurveCycleEquipped for Bn256EngineZM { - type Secondary = GrumpkinEngine; -} - -#[cfg(test)] -mod test { - use std::io::Read; - - use digest::{ExtendableOutput, Update}; - use group::{ff::Field, Curve, Group}; - use halo2curves::{CurveAffine, CurveExt}; - use itertools::Itertools as _; - use rand_core::OsRng; - use sha3::Shake256; - - use crate::provider::{ - bn256_grumpkin::{bn256, grumpkin}, - traits::DlogGroup, - util::msm::cpu_best_msm, - }; - - macro_rules! 
impl_cycle_pair_test { - ($curve:ident) => { - fn from_label_serial(label: &'static [u8], n: usize) -> Vec<$curve::Affine> { - let mut shake = Shake256::default(); - shake.update(label); - let mut reader = shake.finalize_xof(); - (0..n) - .map(|_| { - let mut uniform_bytes = [0u8; 32]; - reader.read_exact(&mut uniform_bytes).unwrap(); - let hash = $curve::Point::hash_to_curve("from_uniform_bytes"); - hash(&uniform_bytes).to_affine() - }) - .collect() - } - - let label = b"test_from_label"; - for n in [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021, - ] { - let ck_par = <$curve::Point as DlogGroup>::from_label(label, n); - let ck_ser = from_label_serial(label, n); - assert_eq!(ck_par.len(), n); - assert_eq!(ck_ser.len(), n); - assert_eq!(ck_par, ck_ser); - } - }; - } - - fn test_msm_with>() { - let n = 8; - let coeffs = (0..n).map(|_| F::random(OsRng)).collect::>(); - let bases = (0..n) - .map(|_| A::from(A::generator() * F::random(OsRng))) - .collect::>(); - let naive = coeffs - .iter() - .zip_eq(bases.iter()) - .fold(A::CurveExt::identity(), |acc, (coeff, base)| { - acc + *base * coeff - }); - - assert_eq!(naive, cpu_best_msm(&bases, &coeffs)) - } - - #[test] - fn test_msm() { - test_msm_with::(); - test_msm_with::(); - } - - #[test] - fn test_bn256_from_label() { - impl_cycle_pair_test!(bn256); - } -} diff --git a/src/provider/pedersen.rs b/src/provider/pedersen.rs deleted file mode 100644 index dd16831..0000000 --- a/src/provider/pedersen.rs +++ /dev/null @@ -1,369 +0,0 @@ -//! 
This module provides an implementation of a commitment engine -use core::{ - fmt::Debug, - marker::PhantomData, - ops::{Add, Mul, MulAssign}, -}; -use std::io::Cursor; - -use ff::Field; -use group::{ - prime::{PrimeCurve, PrimeCurveAffine}, - Curve, Group, GroupEncoding, -}; -use halo2curves::serde::SerdeObject; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, - provider::traits::DlogGroup, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, - }, - zip_with, -}; - -/// A type that holds commitment generators -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CommitmentKey -where - E: Engine, - E::GE: DlogGroup, -{ - pub ck: Vec<::Affine>, -} - -impl Len for CommitmentKey -where - E: Engine, - E::GE: DlogGroup, -{ - fn length(&self) -> usize { - self.ck.len() - } -} - -impl FastSerde for CommitmentKey -where - ::Affine: SerdeObject, - E::GE: DlogGroup, -{ - /// Byte format: - /// - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: CommitmentKey (u8) - /// [5] - Number of sections (u8 = 1) - /// [6] - Section 1 type: ck (u8) - /// [7..11] - Section 1 size (u32) - /// [11..] - Section 1 data - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::CommitmentKey as u8); - out.push(1); // num_sections - - Self::write_section_bytes( - &mut out, - 1, - &self - .ck - .iter() - .flat_map(|p| p.to_raw_bytes()) - .collect::>(), - ); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - // Validate header - Self::validate_header(&mut cursor, SerdeByteTypes::CommitmentKey, 1)?; - - // Read ck section - let ck = Self::read_section_bytes(&mut cursor, 1)? 
- .chunks( - ::Affine::identity() - .to_raw_bytes() - .len(), - ) - .map(|bytes| { - ::Affine::from_raw_bytes(bytes) - .ok_or(SerdeByteError::G1DecodeError) - }) - .collect::, _>>()?; - - Ok(Self { ck }) - } -} - -/// A type that holds a commitment -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct Commitment { - pub(crate) comm: E::GE, -} - -/// A type that holds a compressed commitment -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedCommitment -where - E: Engine, - E::GE: DlogGroup, -{ - pub(crate) comm: ::Compressed, -} - -impl CommitmentTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type CompressedCommitment = CompressedCommitment; - - fn compress(&self) -> Self::CompressedCommitment { - CompressedCommitment { - comm: ::to_bytes(&self.comm).into(), - } - } - - fn to_coordinates(&self) -> (E::Base, E::Base, bool) { - self.comm.to_coordinates() - } - - fn decompress(c: &Self::CompressedCommitment) -> Result { - let opt_comm = <::GE as GroupEncoding>::from_bytes(&c.comm.clone().into()); - let Some(comm) = Option::from(opt_comm) else { - return Err(NovaError::DecompressionError); - }; - Ok(Self { comm }) - } -} - -impl Default for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn default() -> Self { - Self { - comm: E::GE::identity(), - } - } -} - -impl TranscriptReprTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity) = self.comm.to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -impl AbsorbInROTrait for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn absorb_in_ro(&self, ro: &mut E::RO) { - let (x, y, is_infinity) = self.comm.to_coordinates(); - ro.absorb(x); - ro.absorb(y); - ro.absorb(if is_infinity { - 
E::Base::ONE - } else { - E::Base::ZERO - }); - } -} - -impl TranscriptReprTrait for CompressedCommitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - self.comm.to_transcript_bytes() - } -} - -impl MulAssign for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - fn mul_assign(&mut self, scalar: E::Scalar) { - *self = Self { - comm: self.comm * scalar, - }; - } -} - -impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Commitment; - fn mul(self, scalar: &'b E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Mul for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Self; - - fn mul(self, scalar: E::Scalar) -> Self { - Self { - comm: self.comm * scalar, - } - } -} - -impl Add for Commitment -where - E: Engine, - E::GE: DlogGroup, -{ - type Output = Self; - - fn add(self, other: Self) -> Self { - Self { - comm: self.comm + other.comm, - } - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CommitmentEngine { - _p: PhantomData, -} - -impl CommitmentEngineTrait for CommitmentEngine -where - E: Engine, - E::GE: DlogGroup, -{ - type CommitmentKey = CommitmentKey; - type Commitment = Commitment; - - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { - Self::CommitmentKey { - ck: E::GE::from_label(label, n.next_power_of_two()), - } - } - - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { - assert!(ck.ck.len() >= v.len()); - Commitment { - comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), - } - } -} - -/// A trait listing properties of a commitment key that can be managed in a -/// divide-and-conquer fashion -pub trait CommitmentKeyExtTrait -where - E: Engine, - E::GE: DlogGroup, -{ - /// Splits the commitment key into two pieces at a specified point - fn split_at(self, n: usize) -> (Self, Self) - where - Self: Sized; - - 
/// Combines two commitment keys into one - fn combine(&self, other: &Self) -> Self; - - /// Folds the two commitment keys into one using the provided weights - fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self; - - /// Scales the commitment key using the provided scalar - fn scale(&mut self, r: &E::Scalar); - - /// Reinterprets commitments as commitment keys - fn reinterpret_commitments_as_ck( - c: &[<<::CE as CommitmentEngineTrait>::Commitment as CommitmentTrait< - E, - >>::CompressedCommitment], - ) -> Result - where - Self: Sized; -} - -impl CommitmentKeyExtTrait for CommitmentKey -where - E: Engine>, - E::GE: DlogGroup, -{ - fn split_at(mut self, n: usize) -> (Self, Self) { - let right = self.ck.split_off(n); - (self, Self { ck: right }) - } - - fn combine(&self, other: &Self) -> Self { - let ck = { - self.ck - .iter() - .cloned() - .chain(other.ck.iter().cloned()) - .collect::>() - }; - Self { ck } - } - - // combines the left and right halves of `self` using `w1` and `w2` as the - // weights - fn fold(L: &Self, R: &Self, w1: &E::Scalar, w2: &E::Scalar) -> Self { - debug_assert!(L.ck.len() == R.ck.len()); - let ck_curve: Vec = zip_with!(par_iter, (L.ck, R.ck), |l, r| { - E::GE::vartime_multiscalar_mul(&[*w1, *w2], &[*l, *r]) - }) - .collect(); - let mut ck_affine = vec![::Affine::identity(); L.ck.len()]; - E::GE::batch_normalize(&ck_curve, &mut ck_affine); - - Self { ck: ck_affine } - } - - /// Scales each element in `self` by `r` - fn scale(&mut self, r: &E::Scalar) { - let ck_scaled: Vec = self.ck.par_iter().map(|g| *g * r).collect(); - E::GE::batch_normalize(&ck_scaled, &mut self.ck); - } - - /// reinterprets a vector of commitments as a set of generators - fn reinterpret_commitments_as_ck(c: &[CompressedCommitment]) -> Result { - let d = c - .par_iter() - .map(|c| Commitment::::decompress(c).map(|c| c.comm)) - .collect::, NovaError>>()?; - let mut ck = vec![::Affine::identity(); d.len()]; - E::GE::batch_normalize(&d, &mut ck); - Ok(Self 
{ ck }) - } -} diff --git a/src/provider/poseidon.rs b/src/provider/poseidon.rs deleted file mode 100644 index 78956d1..0000000 --- a/src/provider/poseidon.rs +++ /dev/null @@ -1,244 +0,0 @@ -//! Poseidon Constants and Poseidon-based RO used in Nova -use core::marker::PhantomData; - -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{PrimeField, PrimeFieldBits}; -use generic_array::typenum::U24; -use neptune::{ - circuit2::Elt, - poseidon::PoseidonConstants, - sponge::{ - api::{IOPattern, SpongeAPI, SpongeOp}, - circuit::SpongeCircuit, - vanilla::{Mode::Simplex, Sponge, SpongeTrait}, - }, - Strength, -}; -use serde::{Deserialize, Serialize}; - -use crate::traits::{ROCircuitTrait, ROTrait}; - -/// All Poseidon Constants that are used in Nova -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PoseidonConstantsCircuit(PoseidonConstants); - -impl Default for PoseidonConstantsCircuit { - /// Generate Poseidon constants - fn default() -> Self { - Self(Sponge::::api_constants(Strength::Standard)) - } -} - -/// A Poseidon-based RO to use outside circuits -#[derive(Debug)] -pub struct PoseidonRO -where - Base: PrimeField, - Scalar: PrimeField, -{ - state: Vec, - constants: PoseidonConstantsCircuit, - num_absorbs: usize, - squeezed: bool, - _p: PhantomData, -} - -impl ROTrait for PoseidonRO -where - Base: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, - Scalar: PrimeField, -{ - type CircuitRO = PoseidonROCircuit; - type Constants = PoseidonConstantsCircuit; - - fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { - Self { - state: Vec::new(), - constants, - num_absorbs, - squeezed: false, - _p: PhantomData, - } - } - - /// Absorb a new number into the state of the oracle - fn absorb(&mut self, e: Base) { - assert!(!self.squeezed, "Cannot absorb after squeezing"); - self.state.push(e); - } - - /// Compute a challenge by hashing the 
current state - fn squeeze(&mut self, num_bits: usize) -> Scalar { - // check if we have squeezed already - assert!(!self.squeezed, "Cannot squeeze again after squeezing"); - self.squeezed = true; - - let mut sponge = Sponge::new_with_constants(&self.constants.0, Simplex); - let acc = &mut (); - let parameter = IOPattern(vec![ - SpongeOp::Absorb(self.num_absorbs as u32), - SpongeOp::Squeeze(1u32), - ]); - - sponge.start(parameter, None, acc); - assert_eq!(self.num_absorbs, self.state.len()); - SpongeAPI::absorb(&mut sponge, self.num_absorbs as u32, &self.state, acc); - let hash = SpongeAPI::squeeze(&mut sponge, 1, acc); - sponge.finish(acc).unwrap(); - - // Only return `num_bits` - let bits = hash[0].to_le_bits(); - let mut res = Scalar::ZERO; - let mut coeff = Scalar::ONE; - for bit in bits[..num_bits].into_iter() { - if *bit { - res += coeff; - } - coeff += coeff; - } - res - } -} - -/// A Poseidon-based RO gadget to use inside the verifier circuit. -#[derive(Debug)] -pub struct PoseidonROCircuit { - // Internal state - state: Vec>, - constants: PoseidonConstantsCircuit, - num_absorbs: usize, - squeezed: bool, -} - -impl ROCircuitTrait for PoseidonROCircuit -where - Scalar: PrimeField + PrimeFieldBits + Serialize + for<'de> Deserialize<'de>, -{ - type NativeRO = PoseidonRO; - type Constants = PoseidonConstantsCircuit; - - /// Initialize the internal state and set the poseidon constants - fn new(constants: PoseidonConstantsCircuit, num_absorbs: usize) -> Self { - Self { - state: Vec::new(), - constants, - num_absorbs, - squeezed: false, - } - } - - /// Absorb a new number into the state of the oracle - fn absorb(&mut self, e: &AllocatedNum) { - assert!(!self.squeezed, "Cannot absorb after squeezing"); - self.state.push(e.clone()); - } - - /// Compute a challenge by hashing the current state - fn squeeze>( - &mut self, - mut cs: CS, - num_bits: usize, - ) -> Result, SynthesisError> { - // check if we have squeezed already - assert!(!self.squeezed, "Cannot squeeze 
again after squeezing"); - self.squeezed = true; - let parameter = IOPattern(vec![ - SpongeOp::Absorb(self.num_absorbs as u32), - SpongeOp::Squeeze(1u32), - ]); - let mut ns = cs.namespace(|| "ns"); - - let hash = { - let mut sponge = SpongeCircuit::new_with_constants(&self.constants.0, Simplex); - let acc = &mut ns; - assert_eq!(self.num_absorbs, self.state.len()); - - sponge.start(parameter, None, acc); - SpongeAPI::absorb( - &mut sponge, - self.num_absorbs as u32, - &(0..self.state.len()) - .map(|i| Elt::Allocated(self.state[i].clone())) - .collect::>>(), - acc, - ); - - let output = SpongeAPI::squeeze(&mut sponge, 1, acc); - sponge.finish(acc).unwrap(); - output - }; - - let hash = Elt::ensure_allocated(&hash[0], &mut ns.namespace(|| "ensure allocated"), true)?; - - // return the hash as a vector of bits, truncated - Ok(hash - .to_bits_le_strict(ns.namespace(|| "poseidon hash to boolean"))? - .iter() - .map(|boolean| match boolean { - Boolean::Is(ref x) => x.clone(), - _ => panic!("Wrong type of input. 
We should have never reached there"), - }) - .collect::>()[..num_bits] - .into()) - } -} - -#[cfg(test)] -mod tests { - use ff::Field; - use rand::rngs::OsRng; - - use super::*; - use crate::{ - bellpepper::solver::SatisfyingAssignment, - constants::NUM_CHALLENGE_BITS, - gadgets::le_bits_to_num, - provider::{Bn256EngineKZG, GrumpkinEngine}, - traits::Engine, - }; - - fn test_poseidon_ro_with() - where - // we can print the field elements we get from E's Base & Scalar fields, - // and compare their byte representations - <::Base as PrimeField>::Repr: std::fmt::Debug, - <::Scalar as PrimeField>::Repr: std::fmt::Debug, - <::Base as PrimeField>::Repr: - PartialEq<<::Scalar as PrimeField>::Repr>, - { - // Check that the number computed inside the circuit is equal to the number - // computed outside the circuit - let mut csprng: OsRng = OsRng; - let constants = PoseidonConstantsCircuit::::default(); - let num_absorbs = 32; - let mut ro: PoseidonRO = - PoseidonRO::new(constants.clone(), num_absorbs); - let mut ro_gadget: PoseidonROCircuit = - PoseidonROCircuit::new(constants, num_absorbs); - let mut cs = SatisfyingAssignment::::new(); - for i in 0..num_absorbs { - let num = E::Scalar::random(&mut csprng); - ro.absorb(num); - let num_gadget = - AllocatedNum::alloc_infallible(cs.namespace(|| format!("data {i}")), || num); - num_gadget - .inputize(&mut cs.namespace(|| format!("input {i}"))) - .unwrap(); - ro_gadget.absorb(&num_gadget); - } - let num = ro.squeeze(NUM_CHALLENGE_BITS); - let num2_bits = ro_gadget.squeeze(&mut cs, NUM_CHALLENGE_BITS).unwrap(); - let num2 = le_bits_to_num(&mut cs, &num2_bits).unwrap(); - assert_eq!(num.to_repr(), num2.get_value().unwrap().to_repr()); - } - - #[test] - fn test_poseidon_ro() { - test_poseidon_ro_with::(); - test_poseidon_ro_with::(); - } -} diff --git a/src/provider/tests/ipa_pc.rs b/src/provider/tests/ipa_pc.rs deleted file mode 100644 index 3007176..0000000 --- a/src/provider/tests/ipa_pc.rs +++ /dev/null @@ -1,130 +0,0 @@ 
-#[cfg(test)] -mod test { - use group::Curve; - use handlebars::Handlebars; - use serde_json::{json, Map, Value}; - - use crate::provider::{ - ipa_pc::EvaluationEngine, - pedersen::{CommitmentKey, CommitmentKeyExtTrait}, - tests::solidity_compatibility_utils::{ - compressed_commitment_to_json, ec_points_to_json, field_elements_to_json, - generate_pcs_solidity_unit_test_data, - }, - GrumpkinEngine, - }; - - static IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE: &str = " -// SPDX-License-Identifier: Apache-2.0 -pragma solidity ^0.8.16; -import \"@std/Test.sol\"; -import \"src/blocks/grumpkin/Grumpkin.sol\"; -import \"src/blocks/EqPolynomial.sol\"; -import \"src/Utilities.sol\"; -import \"src/blocks/IpaPcs.sol\"; - -contract IpaTest is Test { -function composeIpaInput() public pure returns (InnerProductArgument.IpaInputGrumpkin memory) { -Grumpkin.GrumpkinAffinePoint[] memory ck_v = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_v }}); -{{ #each ck_v }} ck_v[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} - -Grumpkin.GrumpkinAffinePoint[] memory ck_s = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_s }}); -{{ #each ck_s }} ck_s[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} - -uint256[] memory point = new uint256[]({{ len point }}); -{{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }} - -uint256[] memory L_vec = new uint256[]({{ len L_vec }}); -{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }} - -uint256[] memory R_vec = new uint256[]({{ len R_vec }}); -{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }} - -uint256 a_hat = {{ a_hat }}; - -// InnerProductInstance -Grumpkin.GrumpkinAffinePoint memory commitment = Grumpkin.GrumpkinAffinePoint({{ commitment_x }}, {{ commitment_y }}); - -uint256 eval = {{ eval }}; - -return InnerProductArgument.IpaInputGrumpkin(ck_v, ck_s, point, L_vec, R_vec, commitment, eval, a_hat); -} - -function testIpaGrumpkinVerification_{{ num_vars }}_Variables() public { 
-InnerProductArgument.IpaInputGrumpkin memory input = composeIpaInput(); -assertTrue(InnerProductArgument.verifyGrumpkin(input, getTranscript())); -} - -function getTranscript() public pure returns (KeccakTranscriptLib.KeccakTranscript memory) { -// b\"TestEval\" in Rust -uint8[] memory label = new uint8[](8); -label[0] = 0x54; -label[1] = 0x65; -label[2] = 0x73; -label[3] = 0x74; -label[4] = 0x45; -label[5] = 0x76; -label[6] = 0x61; -label[7] = 0x6c; - -KeccakTranscriptLib.KeccakTranscript memory keccak_transcript = KeccakTranscriptLib.instantiate(label); -return keccak_transcript; -} -} -"; - - // To generate Solidity unit-test: - // cargo test test_solidity_compatibility_ipa --release -- --ignored --nocapture - // > ipa.t.sol - #[test] - #[ignore] - fn test_solidity_compatibility_ipa() { - let num_vars = 2; - - // Secondary part of verification is IPA over Grumpkin - let (commitment, point, eval, proof, vk) = - generate_pcs_solidity_unit_test_data::<_, EvaluationEngine>(num_vars); - - let num_vars_string = format!("{}", num_vars); - let eval_string = format!("{:?}", eval); - let commitment_x_string = format!("{:?}", commitment.comm.to_affine().x); - let commitment_y_string = format!("{:?}", commitment.comm.to_affine().y); - let proof_a_hat_string = format!("{:?}", proof.a_hat); - - let r_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.R_vec) - .expect("can't reinterpred R_vec"); - let l_vec = CommitmentKey::::reinterpret_commitments_as_ck(&proof.L_vec) - .expect("can't reinterpred L_vec"); - - let r_vec_array = compressed_commitment_to_json::(&r_vec.ck); - let l_vec_array = compressed_commitment_to_json::(&l_vec.ck); - let point_array = field_elements_to_json::(&point); - let ckv_array = ec_points_to_json::(&vk.ck_v.ck); - let cks_array = ec_points_to_json::(&vk.ck_s.ck); - - let mut map = Map::new(); - map.insert("num_vars".to_string(), Value::String(num_vars_string)); - map.insert("eval".to_string(), Value::String(eval_string)); - map.insert( - 
"commitment_x".to_string(), - Value::String(commitment_x_string), - ); - map.insert( - "commitment_y".to_string(), - Value::String(commitment_y_string), - ); - map.insert("R_vec".to_string(), Value::Array(r_vec_array)); - map.insert("L_vec".to_string(), Value::Array(l_vec_array)); - map.insert("a_hat".to_string(), Value::String(proof_a_hat_string)); - map.insert("point".to_string(), Value::Array(point_array)); - map.insert("ck_v".to_string(), Value::Array(ckv_array)); - map.insert("ck_s".to_string(), Value::Array(cks_array)); - - let mut reg = Handlebars::new(); - reg.register_template_string("ipa.t.sol", IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE) - .expect("can't register template"); - - let solidity_unit_test_source = reg.render("ipa.t.sol", &json!(map)).expect("can't render"); - println!("{}", solidity_unit_test_source); - } -} diff --git a/src/provider/tests/mod.rs b/src/provider/tests/mod.rs deleted file mode 100644 index 7184cdd..0000000 --- a/src/provider/tests/mod.rs +++ /dev/null @@ -1,155 +0,0 @@ -mod ipa_pc; - -#[cfg(test)] -pub mod solidity_compatibility_utils { - use std::sync::Arc; - - use group::{ - prime::{PrimeCurve, PrimeCurveAffine}, - GroupEncoding, - }; - use rand::rngs::StdRng; - use serde_json::{Map, Value}; - - use crate::{ - provider::traits::DlogGroup, - spartan::polys::multilinear::MultilinearPolynomial, - traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, - }; - - pub(crate) fn generate_pcs_solidity_unit_test_data>( - num_vars: usize, - ) -> ( - >::Commitment, - Vec, - E::Scalar, - EE::EvaluationArgument, - EE::VerifierKey, - ) { - use rand_core::SeedableRng; - - let mut rng = StdRng::seed_from_u64(num_vars as u64); - - let (poly, point, eval) = crate::provider::util::test_utils::random_poly_with_eval::< - E, - StdRng, - >(num_vars, &mut rng); - - // Mock commitment key. 
- let ck = E::CE::setup(b"test", 1 << num_vars); - let ck_arc = Arc::new(ck.clone()); - // Commits to the provided vector using the provided generators. - let commitment = E::CE::commit(&ck_arc, poly.evaluations()); - - let (proof, vk) = prove_verify_solidity::(ck_arc, &commitment, &poly, &point, &eval); - - (commitment, point, eval, proof, vk) - } - - fn prove_verify_solidity>( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - commitment: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &MultilinearPolynomial<::Scalar>, - point: &[::Scalar], - eval: &::Scalar, - ) -> (EE::EvaluationArgument, EE::VerifierKey) { - use crate::traits::TranscriptEngineTrait; - - // Generate Prover and verifier key for given commitment key. - let ock = ck.clone(); - let (prover_key, verifier_key) = EE::setup(ck); - - // Generate proof. - let mut prover_transcript = E::TE::new(b"TestEval"); - let proof: EE::EvaluationArgument = EE::prove( - &*ock, - &prover_key, - &mut prover_transcript, - commitment, - poly.evaluations(), - point, - eval, - ) - .unwrap(); - let pcp = prover_transcript.squeeze(b"c").unwrap(); - - // Verify proof. - let mut verifier_transcript = E::TE::new(b"TestEval"); - EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - point, - eval, - &proof, - ) - .unwrap(); - let pcv = verifier_transcript.squeeze(b"c").unwrap(); - - // Check if the prover transcript and verifier transcript are kept in the same - // state. 
- assert_eq!(pcp, pcv); - - (proof, verifier_key) - } - - pub(crate) fn field_elements_to_json(field_elements: &[E::Scalar]) -> Vec { - let mut value_vector = vec![]; - field_elements.iter().enumerate().for_each(|(i, fe)| { - let mut value = Map::new(); - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert("val".to_string(), Value::String(format!("{:?}", fe))); - value_vector.push(Value::Object(value)); - }); - value_vector - } - - pub(crate) fn ec_points_to_json(ec_points: &[::Affine]) -> Vec - where - E: Engine, - E::GE: DlogGroup, - { - let mut value_vector = vec![]; - ec_points.iter().enumerate().for_each(|(i, ec_point)| { - let mut value = Map::new(); - let coordinates_info = ec_point.to_curve().to_coordinates(); - let not_infinity = !coordinates_info.2; - assert!(not_infinity); - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert( - "x".to_string(), - Value::String(format!("{:?}", coordinates_info.0)), - ); - value.insert( - "y".to_string(), - Value::String(format!("{:?}", coordinates_info.1)), - ); - value_vector.push(Value::Object(value)); - }); - value_vector - } - - pub(crate) fn compressed_commitment_to_json( - ec_points: &[::Affine], - ) -> Vec - where - E: Engine, - E::GE: DlogGroup, - { - let mut value_vector = vec![]; - ec_points.iter().enumerate().for_each(|(i, ec_point)| { - let mut value = Map::new(); - let compressed_commitment_info = ec_point.to_curve().to_bytes(); - let mut data = compressed_commitment_info.as_ref().to_vec(); - data.reverse(); - - value.insert("i".to_string(), Value::String(i.to_string())); - value.insert( - "compressed".to_string(), - Value::String(format!("0x{}", hex::encode(data))), - ); - value_vector.push(Value::Object(value)); - }); - value_vector - } -} diff --git a/src/provider/traits.rs b/src/provider/traits.rs deleted file mode 100644 index 2a978f5..0000000 --- a/src/provider/traits.rs +++ /dev/null @@ -1,208 +0,0 @@ -use std::{fmt::Debug, ops::Mul}; - -use group::{ - 
prime::{PrimeCurve, PrimeCurveAffine}, - GroupEncoding, -}; -use serde::{Deserialize, Serialize}; - -use crate::traits::{Group, TranscriptReprTrait}; - -/// A trait that defines extensions to the Group trait -pub trait DlogGroup: - Group::ScalarExt> - + Serialize - + for<'de> Deserialize<'de> - + PrimeCurve::ScalarExt, Affine = ::AffineExt> -{ - type ScalarExt; - type AffineExt: Clone - + Debug - + Eq - + Serialize - + for<'de> Deserialize<'de> - + Sync - + Send - // technical bounds, should disappear when associated_type_bounds stabilizes - + Mul - + PrimeCurveAffine; - type Compressed: Clone - + Debug - + Eq - + From<::Repr> - + Into<::Repr> - + Serialize - + for<'de> Deserialize<'de> - + Sync - + Send - + TranscriptReprTrait; - - /// A method to compute a multiexponentation - fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self; - - /// Produce a vector of group elements using a static label - fn from_label(label: &'static [u8], n: usize) -> Vec; - - /// Returns the affine coordinates (x, y, infinity) for the point - fn to_coordinates(&self) -> (::Base, ::Base, bool); -} - -/// This implementation behaves in ways specific to the halo2curves suite of -/// curves in: -// - to_coordinates, -// - vartime_multiscalar_mul, where it does not call into accelerated implementations. -// A specific reimplementation exists for the pasta curves in their own module. -#[macro_export] -macro_rules! impl_traits { - ( - $name:ident, - $order_str:literal, - $base_str:literal - ) => { - $crate::impl_traits!($name, $order_str, $base_str, cpu_best_msm); - }; - ( - $name:ident, - $order_str:literal, - $base_str:literal, - $large_msm_method: ident - ) => { - // These compile-time assertions check important assumptions in the memory - // representation of group data that supports the use of Abomonation. 
- static_assertions::assert_eq_size!($name::Affine, [u64; 8]); - static_assertions::assert_eq_size!($name::Point, [u64; 12]); - - impl Group for $name::Point { - type Base = $name::Base; - type Scalar = $name::Scalar; - - fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { - let A = $name::Point::a(); - let B = $name::Point::b(); - let order = BigInt::from_str_radix($order_str, 16).unwrap(); - let base = BigInt::from_str_radix($base_str, 16).unwrap(); - - (A, B, order, base) - } - } - - impl DlogGroup for $name::Point { - type ScalarExt = $name::Scalar; - type AffineExt = $name::Affine; - // note: for halo2curves implementations, $name::Compressed == <$name::Point as - // GroupEncoding>::Repr so the blanket impl From for T and impl - // Into apply. - type Compressed = $name::Compressed; - - fn vartime_multiscalar_mul( - scalars: &[Self::ScalarExt], - bases: &[Self::AffineExt], - ) -> Self { - #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] - if scalars.len() >= 128 { - $large_msm_method(bases, scalars) - } else { - cpu_best_msm(bases, scalars) - } - #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] - cpu_best_msm(bases, scalars) - } - - fn from_label(label: &'static [u8], n: usize) -> Vec { - let mut shake = Shake256::default(); - shake.update(label); - let mut reader = shake.finalize_xof(); - let mut uniform_bytes_vec = Vec::new(); - for _ in 0..n { - let mut uniform_bytes = [0u8; 32]; - reader.read_exact(&mut uniform_bytes).unwrap(); - uniform_bytes_vec.push(uniform_bytes); - } - let gens_proj: Vec<$name::Point> = (0..n) - .into_par_iter() - .map(|i| { - let hash = $name::Point::hash_to_curve("from_uniform_bytes"); - hash(&uniform_bytes_vec[i]) - }) - .collect(); - - let num_threads = rayon::current_num_threads(); - if gens_proj.len() > num_threads { - let chunk = (gens_proj.len() as f64 / num_threads as f64).ceil() as usize; - (0..num_threads) - .into_par_iter() - .flat_map(|i| { - let start = i * chunk; - let end = if i 
== num_threads - 1 { - gens_proj.len() - } else { - core::cmp::min((i + 1) * chunk, gens_proj.len()) - }; - if end > start { - let mut gens = vec![$name::Affine::identity(); end - start]; - ::batch_normalize(&gens_proj[start..end], &mut gens); - gens - } else { - vec![] - } - }) - .collect() - } else { - let mut gens = vec![$name::Affine::identity(); n]; - ::batch_normalize(&gens_proj, &mut gens); - gens - } - } - - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { - let coordinates = self.to_affine().coordinates(); - if coordinates.is_some().unwrap_u8() == 1 && ($name::Point::identity() != *self) { - (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) - } else { - (Self::Base::zero(), Self::Base::zero(), true) - } - } - } - - impl PrimeFieldExt for $name::Scalar { - fn from_uniform(bytes: &[u8]) -> Self { - let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); - $name::Scalar::from_uniform_bytes(&bytes_arr) - } - } - - impl TranscriptReprTrait for $name::Compressed { - fn to_transcript_bytes(&self) -> Vec { - self.as_ref().to_vec() - } - } - - impl TranscriptReprTrait for $name::Scalar { - fn to_transcript_bytes(&self) -> Vec { - self.to_repr().to_vec() - } - } - - impl TranscriptReprTrait for $name::Affine { - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity_byte) = { - let coordinates = self.coordinates(); - if coordinates.is_some().unwrap_u8() == 1 - && ($name::Affine::identity() != *self) - { - let c = coordinates.unwrap(); - (*c.x(), *c.y(), u8::from(false)) - } else { - ($name::Base::zero(), $name::Base::zero(), u8::from(false)) - } - }; - - x.to_repr() - .into_iter() - .chain(y.to_repr().into_iter()) - .chain(std::iter::once(is_infinity_byte)) - .collect() - } - } - }; -} diff --git a/src/provider/util/fb_msm.rs b/src/provider/util/fb_msm.rs deleted file mode 100644 index 231c729..0000000 --- a/src/provider/util/fb_msm.rs +++ /dev/null @@ -1,137 +0,0 @@ -/// # Fixed-base Scalar Multiplication -/// -/// This module provides 
an implementation of fixed-base scalar multiplication -/// on elliptic curves. -/// -/// The multiplication is optimized through a windowed method, where scalars are -/// broken into fixed-size windows, pre-computation tables are generated, and -/// results are efficiently combined. -use ff::{PrimeField, PrimeFieldBits}; -use group::{prime::PrimeCurve, Curve}; -use rayon::prelude::*; - -/// Determines the window size for scalar multiplication based on the number of -/// scalars. -/// -/// This is used to balance between pre-computation and number of point -/// additions. -pub(crate) fn get_mul_window_size(num_scalars: usize) -> usize { - if num_scalars < 32 { - 3 - } else { - (num_scalars as f64).ln().ceil() as usize - } -} - -/// Generates a table of multiples of a base point `g` for use in windowed -/// scalar multiplication. -/// -/// This pre-computes multiples of a base point for each window and organizes -/// them into a table for quick lookup during the scalar multiplication process. -/// The table is a vector of vectors, each inner vector corresponding to a -/// window and containing the multiples of `g` for that window. 
-pub(crate) fn get_window_table( - scalar_size: usize, - window: usize, - g: T, -) -> Vec> -where - T: Curve, - T::AffineRepr: Send, -{ - let in_window = 1 << window; - // Number of outer iterations needed to cover the entire scalar - let outerc = (scalar_size + window - 1) / window; - - // Number of multiples of the window's "outer point" needed for each window - // (fewer for the last window) - let last_in_window = 1 << (scalar_size - (outerc - 1) * window); - - let mut multiples_of_g = vec![vec![T::identity(); in_window]; outerc]; - - // Compute the multiples of g for each window - // g_outers = [ 2^{k*window}*g for k in 0..outerc] - let mut g_outer = g; - let mut g_outers = Vec::with_capacity(outerc); - for _ in 0..outerc { - g_outers.push(g_outer); - for _ in 0..window { - g_outer = g_outer.double(); - } - } - multiples_of_g - .par_iter_mut() - .enumerate() - .zip_eq(g_outers) - .for_each(|((outer, multiples_of_g), g_outer)| { - let cur_in_window = if outer == outerc - 1 { - last_in_window - } else { - in_window - }; - - // multiples_of_g = [id, g_outer, 2*g_outer, 3*g_outer, ...], - // where g_outer = 2^{outer*window}*g - let mut g_inner = T::identity(); - for inner in multiples_of_g.iter_mut().take(cur_in_window) { - *inner = g_inner; - g_inner.add_assign(&g_outer); - } - }); - multiples_of_g - .par_iter() - .map(|s| s.iter().map(|s| s.to_affine()).collect()) - .collect() -} - -/// Performs the actual windowed scalar multiplication using a pre-computed -/// table of points. -/// -/// Given a scalar and a table of pre-computed multiples of a base point, this -/// function efficiently computes the scalar multiplication by breaking the -/// scalar into windows and adding the corresponding multiples from the table. 
-fn windowed_mul( - outerc: usize, - window: usize, - multiples_of_g: &[Vec], - scalar: &T::Scalar, -) -> T -where - T: PrimeCurve, - T::Scalar: PrimeFieldBits, -{ - let modulus_size = ::NUM_BITS as usize; - let scalar_val: Vec = scalar.to_le_bits().into_iter().collect(); - - let mut res = T::identity(); - for outer in 0..outerc { - let mut inner = 0usize; - for i in 0..window { - if outer * window + i < modulus_size && scalar_val[outer * window + i] { - inner |= 1 << i; - } - } - res.add_assign(&multiples_of_g[outer][inner]); - } - res -} - -/// Computes multiple scalar multiplications simultaneously using the windowed -/// method. -pub(crate) fn multi_scalar_mul( - scalar_size: usize, - window: usize, - table: &[Vec], - v: &[T::Scalar], -) -> Vec -where - T: PrimeCurve, - T::Scalar: PrimeFieldBits, -{ - let outerc = (scalar_size + window - 1) / window; - assert!(outerc <= table.len()); - - v.par_iter() - .map(|e| windowed_mul::(outerc, window, table, e)) - .collect::>() -} diff --git a/src/provider/util/mod.rs b/src/provider/util/mod.rs deleted file mode 100644 index e45ba7e..0000000 --- a/src/provider/util/mod.rs +++ /dev/null @@ -1,234 +0,0 @@ -//! Utilities for provider module. 
-pub(in crate::provider) mod fb_msm; -pub mod msm { - use halo2curves::{msm::best_multiexp, CurveAffine}; - - // this argument swap is useful until Rust gets named arguments - // and saves significant complexity in macro code - pub fn cpu_best_msm(bases: &[C], scalars: &[C::Scalar]) -> C::Curve { - best_multiexp(scalars, bases) - } -} - -pub mod field { - use ff::{BatchInverter, Field}; - - use crate::errors::NovaError; - - #[inline] - pub fn batch_invert(mut v: Vec) -> Result, NovaError> { - // we only allocate the scratch space if every element of v is nonzero - let mut scratch_space = v - .iter() - .map(|x| { - if !x.is_zero_vartime() { - Ok(*x) - } else { - Err(NovaError::InternalError) - } - }) - .collect::, _>>()?; - let _ = BatchInverter::invert_with_external_scratch(&mut v, &mut scratch_space[..]); - Ok(v) - } -} - -pub mod iterators { - use std::{ - borrow::Borrow, - iter::DoubleEndedIterator, - ops::{AddAssign, MulAssign}, - }; - - use ff::Field; - use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; - use rayon_scan::ScanParallelIterator; - - pub trait DoubleEndedIteratorExt: DoubleEndedIterator { - /// This function employs Horner's scheme and core traits to create a - /// combination of an iterator input with the powers - /// of a provided coefficient. - fn rlc(&mut self, coefficient: &F) -> T - where - T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T>, - Self::Item: Borrow, - { - let mut iter = self.rev(); - let Some(fst) = iter.next() else { - panic!("input iterator should not be empty") - }; - - iter.fold(fst.borrow().clone(), |mut acc, item| { - acc *= coefficient; - acc += item.borrow(); - acc - }) - } - } - - impl DoubleEndedIteratorExt for I {} - - pub trait IndexedParallelIteratorExt: IndexedParallelIterator { - /// This function core traits to create a combination of an iterator - /// input with the powers of a provided coefficient. 
- fn rlc(self, coefficient: &F) -> T - where - F: Field, - Self::Item: Borrow, - T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T> + Send + Sync, - { - debug_assert!(self.len() > 0); - // generate an iterator of powers of the right length - let v = { - let mut v = vec![*coefficient; self.len()]; - v[0] = F::ONE; - v - }; - // the collect is due to Scan being unindexed - let powers: Vec<_> = v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect(); - - self.zip_eq(powers.into_par_iter()) - .map(|(pt, val)| { - let mut pt = pt.borrow().clone(); - pt *= &val; - pt - }) - .reduce_with(|mut a, b| { - a += &b; - a - }) - .unwrap() - } - } - - impl IndexedParallelIteratorExt for I {} -} - -#[cfg(test)] -pub mod test_utils { - //! Contains utilities for testing and benchmarking. - use std::sync::Arc; - - use ff::Field; - use rand::rngs::StdRng; - use rand_core::{CryptoRng, RngCore}; - - use crate::{ - spartan::polys::multilinear::MultilinearPolynomial, - traits::{commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine}, - }; - - /// Returns a random polynomial, a point and calculate its evaluation. - pub(crate) fn random_poly_with_eval( - num_vars: usize, - mut rng: &mut R, - ) -> ( - MultilinearPolynomial<::Scalar>, - Vec<::Scalar>, - ::Scalar, - ) { - // Generate random polynomial and point. - let poly = MultilinearPolynomial::random(num_vars, &mut rng); - let point = (0..num_vars) - .map(|_| ::Scalar::random(&mut rng)) - .collect::>(); - - // Calculation evaluation of point over polynomial. - let eval = poly.evaluate(&point); - - (poly, point, eval) - } - - /// Methods used to test the prove and verify flow of - /// [`MultilinearPolynomial`] Commitment Schemes (PCS). - /// - /// Generates a random polynomial and point from a seed to test a - /// proving/verifying flow of one of our [`EvaluationEngine`]. 
- pub(crate) fn prove_verify_from_num_vars>( - num_vars: usize, - ) { - use rand_core::SeedableRng; - - let mut rng = StdRng::seed_from_u64(num_vars as u64); - - let (poly, point, eval) = random_poly_with_eval::(num_vars, &mut rng); - - // Mock commitment key. - let ck = E::CE::setup(b"test", 1 << num_vars); - let ck = Arc::new(ck); - // Commits to the provided vector using the provided generators. - let commitment = E::CE::commit(&ck, poly.evaluations()); - - prove_verify_with::(ck, &commitment, &poly, &point, &eval, true) - } - - fn prove_verify_with>( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - commitment: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &MultilinearPolynomial<::Scalar>, - point: &[::Scalar], - eval: &::Scalar, - evaluate_bad_proof: bool, - ) { - use std::ops::Add; - - use crate::traits::TranscriptEngineTrait; - - // Generate Prover and verifier key for given commitment key. - let ock = ck.clone(); - let (prover_key, verifier_key) = EE::setup(ck); - - // Generate proof. - let mut prover_transcript = E::TE::new(b"TestEval"); - let proof = EE::prove( - &*ock, - &prover_key, - &mut prover_transcript, - commitment, - poly.evaluations(), - point, - eval, - ) - .unwrap(); - let pcp = prover_transcript.squeeze(b"c").unwrap(); - - // Verify proof. - let mut verifier_transcript = E::TE::new(b"TestEval"); - EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - point, - eval, - &proof, - ) - .unwrap(); - let pcv = verifier_transcript.squeeze(b"c").unwrap(); - - // Check if the prover transcript and verifier transcript are kept in the same - // state. - assert_eq!(pcp, pcv); - - if evaluate_bad_proof { - // Generate another point to verify proof. Also produce eval. - let altered_verifier_point = point - .iter() - .map(|s| s.add(::Scalar::ONE)) - .collect::>(); - let altered_verifier_eval = - MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point); - - // Verify proof, should fail. 
- let mut verifier_transcript = E::TE::new(b"TestEval"); - assert!(EE::verify( - &verifier_key, - &mut verifier_transcript, - commitment, - &altered_verifier_point, - &altered_verifier_eval, - &proof, - ) - .is_err()); - } - } -} diff --git a/src/r1cs/mod.rs b/src/r1cs/mod.rs deleted file mode 100644 index 1b1dabc..0000000 --- a/src/r1cs/mod.rs +++ /dev/null @@ -1,921 +0,0 @@ -//! This module defines R1CS related types and a folding scheme for Relaxed R1CS -mod sparse; -pub(crate) mod util; - -use core::cmp::max; - -use ff::Field; -use once_cell::sync::OnceCell; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; -pub(crate) use sparse::SparseMatrix; - -use crate::{ - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, - traits::{ - commitment::CommitmentEngineTrait, AbsorbInROTrait, Engine, ROTrait, TranscriptReprTrait, - }, - zip_with, Commitment, CommitmentKey, CE, -}; - -/// A type that holds the shape of the R1CS matrices -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSShape { - pub(crate) num_cons: usize, - pub(crate) num_vars: usize, - pub(crate) num_io: usize, - pub(crate) A: SparseMatrix, - pub(crate) B: SparseMatrix, - pub(crate) C: SparseMatrix, - #[serde(skip, default = "OnceCell::new")] - pub(crate) digest: OnceCell, -} - -impl SimpleDigestible for R1CSShape {} - -/// A type that holds the result of a R1CS multiplication -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSResult { - pub(crate) AZ: Vec, - pub(crate) BZ: Vec, - pub(crate) CZ: Vec, -} - -/// A type that holds a witness for a given R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct R1CSWitness { - W: Vec, -} - -/// A type that holds an R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub 
struct R1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) X: Vec, -} - -/// A type that holds a witness for a given Relaxed R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct RelaxedR1CSWitness { - pub(crate) W: Vec, - pub(crate) E: Vec, -} - -/// A type that holds a Relaxed R1CS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSInstance { - pub(crate) comm_W: Commitment, - pub(crate) comm_E: Commitment, - pub(crate) X: Vec, - pub(crate) u: E::Scalar, -} - -/// A type for functions that hints commitment key sizing by returning the floor -/// of the number of required generators. -pub type CommitmentKeyHint = dyn Fn(&R1CSShape) -> usize; - -/// Generates public parameters for a Rank-1 Constraint System (R1CS). -/// -/// This function takes into consideration the shape of the R1CS matrices and a -/// hint function for the number of generators. It returns a `CommitmentKey`. -/// -/// # Arguments -/// -/// * `S`: The shape of the R1CS matrices. -/// * `ck_floor`: A function that provides a floor for the number of generators. -/// A good function to provide is the `commitment_key_floor` field in the -/// trait `RelaxedR1CSSNARKTrait`. -pub fn commitment_key( - S: &R1CSShape, - ck_floor: &CommitmentKeyHint, -) -> CommitmentKey { - let size = commitment_key_size(S, ck_floor); - E::CE::setup(b"ck", size) -} - -/// Computes the number of generators required for the commitment key -/// corresponding to shape `S`. 
-pub fn commitment_key_size(S: &R1CSShape, ck_floor: &CommitmentKeyHint) -> usize { - let num_cons = S.num_cons; - let num_vars = S.num_vars; - let ck_hint = ck_floor(S); - max(max(num_cons, num_vars), ck_hint) -} - -impl R1CSShape { - /// Create an object of type `R1CSShape` from the explicitly specified R1CS - /// matrices - pub fn new( - num_cons: usize, - num_vars: usize, - num_io: usize, - A: SparseMatrix, - B: SparseMatrix, - C: SparseMatrix, - ) -> Result { - let is_valid = |num_cons: usize, - num_vars: usize, - num_io: usize, - M: &SparseMatrix| - -> Result, NovaError> { - M.iter() - .map(|(row, col, _val)| { - if row >= num_cons || col > num_io + num_vars { - Err(NovaError::InvalidIndex) - } else { - Ok(()) - } - }) - .collect::, NovaError>>() - }; - - is_valid(num_cons, num_vars, num_io, &A)?; - is_valid(num_cons, num_vars, num_io, &B)?; - is_valid(num_cons, num_vars, num_io, &C)?; - - // We require the number of public inputs/outputs to be even - if num_io % 2 != 0 { - return Err(NovaError::InvalidStepCircuitIO); - } - - Ok(Self { - num_cons, - num_vars, - num_io, - A, - B, - C, - digest: OnceCell::new(), - }) - } - - /// Generate a random [`R1CSShape`] with the specified number of - /// constraints, variables, and public inputs/outputs. 
- pub fn random( - num_cons: usize, - num_vars: usize, - num_io: usize, - num_entries: usize, - mut rng: &mut R, - ) -> Self { - assert!(num_cons.is_power_of_two()); - assert!(num_vars.is_power_of_two()); - assert!(num_entries.is_power_of_two()); - assert!(num_io < num_vars); - - let num_rows = num_cons; - let num_cols = num_vars + 1 + num_io; - - let (NA, NB, NC) = { - let N_div_3 = num_entries / 3; - let NC = num_entries - (2 * N_div_3); - (N_div_3, N_div_3, NC) - }; - - let A = SparseMatrix::random(num_rows, num_cols, NA, &mut rng); - let B = SparseMatrix::random(num_rows, num_cols, NB, &mut rng); - let C = SparseMatrix::random(num_rows, num_cols, NC, &mut rng); - - Self { - num_cons, - num_vars, - num_io, - A, - B, - C, - digest: Default::default(), - } - } - - /// Generate a satisfying [`RelaxedR1CSWitness`] and [`RelaxedR1CSInstance`] - /// for this [`R1CSShape`]. - pub fn random_witness_instance( - &self, - commitment_key: &CommitmentKey, - mut rng: &mut R, - ) -> (RelaxedR1CSWitness, RelaxedR1CSInstance) { - // Sample a random witness and compute the error term - let W = (0..self.num_vars) - .map(|_| E::Scalar::random(&mut rng)) - .collect::>(); - let u = E::Scalar::random(&mut rng); - let X = (0..self.num_io) - .map(|_| E::Scalar::random(&mut rng)) - .collect::>(); - - let E = self.compute_E(&W, &u, &X).unwrap(); - - let (comm_W, comm_E) = rayon::join( - || CE::::commit(commitment_key, &W), - || CE::::commit(commitment_key, &E), - ); - - let witness = RelaxedR1CSWitness { W, E }; - let instance = RelaxedR1CSInstance { - comm_W, - comm_E, - u, - X, - }; - - (witness, instance) - } - - /// returned the digest of the `R1CSShape` - pub fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| DigestComputer::new(self).digest()) - .cloned() - .expect("Failure retrieving digest") - } - - // Checks regularity conditions on the R1CSShape, required in Spartan-class - // SNARKs Returns false if num_cons or num_vars are not powers of two, or if - // num_io 
> num_vars - #[inline] - pub(crate) fn is_regular_shape(&self) -> bool { - let cons_valid = self.num_cons.next_power_of_two() == self.num_cons; - let vars_valid = self.num_vars.next_power_of_two() == self.num_vars; - let io_lt_vars = self.num_io < self.num_vars; - cons_valid && vars_valid && io_lt_vars - } - - pub(crate) fn multiply_vec( - &self, - z: &[E::Scalar], - ) -> Result<(Vec, Vec, Vec), NovaError> { - if z.len() != self.num_io + self.num_vars + 1 { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_vec(z), - || rayon::join(|| self.B.multiply_vec(z), || self.C.multiply_vec(z)), - ); - - Ok((Az, Bz, Cz)) - } - - pub(crate) fn multiply_witness( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ) -> Result<(Vec, Vec, Vec), NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_witness(W, u, X), - || { - rayon::join( - || self.B.multiply_witness(W, u, X), - || self.C.multiply_witness(W, u, X), - ) - }, - ); - - Ok((Az, Bz, Cz)) - } - - pub(crate) fn multiply_witness_into( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ABC_Z: &mut R1CSResult, - ) -> Result<(), NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let R1CSResult { AZ, BZ, CZ } = ABC_Z; - - rayon::join( - || self.A.multiply_witness_into(W, u, X, AZ), - || { - rayon::join( - || self.B.multiply_witness_into(W, u, X, BZ), - || self.C.multiply_witness_into(W, u, X, CZ), - ) - }, - ); - - Ok(()) - } - - /// Computes the error term E = Az * Bz - u*Cz. 
- fn compute_E( - &self, - W: &[E::Scalar], - u: &E::Scalar, - X: &[E::Scalar], - ) -> Result, NovaError> { - if X.len() != self.num_io || W.len() != self.num_vars { - return Err(NovaError::InvalidWitnessLength); - } - - let (Az, (Bz, Cz)) = rayon::join( - || self.A.multiply_witness(W, u, X), - || { - rayon::join( - || self.B.multiply_witness(W, u, X), - || self.C.multiply_witness(W, u, X), - ) - }, - ); - - let E = zip_with!( - (Az.into_par_iter(), Bz.into_par_iter(), Cz.into_par_iter()), - |a, b, c| a * b - c * u - ) - .collect::>(); - - Ok(E) - } - - /// Checks if the Relaxed R1CS instance is satisfiable given a witness and - /// its shape - pub fn is_sat_relaxed( - &self, - ck: &CommitmentKey, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result<(), NovaError> { - assert_eq!(W.W.len(), self.num_vars); - assert_eq!(W.E.len(), self.num_cons); - assert_eq!(U.X.len(), self.num_io); - - // verify if Az * Bz - u*Cz = E - let E = self.compute_E(&W.W, &U.u, &U.X)?; - W.E.par_iter() - .zip_eq(E.into_par_iter()) - .enumerate() - .try_for_each(|(i, (we, e))| { - if *we != e { - // constraint failed, retrieve constraint name - Err(NovaError::UnSatIndex(i)) - } else { - Ok(()) - } - })?; - - // verify if comm_E and comm_W are commitments to E and W - let res_comm = { - let (comm_W, comm_E) = - rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); - U.comm_W == comm_W && U.comm_E == comm_E - }; - - if !res_comm { - return Err(NovaError::UnSat); - } - Ok(()) - } - - /// Checks if the R1CS instance is satisfiable given a witness and its shape - pub fn is_sat( - &self, - ck: &CommitmentKey, - U: &R1CSInstance, - W: &R1CSWitness, - ) -> Result<(), NovaError> { - assert_eq!(W.W.len(), self.num_vars); - assert_eq!(U.X.len(), self.num_io); - - // verify if Az * Bz - u*Cz = 0 - let E = self.compute_E(&W.W, &E::Scalar::ONE, &U.X)?; - E.into_par_iter().enumerate().try_for_each(|(i, e)| { - if e != E::Scalar::ZERO { - Err(NovaError::UnSatIndex(i)) - } else { 
- Ok(()) - } - })?; - - // verify if comm_W is a commitment to W - if U.comm_W != CE::::commit(ck, &W.W) { - return Err(NovaError::UnSat); - } - Ok(()) - } - - /// A method to compute a commitment to the cross-term `T` given a - /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair - pub fn commit_T( - &self, - ck: &CommitmentKey, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - ) -> Result<(Vec, Commitment), NovaError> { - let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") - .in_scope(|| self.multiply_witness(&W1.W, &U1.u, &U1.X))?; - - let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") - .in_scope(|| self.multiply_witness(&W2.W, &E::Scalar::ONE, &U2.X))?; - - let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = - tracing::trace_span!("cross terms").in_scope(|| { - let AZ_1_circ_BZ_2 = (0..AZ_1.len()) - .into_par_iter() - .map(|i| AZ_1[i] * BZ_2[i]) - .collect::>(); - let AZ_2_circ_BZ_1 = (0..AZ_2.len()) - .into_par_iter() - .map(|i| AZ_2[i] * BZ_1[i]) - .collect::>(); - let u_1_cdot_CZ_2 = (0..CZ_2.len()) - .into_par_iter() - .map(|i| U1.u * CZ_2[i]) - .collect::>(); - let u_2_cdot_CZ_1 = (0..CZ_1.len()) - .into_par_iter() - .map(|i| CZ_1[i]) - .collect::>(); - (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) - }); - - let T = tracing::trace_span!("T").in_scope(|| { - AZ_1_circ_BZ_2 - .par_iter() - .zip_eq(&AZ_2_circ_BZ_1) - .zip_eq(&u_1_cdot_CZ_2) - .zip_eq(&u_2_cdot_CZ_1) - .map(|(((a, b), c), d)| *a + *b - *c - *d) - .collect::>() - }); - - let comm_T = CE::::commit(ck, &T); - - Ok((T, comm_T)) - } - - /// A method to compute a commitment to the cross-term `T` given a - /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair - /// - /// This is [`R1CSShape::commit_T`] but into a buffer. 
- pub fn commit_T_into( - &self, - ck: &CommitmentKey, - U1: &RelaxedR1CSInstance, - W1: &RelaxedR1CSWitness, - U2: &R1CSInstance, - W2: &R1CSWitness, - T: &mut Vec, - ABC_Z_1: &mut R1CSResult, - ABC_Z_2: &mut R1CSResult, - ) -> Result, NovaError> { - tracing::info_span!("AZ_1, BZ_1, CZ_1") - .in_scope(|| self.multiply_witness_into(&W1.W, &U1.u, &U1.X, ABC_Z_1))?; - - let R1CSResult { - AZ: AZ_1, - BZ: BZ_1, - CZ: CZ_1, - } = ABC_Z_1; - - tracing::info_span!("AZ_2, BZ_2, CZ_2") - .in_scope(|| self.multiply_witness_into(&W2.W, &E::Scalar::ONE, &U2.X, ABC_Z_2))?; - - let R1CSResult { - AZ: AZ_2, - BZ: BZ_2, - CZ: CZ_2, - } = ABC_Z_2; - - // this doesn't allocate memory but has bad temporal cache locality -- should - // test to see which is faster - T.clear(); - tracing::info_span!("T").in_scope(|| { - (0..AZ_1.len()) - .into_par_iter() - .map(|i| { - let AZ_1_circ_BZ_2 = AZ_1[i] * BZ_2[i]; - let AZ_2_circ_BZ_1 = AZ_2[i] * BZ_1[i]; - let u_1_cdot_Cz_2_plus_Cz_1 = U1.u * CZ_2[i] + CZ_1[i]; - AZ_1_circ_BZ_2 + AZ_2_circ_BZ_1 - u_1_cdot_Cz_2_plus_Cz_1 - }) - .collect_into_vec(T) - }); - - Ok(CE::::commit(ck, T)) - } - - /// Pads the `R1CSShape` so that the shape passes `is_regular_shape` - /// Renumbers variables to accommodate padded variables - pub fn pad(&self) -> Self { - // check if the provided R1CSShape is already as required - if self.is_regular_shape() { - return self.clone(); - } - - // equalize the number of variables, constraints, and public IO - let m = max(max(self.num_vars, self.num_cons), self.num_io).next_power_of_two(); - - // check if the number of variables are as expected, then - // we simply set the number of constraints to the next power of two - if self.num_vars == m { - return Self { - num_cons: m, - num_vars: m, - num_io: self.num_io, - A: self.A.clone(), - B: self.B.clone(), - C: self.C.clone(), - digest: OnceCell::new(), - }; - } - - // otherwise, we need to pad the number of variables and renumber variable - // accesses - let num_vars_padded = 
m; - let num_cons_padded = m; - - let apply_pad = |mut M: SparseMatrix| -> SparseMatrix { - M.indices.par_iter_mut().for_each(|c| { - if *c >= self.num_vars { - *c += num_vars_padded - self.num_vars - } - }); - - M.cols += num_vars_padded - self.num_vars; - - let ex = { - let nnz = M.indptr.last().unwrap(); - vec![*nnz; num_cons_padded - self.num_cons] - }; - M.indptr.extend(ex); - M - }; - - let A_padded = apply_pad(self.A.clone()); - let B_padded = apply_pad(self.B.clone()); - let C_padded = apply_pad(self.C.clone()); - - Self { - num_cons: num_cons_padded, - num_vars: num_vars_padded, - num_io: self.num_io, - A: A_padded, - B: B_padded, - C: C_padded, - digest: OnceCell::new(), - } - } -} - -impl R1CSResult { - /// Produces a default `R1CSResult` given an `R1CSShape` - pub fn default(num_cons: usize) -> Self { - Self { - AZ: vec![E::Scalar::ZERO; num_cons], - BZ: vec![E::Scalar::ZERO; num_cons], - CZ: vec![E::Scalar::ZERO; num_cons], - } - } -} - -impl R1CSWitness { - /// A method to create a witness object using a vector of scalars - pub fn new(S: &R1CSShape, W: Vec) -> Result { - if S.num_vars != W.len() { - Err(NovaError::InvalidWitnessLength) - } else { - Ok(Self { W }) - } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.W) - } -} - -impl R1CSInstance { - /// A method to create an instance object using constituent elements - pub fn new( - S: &R1CSShape, - comm_W: Commitment, - X: Vec, - ) -> Result { - if S.num_io != X.len() { - Err(NovaError::InvalidInputLength) - } else { - Ok(Self { comm_W, X }) - } - } -} - -impl AbsorbInROTrait for R1CSInstance { - fn absorb_in_ro(&self, ro: &mut E::RO) { - self.comm_W.absorb_in_ro(ro); - for x in &self.X { - ro.absorb(scalar_as_base::(*x)); - } - } -} - -impl RelaxedR1CSWitness { - /// Produces a default `RelaxedR1CSWitness` given an `R1CSShape` - pub fn default(S: &R1CSShape) -> Self { - Self { - W: 
vec![E::Scalar::ZERO; S.num_vars], - E: vec![E::Scalar::ZERO; S.num_cons], - } - } - - /// Initializes a new `RelaxedR1CSWitness` from an `R1CSWitness` - pub fn from_r1cs_witness(S: &R1CSShape, witness: R1CSWitness) -> Self { - Self { - W: witness.W, - E: vec![E::Scalar::ZERO; S.num_cons], - } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> (Commitment, Commitment) { - (CE::::commit(ck, &self.W), CE::::commit(ck, &self.E)) - } - - /// Folds an incoming `R1CSWitness` into the current one - pub fn fold( - &self, - W2: &R1CSWitness, - T: &[E::Scalar], - r: &E::Scalar, - ) -> Result { - let (W1, E1) = (&self.W, &self.E); - let W2 = &W2.W; - - if W1.len() != W2.len() { - return Err(NovaError::InvalidWitnessLength); - } - - let W = zip_with!((W1.par_iter(), W2), |a, b| *a + *r * *b).collect::>(); - let E = zip_with!((E1.par_iter(), T), |a, b| *a + *r * *b).collect::>(); - Ok(Self { W, E }) - } - - /// Mutably folds an incoming `R1CSWitness` into the current one - pub fn fold_mut( - &mut self, - W2: &R1CSWitness, - T: &[E::Scalar], - r: &E::Scalar, - ) -> Result<(), NovaError> { - if self.W.len() != W2.W.len() { - return Err(NovaError::InvalidWitnessLength); - } - - self.W - .par_iter_mut() - .zip_eq(&W2.W) - .for_each(|(a, b)| *a += *r * *b); - self.E - .par_iter_mut() - .zip_eq(T) - .for_each(|(a, b)| *a += *r * *b); - - Ok(()) - } - - /// Pads the provided witness to the correct length - pub fn pad(&self, S: &R1CSShape) -> Self { - let mut W = self.W.clone(); - W.extend(vec![E::Scalar::ZERO; S.num_vars - W.len()]); - - let mut E = self.E.clone(); - E.extend(vec![E::Scalar::ZERO; S.num_cons - E.len()]); - - Self { W, E } - } -} - -impl RelaxedR1CSInstance { - /// Produces a default `RelaxedR1CSInstance` given `R1CSGens` and - /// `R1CSShape` - pub fn default(_ck: &CommitmentKey, S: &R1CSShape) -> Self { - let (comm_W, comm_E) = (Commitment::::default(), Commitment::::default()); - Self { - comm_W, - comm_E, 
- u: E::Scalar::ZERO, - X: vec![E::Scalar::ZERO; S.num_io], - } - } - - /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` - pub fn from_r1cs_instance( - _ck: &CommitmentKey, - S: &R1CSShape, - instance: R1CSInstance, - ) -> Self { - assert_eq!(S.num_io, instance.X.len()); - - Self { - comm_W: instance.comm_W, - comm_E: Commitment::::default(), - u: E::Scalar::ONE, - X: instance.X, - } - } - - /// Initializes a new `RelaxedR1CSInstance` from an `R1CSInstance` - pub fn from_r1cs_instance_unchecked(comm_W: &Commitment, X: &[E::Scalar]) -> Self { - Self { - comm_W: *comm_W, - comm_E: Commitment::::default(), - u: E::Scalar::ONE, - X: X.to_vec(), - } - } - - /// Folds an incoming `RelaxedR1CSInstance` into the current one - pub fn fold(&self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) -> Self { - let (X1, u1, comm_W_1, comm_E_1) = - (&self.X, &self.u, &self.comm_W.clone(), &self.comm_E.clone()); - let (X2, comm_W_2) = (&U2.X, &U2.comm_W); - - // weighted sum of X, comm_W, comm_E, and u - let X = zip_with!((X1.par_iter(), X2), |a, b| *a + *r * *b).collect::>(); - let comm_W = *comm_W_1 + *comm_W_2 * *r; - let comm_E = *comm_E_1 + *comm_T * *r; - let u = *u1 + *r; - - Self { - comm_W, - comm_E, - X, - u, - } - } - - /// Mutably folds an incoming `RelaxedR1CSInstance` into the current one - pub fn fold_mut(&mut self, U2: &R1CSInstance, comm_T: &Commitment, r: &E::Scalar) { - let (X2, comm_W_2) = (&U2.X, &U2.comm_W); - - // weighted sum of X, comm_W, comm_E, and u - self.X.par_iter_mut().zip_eq(X2).for_each(|(a, b)| { - *a += *r * *b; - }); - self.comm_W = self.comm_W + *comm_W_2 * *r; - self.comm_E = self.comm_E + *comm_T * *r; - self.u += *r; - } -} - -impl TranscriptReprTrait for RelaxedR1CSInstance { - fn to_transcript_bytes(&self) -> Vec { - [ - self.comm_W.to_transcript_bytes(), - self.comm_E.to_transcript_bytes(), - self.u.to_transcript_bytes(), - self.X.as_slice().to_transcript_bytes(), - ] - .concat() - } -} - -impl AbsorbInROTrait for 
RelaxedR1CSInstance { - fn absorb_in_ro(&self, ro: &mut E::RO) { - self.comm_W.absorb_in_ro(ro); - self.comm_E.absorb_in_ro(ro); - ro.absorb(scalar_as_base::(self.u)); - - // absorb each element of self.X in bignum format - for x in &self.X { - let limbs: Vec = - nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); - for limb in limbs { - ro.absorb(scalar_as_base::(limb)); - } - } - } -} - -/// Empty buffer for `commit_T_into` -pub fn default_T(num_cons: usize) -> Vec { - Vec::with_capacity(num_cons) -} - -#[cfg(test)] -pub(crate) mod tests { - use ff::Field; - use rand_chacha::ChaCha20Rng; - use rand_core::SeedableRng; - - use super::*; - use crate::{ - provider::{Bn256EngineIPA, Bn256EngineKZG}, - r1cs::sparse::SparseMatrix, - traits::Engine, - }; - - pub(crate) fn tiny_r1cs(num_vars: usize) -> R1CSShape { - let one = ::ONE; - let (num_cons, num_vars, num_io, A, B, C) = { - let num_cons = 4; - let num_io = 2; - - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. 
The R1CS for this problem - // consists of the following constraints: `I0 * I0 - Z0 = 0` - // `Z0 * I0 - Z1 = 0` - // `(Z1 + I0) * 1 - Z2 = 0` - // `(Z2 + 5) * 1 - I1 = 0` - - // Relaxed R1CS is a set of three sparse matrices (A B C), where there is a row - // for every constraint and a column for every entry in z = (vars, - // u, inputs) An R1CS instance is satisfiable iff: - // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) - let mut A: Vec<(usize, usize, E::Scalar)> = Vec::new(); - let mut B: Vec<(usize, usize, E::Scalar)> = Vec::new(); - let mut C: Vec<(usize, usize, E::Scalar)> = Vec::new(); - - // constraint 0 entries in (A,B,C) - // `I0 * I0 - Z0 = 0` - A.push((0, num_vars + 1, one)); - B.push((0, num_vars + 1, one)); - C.push((0, 0, one)); - - // constraint 1 entries in (A,B,C) - // `Z0 * I0 - Z1 = 0` - A.push((1, 0, one)); - B.push((1, num_vars + 1, one)); - C.push((1, 1, one)); - - // constraint 2 entries in (A,B,C) - // `(Z1 + I0) * 1 - Z2 = 0` - A.push((2, 1, one)); - A.push((2, num_vars + 1, one)); - B.push((2, num_vars, one)); - C.push((2, 2, one)); - - // constraint 3 entries in (A,B,C) - // `(Z2 + 5) * 1 - I1 = 0` - A.push((3, 2, one)); - A.push((3, num_vars, one + one + one + one + one)); - B.push((3, num_vars, one)); - C.push((3, num_vars + 2, one)); - - (num_cons, num_vars, num_io, A, B, C) - }; - - // create a shape object - let rows = num_cons; - let cols = num_vars + num_io + 1; - - R1CSShape::new( - num_cons, - num_vars, - num_io, - SparseMatrix::new(&A, rows, cols), - SparseMatrix::new(&B, rows, cols), - SparseMatrix::new(&C, rows, cols), - ) - .unwrap() - } - - fn test_pad_tiny_r1cs_with() { - let padded_r1cs = tiny_r1cs::(3).pad(); - assert!(padded_r1cs.is_regular_shape()); - - let expected_r1cs = tiny_r1cs::(4); - - assert_eq!(padded_r1cs, expected_r1cs); - } - - #[test] - fn test_pad_tiny_r1cs() { - test_pad_tiny_r1cs_with::(); - } - - fn test_random_r1cs_with() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - - let 
ck_size: usize = 16_384; - let ck = E::CE::setup(b"ipa", ck_size); - - let cases = [(16, 16, 2, 16), (16, 32, 12, 8), (256, 256, 2, 1024)]; - - for (num_cons, num_vars, num_io, num_entries) in cases { - let S = R1CSShape::::random(num_cons, num_vars, num_io, num_entries, &mut rng); - let (W, U) = S.random_witness_instance(&ck, &mut rng); - S.is_sat_relaxed(&ck, &U, &W).unwrap(); - } - } - - #[test] - fn test_random_r1cs() { - test_random_r1cs_with::(); - } -} diff --git a/src/r1cs/sparse.rs b/src/r1cs/sparse.rs deleted file mode 100644 index 16f5a79..0000000 --- a/src/r1cs/sparse.rs +++ /dev/null @@ -1,380 +0,0 @@ -//! # Sparse Matrices -//! -//! This module defines a custom implementation of CSR/CSC sparse matrices. -//! Specifically, we implement sparse matrix / dense vector multiplication -//! to compute the `A z`, `B z`, and `C z` in Nova. - -use std::{cmp::Ordering, collections::BTreeSet}; - -use ff::PrimeField; -use itertools::Itertools as _; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::*; -use ref_cast::RefCast; -use serde::{Deserialize, Serialize}; - -/// CSR format sparse matrix, We follow the names used by scipy. 
-/// Detailed explanation here: -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct SparseMatrix { - /// all non-zero values in the matrix - pub data: Vec, - /// column indices - pub indices: Vec, - /// row information - pub indptr: Vec, - /// number of columns - pub cols: usize, -} - -/// Wrapper type for encode rows of [`SparseMatrix`] -#[derive(Debug, Clone, RefCast)] -#[repr(transparent)] -pub struct RowData([usize; 2]); - -/// [`SparseMatrix`]s are often large, and this helps with cloning bottlenecks -impl Clone for SparseMatrix { - fn clone(&self) -> Self { - Self { - data: self.data.par_iter().cloned().collect(), - indices: self.indices.par_iter().cloned().collect(), - indptr: self.indptr.par_iter().cloned().collect(), - cols: self.cols, - } - } -} - -impl SparseMatrix { - /// 0x0 empty matrix - pub fn empty() -> Self { - Self { - data: vec![], - indices: vec![], - indptr: vec![0], - cols: 0, - } - } - - /// Construct from the COO representation; Vec. - /// We assume that the rows are sorted during construction. - pub fn new(matrix: &[(usize, usize, F)], rows: usize, cols: usize) -> Self { - let mut new_matrix = vec![vec![]; rows]; - for (row, col, val) in matrix { - new_matrix[*row].push((*col, *val)); - } - - for row in new_matrix.iter() { - assert!(row.windows(2).all(|w| w[0].0 < w[1].0)); - } - - let mut indptr = vec![0; rows + 1]; - for (i, col) in new_matrix.iter().enumerate() { - indptr[i + 1] = indptr[i] + col.len(); - } - - let mut indices = vec![]; - let mut data = vec![]; - for col in new_matrix { - let (idx, val): (Vec<_>, Vec<_>) = col.into_iter().unzip(); - indices.extend(idx); - data.extend(val); - } - - Self { - data, - indices, - indptr, - cols, - } - } - - /// Samples a new random matrix of size `rows` x `cols` with `num_entries` - /// non-zero entries. 
- pub fn random( - rows: usize, - cols: usize, - num_entries: usize, - mut rng: &mut R, - ) -> Self { - assert!(num_entries <= rows * cols); - - let mut indices = BTreeSet::<(usize, usize)>::new(); - while indices.len() < num_entries { - let row = rng.next_u32() as usize % rows; - let col = rng.next_u32() as usize % cols; - indices.insert((row, col)); - } - - let matrix = indices - .into_iter() - .map(|(row, col)| (row, col, F::random(&mut rng))) - .collect::>(); - - Self::new(&matrix, rows, cols) - } - - /// Returns an iterator into the rows - pub fn iter_rows(&self) -> impl Iterator { - self.indptr - .windows(2) - .map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) - } - - /// Returns a parallel iterator into the rows - pub fn par_iter_rows(&self) -> impl IndexedParallelIterator { - self.indptr - .par_windows(2) - .map(|ptrs| RowData::ref_cast(ptrs.try_into().unwrap())) - } - - /// Retrieves the data for row slice [i..j] from `row`. - /// [`RowData`] **must** be created from unmodified `self` previously to - /// guarentee safety. - pub fn get_row(&self, row: &RowData) -> impl Iterator { - self.data[row.0[0]..row.0[1]] - .iter() - .zip_eq(&self.indices[row.0[0]..row.0[1]]) - } - - /// Retrieves the data for row slice [i..j] from `ptrs`. - /// We assume that `ptrs` is indexed from `indptrs` and do not check if the - /// returned slice is actually a valid row. - pub fn get_row_unchecked(&self, ptrs: &[usize; 2]) -> impl Iterator { - self.data[ptrs[0]..ptrs[1]] - .iter() - .zip_eq(&self.indices[ptrs[0]..ptrs[1]]) - } - - /// Multiply by a dense vector; uses rayon to parallelize. - pub fn multiply_vec(&self, vector: &[F]) -> Vec { - assert_eq!(self.cols, vector.len(), "invalid shape"); - - self.multiply_vec_unchecked(vector) - } - - /// Multiply by a dense vector; uses rayon to parallelize. - /// This does not check that the shape of the matrix/vector are compatible. 
- #[tracing::instrument( - skip_all, - level = "trace", - name = "SparseMatrix::multiply_vec_unchecked" - )] - fn multiply_vec_unchecked(&self, vector: &[F]) -> Vec { - let mut sink: Vec = Vec::with_capacity(self.indptr.len() - 1); - self.multiply_vec_into_unchecked(vector, &mut sink); - sink - } - - fn multiply_vec_into_unchecked(&self, vector: &[F], sink: &mut Vec) { - self.indptr - .par_windows(2) - .map(|ptrs| { - self.get_row_unchecked(ptrs.try_into().unwrap()) - .map(|(val, col_idx)| *val * vector[*col_idx]) - .sum() - }) - .collect_into_vec(sink); - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. - pub fn multiply_witness(&self, W: &[F], u: &F, X: &[F]) -> Vec { - assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); - - self.multiply_witness_unchecked(W, u, X) - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. This does not check that the shape of the matrix/vector - /// are compatible. - #[tracing::instrument( - skip_all, - level = "trace", - name = "SparseMatrix::multiply_vec_unchecked" - )] - fn multiply_witness_unchecked(&self, W: &[F], u: &F, X: &[F]) -> Vec { - // preallocate the result vector - let mut sink = Vec::with_capacity(self.indptr.len() - 1); - self.multiply_witness_into_unchecked(W, u, X, &mut sink); - sink - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. - pub fn multiply_witness_into(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { - assert_eq!(self.cols, W.len() + X.len() + 1, "invalid shape"); - - self.multiply_witness_into_unchecked(W, u, X, sink); - } - - /// Multiply by a witness representing a dense vector; uses rayon to - /// parallelize. This does not check that the shape of the matrix/vector - /// are compatible. 
- fn multiply_witness_into_unchecked(&self, W: &[F], u: &F, X: &[F], sink: &mut Vec) { - let num_vars = W.len(); - self.indptr - .par_windows(2) - .map(|ptrs| { - self.get_row_unchecked(ptrs.try_into().unwrap()).fold( - F::ZERO, - |acc, (val, col_idx)| { - let val = match col_idx.cmp(&num_vars) { - Ordering::Less => *val * W[*col_idx], - Ordering::Equal => *val * *u, - Ordering::Greater => *val * X[*col_idx - num_vars - 1], - }; - acc + val - }, - ) - }) - .collect_into_vec(sink); - } - - /// number of non-zero entries - pub fn len(&self) -> usize { - *self.indptr.last().unwrap() - } - - /// empty matrix - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// returns a custom iterator - pub fn iter(&self) -> Iter<'_, F> { - let mut row = 0; - while self.indptr[row + 1] == 0 { - row += 1; - } - Iter { - matrix: self, - row, - i: 0, - nnz: *self.indptr.last().unwrap(), - } - } - - pub fn num_rows(&self) -> usize { - self.indptr.len() - 1 - } - - pub fn num_cols(&self) -> usize { - self.cols - } -} - -/// Iterator for sparse matrix -#[derive(Debug)] -pub struct Iter<'a, F: PrimeField> { - matrix: &'a SparseMatrix, - row: usize, - i: usize, - nnz: usize, -} - -impl<'a, F: PrimeField> Iterator for Iter<'a, F> { - type Item = (usize, usize, F); - - fn next(&mut self) -> Option { - // are we at the end? 
- if self.i == self.nnz { - return None; - } - - // compute current item - let curr_item = ( - self.row, - self.matrix.indices[self.i], - self.matrix.data[self.i], - ); - - // advance the iterator - self.i += 1; - // edge case at the end - if self.i == self.nnz { - return Some(curr_item); - } - // if `i` has moved to next row - while self.i >= self.matrix.indptr[self.row + 1] { - self.row += 1; - } - - Some(curr_item) - } -} - -// #[cfg(test)] -// mod tests { -// #[cfg(not(target_arch = "wasm32"))] -// use proptest::{ -// prelude::*, -// strategy::{BoxedStrategy, Just, Strategy}, -// }; - -// use super::SparseMatrix; -// #[cfg(not(target_arch = "wasm32"))] -// use crate::r1cs::util::FWrap; -// use crate::{ -// provider::PallasEngine, -// traits::{Engine, Group}, -// }; - -// type G = ::GE; -// type Fr = ::Scalar; - -// #[test] -// fn test_matrix_creation() { -// let matrix_data = vec![ -// (0, 1, Fr::from(2)), -// (1, 2, Fr::from(3)), -// (2, 0, Fr::from(4)), -// ]; -// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); - -// assert_eq!( -// sparse_matrix.data, -// vec![Fr::from(2), Fr::from(3), Fr::from(4)] -// ); -// assert_eq!(sparse_matrix.indices, vec![1, 2, 0]); -// assert_eq!(sparse_matrix.indptr, vec![0, 1, 2, 3]); -// } - -// #[test] -// fn test_matrix_vector_multiplication() { -// let matrix_data = vec![ -// (0, 1, Fr::from(2)), -// (0, 2, Fr::from(7)), -// (1, 2, Fr::from(3)), -// (2, 0, Fr::from(4)), -// ]; -// let sparse_matrix = SparseMatrix::::new(&matrix_data, 3, 3); -// let vector = vec![Fr::from(1), Fr::from(2), Fr::from(3)]; - -// let result = sparse_matrix.multiply_vec(&vector); - -// assert_eq!(result, vec![Fr::from(25), Fr::from(9), Fr::from(4)]); -// } - -// #[cfg(not(target_arch = "wasm32"))] -// fn coo_strategy() -> BoxedStrategy)>> { -// let coo_strategy = -// any::>().prop_flat_map(|f| (0usize..100, 0usize..100, -// Just(f))); proptest::collection::vec(coo_strategy, 10).boxed() -// } - -// #[cfg(not(target_arch = "wasm32"))] -// 
proptest! { -// #[test] -// fn test_matrix_iter(mut coo_matrix in coo_strategy()) { -// // process the randomly generated coo matrix -// coo_matrix.sort_by_key(|(row, col, _val)| (*row, *col)); -// coo_matrix.dedup_by_key(|(row, col, _val)| (*row, *col)); -// let coo_matrix = coo_matrix.into_iter().map(|(row, col, val)| { -// (row, col, val.0) }).collect::>(); - -// let matrix = SparseMatrix::new(&coo_matrix, 100, 100); - -// prop_assert_eq!(coo_matrix, matrix.iter().collect::>()); -// } -// } -// } diff --git a/src/spartan/batched.rs b/src/spartan/batched.rs deleted file mode 100644 index 28b6b7c..0000000 --- a/src/spartan/batched.rs +++ /dev/null @@ -1,650 +0,0 @@ -//! This module implements `BatchedRelaxedR1CSSNARKTrait` using Spartan that is -//! generic over the polynomial commitment and evaluation argument (i.e., a PCS) -//! This version of Spartan does not use preprocessing so the verifier keeps the -//! entire description of R1CS matrices. This is essentially optimal for the -//! verifier when using an IPA-based polynomial commitment scheme. This batched -//! implementation batches the outer and inner sumchecks of the Spartan SNARK. 
- -use core::slice; -use std::{iter, sync::Arc}; - -use ff::Field; -use itertools::Itertools; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use super::{ - compute_eval_table_sparse, - math::Math, - polys::{eq::EqPolynomial, multilinear::MultilinearPolynomial}, - powers, - snark::batch_eval_reduce, - sumcheck::SumcheckProof, - PolyEvalInstance, PolyEvalWitness, -}; -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, - spartan::{ - polys::{multilinear::SparsePolynomial, power::PowPolynomial}, - snark::batch_eval_verify, - }, - traits::{ - evaluation::EvaluationEngineTrait, - snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - zip_with, CommitmentKey, -}; - -/// A succinct proof of knowledge of a witness to a batch of relaxed R1CS -/// instances The proof is produced using Spartan's combination of the sum-check -/// and the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct BatchedRelaxedR1CSSNARK> { - sc_proof_outer: SumcheckProof, - // Claims ([Azᵢ(τᵢ)], [Bzᵢ(τᵢ)], [Czᵢ(τᵢ)]) - claims_outer: Vec<(E::Scalar, E::Scalar, E::Scalar)>, - // [Eᵢ(r_x)] - evals_E: Vec, - sc_proof_inner: SumcheckProof, - // [Wᵢ(r_y[1..])] - evals_W: Vec, - sc_proof_batch: SumcheckProof, - // [Wᵢ(r_z), Eᵢ(r_z)] - evals_batch: Vec, - eval_arg: EE::EvaluationArgument, -} - -/// A type that represents the prover's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey> { - pub pk_ee: EE::ProverKey, - pub vk_digest: E::Scalar, // digest of the verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - pub vk_ee: EE::VerifierKey, - S: Vec>, - #[serde(skip, 
default = "OnceCell::new")] - pub digest: OnceCell, -} - -impl> VerifierKey { - fn new(shapes: Vec>, vk_ee: EE::VerifierKey) -> Self { - Self { - vk_ee, - S: shapes, - digest: OnceCell::new(), - } - } -} - -impl> SimpleDigestible for VerifierKey {} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key. - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -impl> BatchedRelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - // NOTE: We do not use the verifier key in this context - // TODO: This currently samples a `ck_c` element, does this need to - // be truly secret, if so, retrieve from an SRS. - let (pk_ee, _vk) = EE::setup(ck); - - Ok(ProverKey { pk_ee, vk_digest }) - } - - fn setup( - ck: Arc>, - S: Vec<&R1CSShape>, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - let (pk_ee, vk_ee) = EE::setup(ck); - - let S = S.iter().map(|s| s.pad()).collect(); - - let vk = VerifierKey::new(S, vk_ee); - - let pk = ProverKey { - pk_ee, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result { - let num_instances = U.len(); - // Pad shapes and ensure their sizes are correct - let S = S.iter().map(|s| s.pad()).collect::>(); - - // Pad (W,E) for each instance - let W = zip_with!(iter, (W, S), |w, s| w.pad(s)).collect::>>(); - - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - transcript.absorb(b"vk", &pk.vk_digest); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", 
&U); - - let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); - - // Append public inputs to W: Z = [W, u, X] - let polys_Z = zip_with!(iter, (polys_W, U), |w, u| [ - w.clone(), - vec![u.u], - u.X.clone() - ] - .concat()) - .collect::>>(); - - let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = S - .iter() - .map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)) - .unzip(); - let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); - let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); - - // Generate tau polynomial corresponding to eq(τ, τ², τ⁴ , …) - // for a random challenge τ - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); - - let polys_tau = num_rounds_x - .iter() - .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) - .map(MultilinearPolynomial::new) - .collect::>(); - - // Compute MLEs of Az, Bz, Cz, uCz + E - let (polys_Az, polys_Bz, polys_Cz): (Vec<_>, Vec<_>, Vec<_>) = - zip_with!(par_iter, (S, polys_Z), |s, poly_Z| { - let (poly_Az, poly_Bz, poly_Cz) = s.multiply_vec(poly_Z)?; - Ok((poly_Az, poly_Bz, poly_Cz)) - }) - .collect::, NovaError>>()? 
- .into_iter() - .multiunzip(); - - let polys_uCz_E = zip_with!(par_iter, (U, polys_E, polys_Cz), |u, poly_E, poly_Cz| { - zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| u.u * cz + e).collect::>() - }) - .collect::>(); - - let comb_func_outer = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - // Sample challenge for random linear-combination of outer claims - let outer_r = transcript.squeeze(b"out_r")?; - let outer_r_powers = powers(&outer_r, num_instances); - - // Verify outer sumcheck: Az * Bz - uCz_E for each instance - let (sc_proof_outer, r_x, claims_outer) = - SumcheckProof::prove_cubic_with_additive_term_batch( - &vec![E::Scalar::ZERO; num_instances], - &num_rounds_x, - polys_tau, - polys_Az - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - polys_Bz - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - polys_uCz_E - .into_iter() - .map(MultilinearPolynomial::new) - .collect(), - &outer_r_powers, - comb_func_outer, - &mut transcript, - )?; - - let r_x = num_rounds_x - .iter() - .map(|&num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) - .collect::>(); - - // Extract evaluations of Az, Bz from Sumcheck and Cz, E at r_x - let (evals_Az_Bz_Cz, evals_E): (Vec<_>, Vec<_>) = zip_with!( - par_iter, - (claims_outer[1], claims_outer[2], polys_Cz, polys_E, r_x), - |eval_Az, eval_Bz, poly_Cz, poly_E, r_x| { - let (eval_Cz, eval_E) = rayon::join( - || MultilinearPolynomial::evaluate_with(poly_Cz, r_x), - || MultilinearPolynomial::evaluate_with(poly_E, r_x), - ); - ((*eval_Az, *eval_Bz, eval_Cz), eval_E) - } - ) - .unzip(); - - evals_Az_Bz_Cz.iter().zip_eq(evals_E.iter()).for_each( - |(&(eval_Az, eval_Bz, eval_Cz), &eval_E)| { - transcript.absorb( - b"claims_outer", - &[eval_Az, eval_Bz, eval_Cz, eval_E].as_slice(), - ) - }, - ); - - let inner_r = transcript.squeeze(b"in_r")?; - let 
inner_r_square = inner_r.square(); - let inner_r_cube = inner_r_square * inner_r; - let inner_r_powers = powers(&inner_r_cube, num_instances); - - let claims_inner_joint = evals_Az_Bz_Cz - .iter() - .map(|(eval_Az, eval_Bz, eval_Cz)| { - *eval_Az + inner_r * eval_Bz + inner_r_square * eval_Cz - }) - .collect::>(); - - let polys_ABCs = { - let inner = |M_evals_As: Vec, - M_evals_Bs: Vec, - M_evals_Cs: Vec| - -> Vec { - zip_with!( - into_par_iter, - (M_evals_As, M_evals_Bs, M_evals_Cs), - |eval_A, eval_B, eval_C| eval_A + inner_r * eval_B + inner_r_square * eval_C - ) - .collect::>() - }; - - zip_with!(par_iter, (S, r_x), |s, r_x| { - let evals_rx = EqPolynomial::evals_from_points(r_x); - let (eval_A, eval_B, eval_C) = compute_eval_table_sparse(s, &evals_rx); - MultilinearPolynomial::new(inner(eval_A, eval_B, eval_C)) - }) - .collect::>() - }; - - let polys_Z = polys_Z - .into_iter() - .zip_eq(num_rounds_y.iter()) - .map(|(mut z, &num_rounds_y)| { - z.resize(1 << num_rounds_y, E::Scalar::ZERO); - MultilinearPolynomial::new(z) - }) - .collect::>(); - - let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { - *poly_A_comp * *poly_B_comp - }; - - let (sc_proof_inner, r_y, _claims_inner): ( - SumcheckProof, - Vec, - (Vec<_>, Vec<_>), - ) = SumcheckProof::prove_quad_batch( - &claims_inner_joint, - &num_rounds_y, - polys_ABCs, - polys_Z, - &inner_r_powers, - comb_func, - &mut transcript, - )?; - - let r_y = num_rounds_y - .iter() - .map(|num_rounds| { - let (_, r_y_hi) = r_y.split_at(num_rounds_y_max - num_rounds); - r_y_hi - }) - .collect::>(); - - let evals_W = zip_with!(par_iter, (polys_W, r_y), |poly, r_y| { - MultilinearPolynomial::evaluate_with(poly, &r_y[1..]) - }) - .collect::>(); - - // Create evaluation instances for W(r_y[1..]) and E(r_x) - let (w_vec, u_vec) = { - let mut w_vec = Vec::with_capacity(2 * num_instances); - let mut u_vec = Vec::with_capacity(2 * num_instances); - w_vec.extend(polys_W.into_iter().map(|poly| PolyEvalWitness 
{ p: poly })); - u_vec.extend(zip_with!(iter, (evals_W, U, r_y), |eval, u, r_y| { - PolyEvalInstance { - c: u.comm_W, - x: r_y[1..].to_vec(), - e: *eval, - } - })); - - w_vec.extend(polys_E.into_iter().map(|poly| PolyEvalWitness { p: poly })); - u_vec.extend(zip_with!( - (evals_E.iter(), U.iter(), r_x), - |eval_E, u, r_x| PolyEvalInstance { - c: u.comm_E, - x: r_x, - e: *eval_E, - } - )); - (w_vec, u_vec) - }; - - let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = - batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &batched_u.c, - &batched_w.p, - &batched_u.x, - &batched_u.e, - )?; - - Ok(Self { - sc_proof_outer, - claims_outer: evals_Az_Bz_Cz, - evals_E, - sc_proof_inner, - evals_W, - sc_proof_batch, - evals_batch: claims_batch_left, - eval_arg, - }) - } - - fn verify( - &self, - vk: &Self::VerifierKey, - U: &[RelaxedR1CSInstance], - ) -> Result<(), NovaError> { - let num_instances = U.len(); - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - transcript.absorb(b"vk", &vk.digest()); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - let num_instances = U.len(); - - let (num_rounds_x, num_rounds_y): (Vec<_>, Vec<_>) = - vk.S.iter() - .map(|s| (s.num_cons.log_2(), s.num_vars.log_2() + 1)) - .unzip(); - let num_rounds_x_max = *num_rounds_x.iter().max().unwrap(); - let num_rounds_y_max = *num_rounds_y.iter().max().unwrap(); - - // Define τ polynomials of the appropriate size for each instance - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, num_rounds_x_max); - - let polys_tau = num_rounds_x - .iter() - .map(|&num_rounds_x| PowPolynomial::evals_with_powers(&all_taus, num_rounds_x)) - .map(MultilinearPolynomial::new) - .collect::>(); - - // Sample challenge for random linear-combination of outer claims - let 
outer_r = transcript.squeeze(b"out_r")?; - let outer_r_powers = powers(&outer_r, num_instances); - - let (claim_outer_final, r_x) = self.sc_proof_outer.verify_batch( - &vec![E::Scalar::ZERO; num_instances], - &num_rounds_x, - &outer_r_powers, - 3, - &mut transcript, - )?; - - // Since each instance has a different number of rounds, the Sumcheck - // prover skips the first num_rounds_x_max - num_rounds_x rounds. - // The evaluation point for each instance is therefore r_x[num_rounds_x_max - - // num_rounds_x..] - let r_x = num_rounds_x - .iter() - .map(|num_rounds| r_x[(num_rounds_x_max - num_rounds)..].to_vec()) - .collect::>(); - - // Extract evaluations into a vector [(Azᵢ, Bzᵢ, Czᵢ, Eᵢ)] - let ABCE_evals = || self.claims_outer.iter().zip_eq(self.evals_E.iter()); - - // Add evaluations of Az, Bz, Cz, E to transcript - for ((claim_Az, claim_Bz, claim_Cz), eval_E) in ABCE_evals() { - transcript.absorb( - b"claims_outer", - &[*claim_Az, *claim_Bz, *claim_Cz, *eval_E].as_slice(), - ) - } - - let chis_r_x = r_x - .par_iter() - .map(|r_x| EqPolynomial::evals_from_points(r_x)) - .collect::>(); - - // Evaluate τ(rₓ) for each instance - let evals_tau = zip_with!(iter, (polys_tau, chis_r_x), |poly_tau, er_x| { - MultilinearPolynomial::evaluate_with_chis(poly_tau.evaluations(), er_x) - }); - - // Compute expected claim for all instances ∑ᵢ rⁱ⋅τ(rₓ)⋅(Azᵢ⋅Bzᵢ − uᵢ⋅Czᵢ − Eᵢ) - let claim_outer_final_expected = zip_with!( - (ABCE_evals(), U.iter(), evals_tau, outer_r_powers.iter()), - |ABCE_eval, u, eval_tau, r| { - let ((claim_Az, claim_Bz, claim_Cz), eval_E) = ABCE_eval; - *r * eval_tau * (*claim_Az * claim_Bz - u.u * claim_Cz - eval_E) - } - ) - .sum::(); - - if claim_outer_final != claim_outer_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - let inner_r = transcript.squeeze(b"in_r")?; - let inner_r_square = inner_r.square(); - let inner_r_cube = inner_r_square * inner_r; - let inner_r_powers = powers(&inner_r_cube, num_instances); - - // Compute inner 
claims Mzᵢ = (Azᵢ + r⋅Bzᵢ + r²⋅Czᵢ), - // which are batched by Sumcheck into one claim: ∑ᵢ r³ⁱ⋅Mzᵢ - let claims_inner = self - .claims_outer - .iter() - .map(|(claim_Az, claim_Bz, claim_Cz)| { - *claim_Az + inner_r * claim_Bz + inner_r_square * claim_Cz - }) - .collect::>(); - - let (claim_inner_final, r_y) = self.sc_proof_inner.verify_batch( - &claims_inner, - &num_rounds_y, - &inner_r_powers, - 2, - &mut transcript, - )?; - let r_y: Vec> = num_rounds_y - .iter() - .map(|num_rounds| r_y[(num_rounds_y_max - num_rounds)..].to_vec()) - .collect(); - - // Compute evaluations of Zᵢ = [Wᵢ, uᵢ, Xᵢ] at r_y - // Zᵢ(r_y) = (1−r_y[0])⋅W(r_y[1..]) + r_y[0]⋅MLE([uᵢ, Xᵢ])(r_y[1..]) - let evals_Z = zip_with!(iter, (self.evals_W, U, r_y), |eval_W, U, r_y| { - let eval_X = { - // constant term - let poly_X = iter::once(U.u).chain(U.X.iter().cloned()).collect(); - SparsePolynomial::new(r_y.len() - 1, poly_X).evaluate(&r_y[1..]) - }; - (E::Scalar::ONE - r_y[0]) * eval_W + r_y[0] * eval_X - }) - .collect::>(); - - // compute evaluations of R1CS matrices M(r_x, r_y) = eq(r_y)ᵀ⋅M⋅eq(r_x) - let multi_evaluate = |M_vec: &[&SparseMatrix], - chi_r_x: &[E::Scalar], - r_y: &[E::Scalar]| - -> Vec { - let evaluate_with_table = - |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { - M.par_iter_rows() - .enumerate() - .map(|(row_idx, row)| { - M.get_row(row) - .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) - .sum::() - }) - .sum() - }; - - let T_x = chi_r_x; - let T_y = EqPolynomial::evals_from_points(r_y); - - M_vec - .par_iter() - .map(|&M_vec| evaluate_with_table(M_vec, T_x, &T_y)) - .collect() - }; - - // Compute inner claim ∑ᵢ r³ⁱ⋅(Aᵢ(r_x, r_y) + r⋅Bᵢ(r_x, r_y) + r²⋅Cᵢ(r_x, - // r_y))⋅Zᵢ(r_y) - let claim_inner_final_expected = zip_with!( - iter, - (vk.S, chis_r_x, r_y, evals_Z, inner_r_powers), - |S, r_x, r_y, eval_Z, r_i| { - let evals = multi_evaluate(&[&S.A, &S.B, &S.C], r_x, r_y); - let eval = evals[0] + inner_r * evals[1] + inner_r_square * evals[2]; - 
eval * r_i * eval_Z - } - ) - .sum::(); - - if claim_inner_final != claim_inner_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // Create evaluation instances for W(r_y[1..]) and E(r_x) - let u_vec = { - let mut u_vec = Vec::with_capacity(2 * num_instances); - u_vec.extend(zip_with!(iter, (self.evals_W, U, r_y), |eval, u, r_y| { - PolyEvalInstance { - c: u.comm_W, - x: r_y[1..].to_vec(), - e: *eval, - } - })); - - u_vec.extend(zip_with!(iter, (self.evals_E, U, r_x), |eval, u, r_x| { - PolyEvalInstance { - c: u.comm_E, - x: r_x.to_vec(), - e: *eval, - } - })); - u_vec - }; - - let batched_u = batch_eval_verify( - u_vec, - &mut transcript, - &self.sc_proof_batch, - &self.evals_batch, - )?; - - // verify - EE::verify( - &vk.vk_ee, - &mut transcript, - &batched_u.c, - &batched_u.x, - &batched_u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -impl> RelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - >::ck_floor() - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - >::initialize_pk(ck, vk_digest) - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - >::setup(ck, vec![S]) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - let slice_U = slice::from_ref(U); - let slice_W = slice::from_ref(W); - >::prove(ck, pk, vec![S], slice_U, slice_W) - } - - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let slice = slice::from_ref(U); - >::verify(self, vk, slice) - } -} diff --git a/src/spartan/batched_ppsnark.rs b/src/spartan/batched_ppsnark.rs deleted file mode 100644 index 8ee9439..0000000 --- a/src/spartan/batched_ppsnark.rs +++ /dev/null @@ -1,1413 +0,0 @@ -//! 
batched pp snark - -use core::slice; -use std::sync::Arc; - -use ff::Field; -use itertools::{chain, Itertools as _}; -use once_cell::sync::*; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, - identity::IdentityPolynomial, - masked_eq::MaskedEqPolynomial, - multilinear::{MultilinearPolynomial, SparsePolynomial}, - power::PowPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - powers, - ppsnark::{R1CSShapeSparkCommitment, R1CSShapeSparkRepr}, - sumcheck::{ - engine::{ - InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, - SumcheckEngine, WitnessBoundSumcheck, - }, - SumcheckProof, - }, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - evaluation::EvaluationEngineTrait, - snark::{BatchedRelaxedR1CSSNARKTrait, DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - zip_with, zip_with_for_each, Commitment, CommitmentKey, CompressedCommitment, -}; - -/// A type that represents the prover's key -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct ProverKey> { - pk_ee: EE::ProverKey, - S_repr: Vec>, - S_comm: Vec>, - vk_digest: E::Scalar, // digest of verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Deserialize, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - vk_ee: EE::VerifierKey, - S_comm: Vec>, - num_vars: Vec, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} -impl> VerifierKey { - fn new( - num_vars: Vec, - S_comm: Vec>, - vk_ee: EE::VerifierKey, - ) -> Self { - Self { - num_vars, - S_comm, - vk_ee, - digest: Default::default(), - } - } -} - -impl> SimpleDigestible for VerifierKey {} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the 
digest of the verifier's key - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct BatchedRelaxedR1CSSNARK> { - // commitment to oracles: the first three are for Az, Bz, Cz, - // and the last two are for memory reads - comms_Az_Bz_Cz: Vec<[CompressedCommitment; 3]>, - comms_L_row_col: Vec<[CompressedCommitment; 2]>, - // commitments to aid the memory checks - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - comms_mem_oracles: Vec<[CompressedCommitment; 4]>, - - // claims about Az, Bz, and Cz polynomials - evals_Az_Bz_Cz_at_tau: Vec<[E::Scalar; 3]>, - - // sum-check - sc: SumcheckProof, - - // claims from the end of sum-check - evals_Az_Bz_Cz_W_E: Vec<[E::Scalar; 5]>, - evals_L_row_col: Vec<[E::Scalar; 2]>, - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - evals_mem_oracle: Vec<[E::Scalar; 4]>, - // [val_A, val_B, val_C, row, col, ts_row, ts_col] - evals_mem_preprocessed: Vec<[E::Scalar; 7]>, - - // a PCS evaluation argument - eval_arg: EE::EvaluationArgument, -} - -impl> BatchedRelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - Box::new(|shape: &R1CSShape| -> usize { - // the commitment key should be large enough to commit to the R1CS matrices - std::cmp::max( - shape.A.len() + shape.B.len() + shape.C.len(), - std::cmp::max(shape.num_cons, 2 * shape.num_vars), - ) - }) - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - 
todo!("unimplemented for batched_ppsnark"); - } - - fn setup( - ck: Arc>, - S: Vec<&R1CSShape>, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - for s in S.iter() { - // check the provided commitment key meets minimal requirements - if ck.length() < >::ck_floor()(s) { - // return Err(NovaError::InvalidCommitmentKeyLength); - return Err(NovaError::InternalError); - } - } - let (pk_ee, vk_ee) = EE::setup(ck.clone()); - - let S = S.iter().map(|s| s.pad()).collect::>(); - let S_repr = S.iter().map(R1CSShapeSparkRepr::new).collect::>(); - let S_comm = S_repr - .iter() - .map(|s_repr| s_repr.commit(&*ck)) - .collect::>(); - let num_vars = S.iter().map(|s| s.num_vars).collect::>(); - let vk = VerifierKey::new(num_vars, S_comm.clone(), vk_ee); - let pk = ProverKey { - pk_ee, - S_repr, - S_comm, - vk_digest: vk.digest(), - }; - Ok((pk, vk)) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result { - // Pad shapes so that num_vars = num_cons = Nᵢ and check the sizes are correct - let S = S.par_iter().map(|s| s.pad()).collect::>(); - - // N[i] = max(|Aᵢ|+|Bᵢ|+|Cᵢ|, 2*num_varsᵢ, num_consᵢ) - let Nis = pk.S_repr.iter().map(|s| s.N).collect::>(); - assert!(Nis.iter().all(|&Ni| Ni.is_power_of_two())); - let N_max = *Nis.iter().max().unwrap(); - - let num_instances = U.len(); - - // Pad [(Wᵢ,Eᵢ)] to the next power of 2 (not to Ni) - let W = - zip_with!(par_iter, (W, S), |w, s| w.pad(s)).collect::>>(); - - // number of rounds of sum-check - let num_rounds_sc = N_max.log_2(); - - // Initialize transcript with vk || [Uᵢ] - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - transcript.absorb(b"vk", &pk.vk_digest); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - // Append public inputs to Wᵢ: Zᵢ = [Wᵢ, uᵢ, Xᵢ] - let polys_Z = 
zip_with!(par_iter, (W, U, Nis), |W, U, Ni| { - // poly_Z will be resized later, so we preallocate the correct capacity - let mut poly_Z = Vec::with_capacity(*Ni); - poly_Z.extend(W.W.iter().chain([&U.u]).chain(U.X.iter())); - poly_Z - }) - .collect::>>(); - - // Move polys_W and polys_E, as well as U.u out of U - let (comms_W_E, us): (Vec<_>, Vec<_>) = - U.iter().map(|U| ([U.comm_W, U.comm_E], U.u)).unzip(); - let (polys_W, polys_E): (Vec<_>, Vec<_>) = W.into_iter().map(|w| (w.W, w.E)).unzip(); - - // Compute [Az, Bz, Cz] - let mut polys_Az_Bz_Cz = zip_with!(par_iter, (polys_Z, S), |z, s| { - let (Az, Bz, Cz) = s.multiply_vec(z)?; - Ok([Az, Bz, Cz]) - }) - .collect::, NovaError>>()?; - - // Commit to [Az, Bz, Cz] and add to transcript - let comms_Az_Bz_Cz = polys_Az_Bz_Cz - .par_iter() - .map(|[Az, Bz, Cz]| { - let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( - || E::CE::commit(ck, Az), - || rayon::join(|| E::CE::commit(ck, Bz), || E::CE::commit(ck, Cz)), - ); - [comm_Az, comm_Bz, comm_Cz] - }) - .collect::>(); - comms_Az_Bz_Cz - .iter() - .for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); - - // Compute eq(tau) for each instance in log2(Ni) variables - let tau = transcript.squeeze(b"t")?; - let all_taus = PowPolynomial::squares(&tau, N_max.log_2()); - - let (polys_tau, coords_tau): (Vec<_>, Vec<_>) = Nis - .par_iter() - .map(|&N_i| { - let log_Ni = N_i.log_2(); - let eqp: EqPolynomial<_> = all_taus[..log_Ni].iter().cloned().collect(); - let evals = eqp.evals(); - let coords = eqp.r; - (evals, coords) - }) - .unzip(); - - // Pad [Az, Bz, Cz] to Ni - polys_Az_Bz_Cz - .par_iter_mut() - .zip_eq(Nis.par_iter()) - .for_each(|(az_bz_cz, &Ni)| { - az_bz_cz - .par_iter_mut() - .for_each(|mz| mz.resize(Ni, E::Scalar::ZERO)) - }); - - // Evaluate and commit to [Az(tau), Bz(tau), Cz(tau)] - let evals_Az_Bz_Cz_at_tau = zip_with!( - par_iter, - (polys_Az_Bz_Cz, coords_tau), - |ABCs, tau_coords| { - let [Az, Bz, Cz] = ABCs; - let (eval_Az, (eval_Bz, eval_Cz)) = 
rayon::join( - || MultilinearPolynomial::evaluate_with(Az, tau_coords), - || { - rayon::join( - || MultilinearPolynomial::evaluate_with(Bz, tau_coords), - || MultilinearPolynomial::evaluate_with(Cz, tau_coords), - ) - }, - ); - [eval_Az, eval_Bz, eval_Cz] - } - ) - .collect::>(); - - // absorb the claimed evaluations into the transcript - for evals in evals_Az_Bz_Cz_at_tau.iter() { - transcript.absorb(b"e", &evals.as_slice()); - } - - // Pad Zᵢ, E to Nᵢ - let polys_Z = polys_Z - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_Z, &Ni)| { - poly_Z.resize(Ni, E::Scalar::ZERO); - poly_Z - }) - .collect::>(); - - // Pad both W,E to have the same size. This is inefficient for W since the - // second half is empty, but it makes it easier to handle the batching - // at the end. - let polys_E = polys_E - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_E, &Ni)| { - poly_E.resize(Ni, E::Scalar::ZERO); - poly_E - }) - .collect::>(); - - let polys_W = polys_W - .into_par_iter() - .zip_eq(Nis.par_iter()) - .map(|(mut poly_W, &Ni)| { - poly_W.resize(Ni, E::Scalar::ZERO); - poly_W - }) - .collect::>(); - - // (2) send commitments to the following two oracles - // L_row(i) = eq(tau, row(i)) for all i in [0..Nᵢ] - // L_col(i) = z(col(i)) for all i in [0..Nᵢ] - let polys_L_row_col = zip_with!( - par_iter, - (S, Nis, polys_Z, polys_tau), - |S, Ni, poly_Z, poly_tau| { - let mut L_row = vec![poly_tau[0]; *Ni]; // we place mem_row[0] since resized row is appended with 0s - let mut L_col = vec![poly_Z[Ni - 1]; *Ni]; // we place mem_col[Ni-1] since resized col is appended with Ni-1 - - for (i, (val_r, val_c)) in - S.A.iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, c, _)| (poly_tau[r], poly_Z[c])) - .enumerate() - { - L_row[i] = val_r; - L_col[i] = val_c; - } - - [L_row, L_col] - } - ) - .collect::>(); - - let comms_L_row_col = polys_L_row_col - .par_iter() - .map(|[L_row, L_col]| { - let (comm_L_row, comm_L_col) = - rayon::join(|| E::CE::commit(ck, 
L_row), || E::CE::commit(ck, L_col)); - [comm_L_row, comm_L_col] - }) - .collect::>(); - - // absorb commitments to L_row and L_col in the transcript - for comms in comms_L_row_col.iter() { - transcript.absorb(b"e", &comms.as_slice()); - } - - // For each instance, batch Mz = Az + c*Bz + c^2*Cz - let c = transcript.squeeze(b"c")?; - - let polys_Mz: Vec<_> = polys_Az_Bz_Cz - .par_iter() - .map(|polys_Az_Bz_Cz| { - let poly_vec: Vec<&Vec<_>> = polys_Az_Bz_Cz.iter().collect(); - let w = PolyEvalWitness::::batch(&poly_vec[..], &c); - w.p - }) - .collect(); - - let evals_Mz: Vec<_> = zip_with!( - iter, - (comms_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau), - |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { - let u = PolyEvalInstance::::batch( - comm_Az_Bz_Cz.as_slice(), - vec![], // ignored by the function - evals_Az_Bz_Cz_at_tau.as_slice(), - &c, - ); - u.e - } - ) - .collect(); - - // we now need to prove three claims for each instance - // (outer) - // 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)) - // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = - // (Az+c*Bz+c^2*Cz)(tau) (inner) - // eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau = \sum_y - // L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) * L_col(y) - // (mem) - // L_row(i) = eq(tau, row(i)) - // L_col(i) = z(col(i)) - let outer_sc_inst = zip_with!( - ( - polys_Az_Bz_Cz.par_iter(), - polys_E.par_iter(), - polys_Mz.into_par_iter(), - polys_tau.par_iter(), - evals_Mz.par_iter(), - us.par_iter() - ), - |poly_ABC, poly_E, poly_Mz, poly_tau, eval_Mz, u| { - let [poly_Az, poly_Bz, poly_Cz] = poly_ABC; - let poly_uCz_E = - zip_with!(par_iter, (poly_Cz, poly_E), |cz, e| *u * cz + e).collect(); - OuterSumcheckInstance::new( - poly_tau.clone(), - poly_Az.clone(), - poly_Bz.clone(), - poly_uCz_E, - poly_Mz, // Mz = Az + c * Bz + c^2 * Cz - eval_Mz, // eval_Az_at_tau + c * eval_Az_at_tau + c^2 * eval_Cz_at_tau - ) - } - ) - .collect::>(); - - let inner_sc_inst = zip_with!( - par_iter, - 
(pk.S_repr, evals_Mz, polys_L_row_col), - |s_repr, eval_Mz, poly_L| { - let [poly_L_row, poly_L_col] = poly_L; - let c_square = c.square(); - let val = zip_with!( - par_iter, - (s_repr.val_A, s_repr.val_B, s_repr.val_C), - |v_a, v_b, v_c| *v_a + c * *v_b + c_square * *v_c - ) - .collect::>(); - - InnerSumcheckInstance::new( - *eval_Mz, - MultilinearPolynomial::new(poly_L_row.clone()), - MultilinearPolynomial::new(poly_L_col.clone()), - MultilinearPolynomial::new(val), - ) - } - ) - .collect::>(); - - // a third sum-check instance to prove the read-only memory claim - // we now need to prove that L_row and L_col are well-formed - let (mem_sc_inst, comms_mem_oracles, polys_mem_oracles) = { - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - // We start by computing oracles and auxiliary polynomials to help prove the - // claim oracles correspond to [t_plus_r_inv_row, w_plus_r_inv_row, - // t_plus_r_inv_col, w_plus_r_inv_col] - let (comms_mem_oracles, polys_mem_oracles, mem_aux) = pk - .S_repr - .iter() - .zip_eq(polys_tau.iter()) - .zip_eq(polys_Z.iter()) - .zip_eq(polys_L_row_col.iter()) - .try_fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut comms, mut polys, mut aux), - (((s_repr, poly_tau), poly_Z), [L_row, L_col])| { - let (comm, poly, a) = MemorySumcheckInstance::::compute_oracles( - ck, - &r, - &gamma, - poly_tau, - &s_repr.row, - L_row, - &s_repr.ts_row, - poly_Z, - &s_repr.col, - L_col, - &s_repr.ts_col, - )?; - - comms.push(comm); - polys.push(poly); - aux.push(a); - - Ok::<_, NovaError>((comms, polys, aux)) - }, - )?; - - // Commit to oracles - for comms in comms_mem_oracles.iter() { - transcript.absorb(b"l", &comms.as_slice()); - } - - // Sample new random variable for eq polynomial - let rho = transcript.squeeze(b"r")?; - let all_rhos = PowPolynomial::squares(&rho, N_max.log_2()); - - let instances = zip_with!( - ( - pk.S_repr.par_iter(), - Nis.par_iter(), - polys_mem_oracles.par_iter(), - mem_aux.into_par_iter() - ), - 
|s_repr, Ni, polys_mem_oracles, polys_aux| { - MemorySumcheckInstance::::new( - polys_mem_oracles.clone(), - polys_aux, - PowPolynomial::evals_with_powers(&all_rhos, Ni.log_2()), - s_repr.ts_row.clone(), - s_repr.ts_col.clone(), - ) - } - ) - .collect::>(); - (instances, comms_mem_oracles, polys_mem_oracles) - }; - - let witness_sc_inst = zip_with!(par_iter, (polys_W, S), |poly_W, S| { - WitnessBoundSumcheck::new(tau, poly_W.clone(), S.num_vars) - }) - .collect::>(); - - // Run batched Sumcheck for the 3 claims for all instances. - // Note that the polynomials for claims relating to instance i have size Ni. - let (sc, rand_sc, claims_outer, claims_inner, claims_mem, claims_witness) = - Self::prove_helper( - num_rounds_sc, - mem_sc_inst, - outer_sc_inst, - inner_sc_inst, - witness_sc_inst, - &mut transcript, - )?; - - let (evals_Az_Bz_Cz_W_E, evals_L_row_col, evals_mem_oracle, evals_mem_preprocessed) = { - let evals_Az_Bz = claims_outer - .into_iter() - .map(|claims| [claims[0][0], claims[0][1]]) - .collect::>(); - - let evals_L_row_col = claims_inner - .into_iter() - .map(|claims| { - // [L_row, L_col] - [claims[0][0], claims[0][1]] - }) - .collect::>(); - - let (evals_mem_oracle, evals_mem_ts): (Vec<_>, Vec<_>) = claims_mem - .into_iter() - .map(|claims| { - ( - // [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] - [claims[0][0], claims[0][1], claims[1][0], claims[1][1]], - // [ts_row, ts_col] - [claims[0][2], claims[1][2]], - ) - }) - .unzip(); - - let evals_W = claims_witness - .into_iter() - .map(|claims| claims[0][0]) - .collect::>(); - - let (evals_Cz_E, evals_mem_val_row_col): (Vec<_>, Vec<_>) = zip_with!( - iter, - (polys_Az_Bz_Cz, polys_E, pk.S_repr), - |ABCzs, poly_E, s_repr| { - let [_, _, Cz] = ABCzs; - let log_Ni = s_repr.N.log_2(); - let (_, rand_sc) = rand_sc.split_at(num_rounds_sc - log_Ni); - let rand_sc_evals = EqPolynomial::evals_from_points(rand_sc); - let e = [ - Cz, - poly_E, - &s_repr.val_A, - &s_repr.val_B, - 
&s_repr.val_C, - &s_repr.row, - &s_repr.col, - ] - .into_iter() - .map(|p| { - // Manually compute evaluation to avoid recomputing rand_sc_evals - zip_with!(par_iter, (p, rand_sc_evals), |p, eq| *p * eq).sum() - }) - .collect::>(); - ([e[0], e[1]], [e[2], e[3], e[4], e[5], e[6]]) - } - ) - .unzip(); - - let evals_Az_Bz_Cz_W_E = zip_with!( - (evals_Az_Bz.into_iter(), evals_Cz_E.into_iter(), evals_W), - |Az_Bz, Cz_E, W| { - let [Az, Bz] = Az_Bz; - let [Cz, E] = Cz_E; - [Az, Bz, Cz, W, E] - } - ) - .collect::>(); - - // [val_A, val_B, val_C, row, col, ts_row, ts_col] - let evals_mem_preprocessed = zip_with!( - (evals_mem_val_row_col.into_iter(), evals_mem_ts), - |eval_mem_val_row_col, eval_mem_ts| { - let [val_A, val_B, val_C, row, col] = eval_mem_val_row_col; - let [ts_row, ts_col] = eval_mem_ts; - [val_A, val_B, val_C, row, col, ts_row, ts_col] - } - ) - .collect::>(); - ( - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed, - ) - }; - - let evals_vec = zip_with!( - iter, - ( - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed - ), - |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { - chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed] - .cloned() - .collect::>() - } - ) - .collect::>(); - - let comms_vec = zip_with!( - iter, - ( - comms_Az_Bz_Cz, - comms_W_E, - comms_L_row_col, - comms_mem_oracles, - pk.S_comm - ), - |Az_Bz_Cz, comms_W_E, L_row_col, mem_oracles, S_comm| { - chain![ - Az_Bz_Cz, - comms_W_E, - L_row_col, - mem_oracles, - [ - &S_comm.comm_val_A, - &S_comm.comm_val_B, - &S_comm.comm_val_C, - &S_comm.comm_row, - &S_comm.comm_col, - &S_comm.comm_ts_row, - &S_comm.comm_ts_col, - ] - ] - } - ) - .flatten() - .cloned() - .collect::>(); - - let w_vec = zip_with!( - ( - polys_Az_Bz_Cz.into_iter(), - polys_W.into_iter(), - polys_E.into_iter(), - polys_L_row_col.into_iter(), - polys_mem_oracles.into_iter(), - pk.S_repr.iter() - ), - |Az_Bz_Cz, W, E, L_row_col, mem_oracles, 
S_repr| { - chain![ - Az_Bz_Cz, - [W, E], - L_row_col, - mem_oracles, - [ - S_repr.val_A.clone(), - S_repr.val_B.clone(), - S_repr.val_C.clone(), - S_repr.row.clone(), - S_repr.col.clone(), - S_repr.ts_row.clone(), - S_repr.ts_col.clone(), - ] - ] - } - ) - .flatten() - .map(|p| PolyEvalWitness:: { p }) - .collect::>(); - - for evals in evals_vec.iter() { - transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already - // in the transcript - } - let evals_vec = evals_vec.into_iter().flatten().collect::>(); - - let c = transcript.squeeze(b"c")?; - - // Compute number of variables for each polynomial - let num_vars_u = w_vec.iter().map(|w| w.p.len().log_2()).collect::>(); - let u_batch = - PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars_u, rand_sc, c); - let w_batch = - PolyEvalWitness::::batch_diff_size(&w_vec.iter().by_ref().collect::>(), c); - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &u_batch.c, - &w_batch.p, - &u_batch.x, - &u_batch.e, - )?; - - let comms_Az_Bz_Cz = comms_Az_Bz_Cz - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - let comms_L_row_col = comms_L_row_col - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - let comms_mem_oracles = comms_mem_oracles - .into_iter() - .map(|comms| comms.map(|comm| comm.compress())) - .collect(); - - Ok(Self { - comms_Az_Bz_Cz, - comms_L_row_col, - comms_mem_oracles, - evals_Az_Bz_Cz_at_tau, - sc, - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - evals_mem_oracle, - evals_mem_preprocessed, - eval_arg, - }) - } - - fn verify( - &self, - vk: &Self::VerifierKey, - U: &[RelaxedR1CSInstance], - ) -> Result<(), NovaError> { - let num_instances = U.len(); - let num_claims_per_instance = 10; - - // number of rounds of sum-check - let num_rounds = vk.S_comm.iter().map(|s| s.N.log_2()).collect::>(); - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let mut transcript = E::TE::new(b"BatchedRelaxedR1CSSNARK"); - - 
transcript.absorb(b"vk", &vk.digest()); - if num_instances > 1 { - let num_instances_field = E::Scalar::from(num_instances as u64); - transcript.absorb(b"n", &num_instances_field); - } - transcript.absorb(b"U", &U); - - // Decompress commitments - let comms_Az_Bz_Cz = self - .comms_Az_Bz_Cz - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - let comms_L_row_col = self - .comms_L_row_col - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - let comms_mem_oracles = self - .comms_mem_oracles - .iter() - .map(|comms| { - comms - .iter() - .map(Commitment::::decompress) - .collect::, _>>() - }) - .collect::, _>>()?; - - // Add commitments [Az, Bz, Cz] to the transcript - comms_Az_Bz_Cz - .iter() - .for_each(|comms| transcript.absorb(b"c", &comms.as_slice())); - - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_max).coordinates(); - - // absorb the claimed evaluations into the transcript - self.evals_Az_Bz_Cz_at_tau.iter().for_each(|evals| { - transcript.absorb(b"e", &evals.as_slice()); - }); - - // absorb commitments to L_row and L_col in the transcript - for comms in comms_L_row_col.iter() { - transcript.absorb(b"e", &comms.as_slice()); - } - - // Batch at tau for each instance - let c = transcript.squeeze(b"c")?; - - // Compute eval_Mz = eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * eval_Cz_at_tau - let evals_Mz: Vec<_> = zip_with!( - iter, - (comms_Az_Bz_Cz, self.evals_Az_Bz_Cz_at_tau), - |comm_Az_Bz_Cz, evals_Az_Bz_Cz_at_tau| { - let u = PolyEvalInstance::::batch( - comm_Az_Bz_Cz.as_slice(), - tau_coords.clone(), - evals_Az_Bz_Cz_at_tau.as_slice(), - &c, - ); - u.e - } - ) - .collect(); - - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - for comms in comms_mem_oracles.iter() { - transcript.absorb(b"l", &comms.as_slice()); - } - - let rho = 
transcript.squeeze(b"r")?; - - let s = transcript.squeeze(b"r")?; - let s_powers = powers(&s, num_instances * num_claims_per_instance); - - let (claim_sc_final, rand_sc) = { - // Gather all claims into a single vector - let claims = evals_Mz - .iter() - .flat_map(|&eval_Mz| { - let mut claims = vec![E::Scalar::ZERO; num_claims_per_instance]; - claims[7] = eval_Mz; - claims[8] = eval_Mz; - claims.into_iter() - }) - .collect::>(); - - // Number of rounds for each claim - let num_rounds_by_claim = num_rounds - .iter() - .flat_map(|num_rounds_i| vec![*num_rounds_i; num_claims_per_instance].into_iter()) - .collect::>(); - - self.sc - .verify_batch(&claims, &num_rounds_by_claim, &s_powers, 3, &mut transcript)? - }; - - // Truncated sumcheck randomness for each instance - let rand_sc_i = num_rounds - .iter() - .map(|num_rounds| rand_sc[(num_rounds_max - num_rounds)..].to_vec()) - .collect::>(); - - let claim_sc_final_expected = zip_with!( - ( - vk.num_vars.iter(), - rand_sc_i.iter(), - U.iter(), - self.evals_Az_Bz_Cz_W_E.iter().cloned(), - self.evals_L_row_col.iter().cloned(), - self.evals_mem_oracle.iter().cloned(), - self.evals_mem_preprocessed.iter().cloned() - ), - |num_vars, - rand_sc, - U, - evals_Az_Bz_Cz_W_E, - evals_L_row_col, - eval_mem_oracle, - eval_mem_preprocessed| { - let [Az, Bz, Cz, W, E] = evals_Az_Bz_Cz_W_E; - let [L_row, L_col] = evals_L_row_col; - let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = - eval_mem_oracle; - let [val_A, val_B, val_C, row, col, ts_row, ts_col] = eval_mem_preprocessed; - - let num_rounds_i = rand_sc.len(); - let num_vars_log = num_vars.log_2(); - - let eq_rho = PowPolynomial::new(&rho, num_rounds_i).evaluate(rand_sc); - - let (eq_tau, eq_masked_tau) = { - let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_i).into(); - - let eq_tau_at_rand = eq_tau.evaluate(rand_sc); - let eq_masked_tau = - MaskedEqPolynomial::new(&eq_tau, num_vars_log).evaluate(rand_sc); - - (eq_tau_at_rand, 
eq_masked_tau) - }; - - // Evaluate identity polynomial - let id = IdentityPolynomial::new(num_rounds_i).evaluate(rand_sc); - - let Z = { - // rand_sc was padded, so we now remove the padding - let (factor, rand_sc_unpad) = { - let l = num_rounds_i - (num_vars_log + 1); - - let (rand_sc_lo, rand_sc_hi) = rand_sc.split_at(l); - - let factor = rand_sc_lo - .iter() - .fold(E::Scalar::ONE, |acc, r_p| acc * (E::Scalar::ONE - r_p)); - - (factor, rand_sc_hi) - }; - - let X = { - // constant term - let poly_X = std::iter::once(U.u).chain(U.X.iter().cloned()).collect(); - SparsePolynomial::new(num_vars_log, poly_X).evaluate(&rand_sc_unpad[1..]) - }; - - // W was evaluated as if it was padded to logNi variables, - // so we don't multiply it by (1-rand_sc_unpad[0]) - W + factor * rand_sc_unpad[0] * X - }; - - let t_plus_r_row = { - let addr_row = id; - let val_row = eq_tau; - let t = addr_row + gamma * val_row; - t + r - }; - - let w_plus_r_row = { - let addr_row = row; - let val_row = L_row; - let w = addr_row + gamma * val_row; - w + r - }; - - let t_plus_r_col = { - let addr_col = id; - let val_col = Z; - let t = addr_col + gamma * val_col; - t + r - }; - - let w_plus_r_col = { - let addr_col = col; - let val_col = L_col; - let w = addr_col + gamma * val_col; - w + r - }; - - let claims_mem = [ - t_plus_r_inv_row - w_plus_r_inv_row, - t_plus_r_inv_col - w_plus_r_inv_col, - eq_rho * (t_plus_r_inv_row * t_plus_r_row - ts_row), - eq_rho * (w_plus_r_inv_row * w_plus_r_row - E::Scalar::ONE), - eq_rho * (t_plus_r_inv_col * t_plus_r_col - ts_col), - eq_rho * (w_plus_r_inv_col * w_plus_r_col - E::Scalar::ONE), - ]; - - let claims_outer = [ - eq_tau * (Az * Bz - U.u * Cz - E), - eq_tau * (Az + c * Bz + c * c * Cz), - ]; - let claims_inner = [L_row * L_col * (val_A + c * val_B + c * c * val_C)]; - - let claims_witness = [eq_masked_tau * W]; - - chain![claims_mem, claims_outer, claims_inner, claims_witness] - } - ) - .flatten() - .zip_eq(s_powers) - .fold(E::Scalar::ZERO, |acc, 
(claim, s)| acc + s * claim); - - if claim_sc_final_expected != claim_sc_final { - return Err(NovaError::InvalidSumcheckProof); - } - - let evals_vec = zip_with!( - iter, - ( - self.evals_Az_Bz_Cz_W_E, - self.evals_L_row_col, - self.evals_mem_oracle, - self.evals_mem_preprocessed - ), - |Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed| { - chain![Az_Bz_Cz_W_E, L_row_col, mem_oracles, mem_preprocessed] - .cloned() - .collect::>() - } - ) - .collect::>(); - - // Add all Sumcheck evaluations to the transcript - for evals in evals_vec.iter() { - transcript.absorb(b"e", &evals.as_slice()); // comm_vec is already - // in the transcript - } - - let c = transcript.squeeze(b"c")?; - - // Compute batched polynomial evaluation instance at rand_sc - let u = { - let num_evals = evals_vec[0].len(); - - let evals_vec = evals_vec.into_iter().flatten().collect::>(); - - let num_vars = num_rounds - .iter() - .flat_map(|num_rounds| vec![*num_rounds; num_evals].into_iter()) - .collect::>(); - - let comms_vec = zip_with!( - ( - comms_Az_Bz_Cz.into_iter(), - U.iter(), - comms_L_row_col.into_iter(), - comms_mem_oracles.into_iter(), - vk.S_comm.iter() - ), - |Az_Bz_Cz, U, L_row_col, mem_oracles, S_comm| { - chain![ - Az_Bz_Cz, - [U.comm_W, U.comm_E], - L_row_col, - mem_oracles, - [ - S_comm.comm_val_A, - S_comm.comm_val_B, - S_comm.comm_val_C, - S_comm.comm_row, - S_comm.comm_col, - S_comm.comm_ts_row, - S_comm.comm_ts_col, - ] - ] - } - ) - .flatten() - .collect::>(); - - PolyEvalInstance::::batch_diff_size(&comms_vec, &evals_vec, &num_vars, rand_sc, c) - }; - - // verify - EE::verify(&vk.vk_ee, &mut transcript, &u.c, &u.x, &u.e, &self.eval_arg)?; - - Ok(()) - } -} - -impl> BatchedRelaxedR1CSSNARK { - /// Runs the batched Sumcheck protocol for the claims of multiple instance - /// of possibly different sizes. - /// - /// # Details - /// - /// In order to avoid padding all polynomials to the same maximum size, we - /// adopt the following strategy. 
- /// - /// Let n be the number of variables for the largest instance, - /// and let m be the number of variables for a shorter one. - /// Let P(X_{0},...,X_{m-1}) be one of the MLEs of the short instance, which - /// has been committed to by taking the MSM of its evaluations with the - /// first 2^m basis points of the commitment key. - /// - /// This Sumcheck prover will interpret it as the polynomial - /// P'(X_{0},...,X_{n-1}) = P(X_{n-m},...,X_{n-1}), - /// whose MLE evaluations over {0,1}^m is equal to 2^{n-m} repetitions of - /// the evaluations of P. - /// - /// In order to account for these "imagined" repetitions, the initial claims - /// for this short instances are scaled by 2^{n-m}. - /// - /// For the first n-m rounds, the univariate polynomials relating to this - /// shorter claim will be constant, and equal to the initial claims, - /// scaled by 2^{n-m-i}, where i is the round number. By definition, P' - /// does not depend on X_i, so binding P' to r_i has no effect on the - /// evaluations. The Sumcheck prover will then interpret the polynomial - /// P' as having half as many repetitions in the next round. - /// - /// When we get to round n-m, the Sumcheck proceeds as usual since the - /// polynomials are the expected size for the round. - /// - /// Note that at the end of the protocol, the prover returns the evaluation - /// u' = P'(r_{0},...,r_{n-1}) = P(r_{n-m},...,r_{n-1}) - /// However, the polynomial we actually committed to over {0,1}^n is - /// P''(X_{0},...,X_{n-1}) = L_0(X_{0},...,X_{n-m-1}) * - /// P(X_{n-m},...,X_{n-1}) The SNARK prover/verifier will need to - /// rescale the evaluation by the first Lagrange polynomial - /// u'' = L_0(r_{0},...,r_{n-m-1}) * u' - /// in order batch all evaluations with a single PCS call. 
- fn prove_helper( - num_rounds: usize, - mut mem: Vec, - mut outer: Vec, - mut inner: Vec, - mut witness: Vec, - transcript: &mut E::TE, - ) -> Result< - ( - SumcheckProof, - Vec, - Vec>>, - Vec>>, - Vec>>, - Vec>>, - ), - NovaError, - > - where - T1: SumcheckEngine, - T2: SumcheckEngine, - T3: SumcheckEngine, - T4: SumcheckEngine, - { - // sanity checks - let num_instances = mem.len(); - assert_eq!(outer.len(), num_instances); - assert_eq!(inner.len(), num_instances); - assert_eq!(witness.len(), num_instances); - - for inst in mem.iter_mut() { - assert!(inst.size().is_power_of_two()); - } - for inst in outer.iter() { - assert!(inst.size().is_power_of_two()); - } - for inst in inner.iter() { - assert!(inst.size().is_power_of_two()); - } - for inst in witness.iter() { - assert!(inst.size().is_power_of_two()); - } - - let degree = mem[0].degree(); - assert!(mem.iter().all(|inst| inst.degree() == degree)); - assert!(outer.iter().all(|inst| inst.degree() == degree)); - assert!(inner.iter().all(|inst| inst.degree() == degree)); - assert!(witness.iter().all(|inst| inst.degree() == degree)); - - // Collect all claims from the instances. If the instances is defined over `m` - // variables, which is less that the total number of rounds `n`, - // the individual claims σ are scaled by 2^{n-m}. - let claims = zip_with!( - iter, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - Self::scaled_claims(mem, num_rounds) - .into_iter() - .chain(Self::scaled_claims(outer, num_rounds)) - .chain(Self::scaled_claims(inner, num_rounds)) - .chain(Self::scaled_claims(witness, num_rounds)) - } - ) - .flatten() - .collect::>(); - - // Sample a challenge for the random linear combination of all scaled claims - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, claims.len()); - - // At the start of each round, the running claim is equal to the random linear - // combination of the Sumcheck claims, evaluated over the bound - // polynomials. 
Initially, it is equal to the random linear combination - // of the scaled input claims. - let mut running_claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); - - // Keep track of the verifier challenges r, and the univariate polynomials sent - // by the prover in each round - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); - - for i in 0..num_rounds { - // At the start of round i, there input polynomials are defined over at most n-i - // variables. - let remaining_variables = num_rounds - i; - - // For each claim j, compute the evaluations of its univariate polynomial - // S_j(X_i) at X = 0, 2, 3. The polynomial is such that - // S_{j-1}(r_{j-1}) = S_j(0) + S_j(1). If the number of variable m - // of the claim is m < n-i, then the polynomial is constants and - // equal to the initial claim σ_j scaled by 2^{n-m-i-1}. - let evals = zip_with!( - par_iter, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( - || { - rayon::join( - || Self::get_evals(mem, remaining_variables), - || Self::get_evals(outer, remaining_variables), - ) - }, - || { - rayon::join( - || Self::get_evals(inner, remaining_variables), - || Self::get_evals(witness, remaining_variables), - ) - }, - ); - evals_mem - .into_par_iter() - .chain(evals_outer.into_par_iter()) - .chain(evals_inner.into_par_iter()) - .chain(evals_witness.into_par_iter()) - } - ) - .flatten() - .collect::>(); - - assert_eq!(evals.len(), claims.len()); - - // Random linear combination of the univariate evaluations at X_i = 0, 2, 3 - let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); - let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - running_claim - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - 
// Coefficient representation of S(X_i) - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - // Bind the variable X_i of polynomials across all claims to r_i. - // If the claim is defined over m variables and m < n-i, then - // binding has no effect on the polynomial. - zip_with_for_each!( - par_iter_mut, - (mem, outer, inner, witness), - |mem, outer, inner, witness| { - rayon::join( - || { - rayon::join( - || Self::bind(mem, remaining_variables, &r_i), - || Self::bind(outer, remaining_variables, &r_i), - ) - }, - || { - rayon::join( - || Self::bind(inner, remaining_variables, &r_i), - || Self::bind(witness, remaining_variables, &r_i), - ) - }, - ); - } - ); - - running_claim = poly.evaluate(&r_i); - cubic_polys.push(poly.compress()); - } - - // Collect evaluations at (r_{n-m}, ..., r_{n-1}) of polynomials over all - // claims, where m is the initial number of variables the individual - // claims are defined over. - let claims_outer = outer.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_inner = inner.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_mem = mem.into_iter().map(|inst| inst.final_claims()).collect(); - let claims_witness = witness - .into_iter() - .map(|inst| inst.final_claims()) - .collect(); - - Ok(( - SumcheckProof::new(cubic_polys), - r, - claims_outer, - claims_inner, - claims_mem, - claims_witness, - )) - } - - /// In round i, computes the evaluations at X_i = 0, 2, 3 of the univariate - /// polynomials S(X_i) for each claim in the instance. - /// Let `n` be the total number of Sumcheck rounds, and assume the instance - /// is defined over `m` variables. We define `remaining_variables` as - /// n-i. 
If m < n-i, then the polynomials in the instance are not - /// defined over X_i, so the univariate polynomial is constant and equal - /// to 2^{n-m-i-1}*σ, where σ is the initial claim. - fn get_evals>( - inst: &T, - remaining_variables: usize, - ) -> Vec> { - let num_instance_variables = inst.size().log_2(); // m - if num_instance_variables < remaining_variables { - let deg = inst.degree(); - - // The evaluations at X_i = 0, 2, 3 are all equal to the scaled claim - Self::scaled_claims(inst, remaining_variables - 1) - .into_iter() - .map(|scaled_claim| vec![scaled_claim; deg]) - .collect() - } else { - inst.evaluation_points() - } - } - - /// In round i after receiving challenge r_i, we partially evaluate all - /// polynomials in the instance at X_i = r_i. If the instance is defined - /// over m variables m which is less than n-i, then the polynomials do - /// not depend on X_i, so binding them to r_i has no effect. - fn bind>(inst: &mut T, remaining_variables: usize, r: &E::Scalar) { - let num_instance_variables = inst.size().log_2(); // m - if remaining_variables <= num_instance_variables { - inst.bound(r) - } - } - - /// Given an instance defined over m variables, the sum over n = - /// `remaining_variables` is equal to the initial claim scaled by - /// 2^{n-m}, when m ≤ n. 
- fn scaled_claims>(inst: &T, remaining_variables: usize) -> Vec { - let num_instance_variables = inst.size().log_2(); // m - let num_repetitions = 1 << (remaining_variables - num_instance_variables); - let scaling = E::Scalar::from(num_repetitions as u64); - inst.initial_claims() - .iter() - .map(|claim| scaling * claim) - .collect() - } -} - -impl> RelaxedR1CSSNARKTrait - for BatchedRelaxedR1CSSNARK -{ - type ProverKey = ProverKey; - - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - >::ck_floor() - } - - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result { - >::initialize_pk(ck, vk_digest) - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - >::setup(ck, vec![S]) - } - - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - let slice_U = slice::from_ref(U); - let slice_W = slice::from_ref(W); - - >::prove(ck, pk, vec![S], slice_U, slice_W) - } - - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let slice = slice::from_ref(U); - >::verify(self, vk, slice) - } -} diff --git a/src/spartan/math.rs b/src/spartan/math.rs deleted file mode 100644 index 853b72e..0000000 --- a/src/spartan/math.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub trait Math { - fn log_2(self) -> usize; -} - -impl Math for usize { - fn log_2(self) -> usize { - assert_ne!(self, 0); - - if self.is_power_of_two() { - (1usize.leading_zeros() - self.leading_zeros()) as Self - } else { - (0usize.leading_zeros() - self.leading_zeros()) as Self - } - } -} diff --git a/src/spartan/polys/eq.rs b/src/spartan/polys/eq.rs deleted file mode 100644 index 7d5e0c6..0000000 --- a/src/spartan/polys/eq.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! `EqPolynomial`: Represents multilinear extension of equality polynomials, -//! evaluated based on binary input values. 
- -use ff::PrimeField; -use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$. -/// -/// The polynomial is defined by the formula: -/// $$ -/// \tilde{eq}(x, e) = \prod_{i=1}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) -/// $$ -/// -/// Each element in the vector `r` corresponds to a component $e_i$, -/// representing a bit from the binary representation of an input value $e$. -/// This polynomial evaluates to 1 if every component $x_i$ equals its -/// corresponding $e_i$, and 0 otherwise. -/// -/// For instance, for e = 6 (with a binary representation of 0b110), the vector -/// r would be [1, 1, 0]. -#[derive(Debug)] -pub struct EqPolynomial { - pub(in crate::spartan) r: Vec, -} - -impl EqPolynomial { - /// Creates a new `EqPolynomial` from a vector of Scalars `r`. - /// - /// Each Scalar in `r` corresponds to a bit from the binary representation - /// of an input value `e`. - pub const fn new(r: Vec) -> Self { - Self { r } - } - - /// Evaluates the `EqPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `r`. - /// - /// Panics if `rx` and `r` have different lengths. - pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - assert_eq!(self.r.len(), rx.len()); - (0..rx.len()) - .map(|i| self.r[i] * rx[i] + (Scalar::ONE - self.r[i]) * (Scalar::ONE - rx[i])) - .product() - } - - /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- #[must_use = "this returns an expensive vector and leaves self unchanged"] - pub fn evals(&self) -> Vec { - Self::evals_from_points(&self.r) - } - - /// Evaluates the `EqPolynomial` from the `2^|r|` points in its domain, - /// without creating an intermediate polynomial representation. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. - pub fn evals_from_points(r: &[Scalar]) -> Vec { - let ell = r.len(); - let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; - let mut size = 1; - evals[0] = Scalar::ONE; - - for r in r.iter().rev() { - let (evals_left, evals_right) = evals.split_at_mut(size); - let (evals_right, _) = evals_right.split_at_mut(size); - - evals_left - .par_iter_mut() - .zip_eq(evals_right.par_iter_mut()) - .for_each(|(x, y)| { - *y = *x * r; - *x -= &*y; - }); - - size *= 2; - } - - evals - } -} - -impl FromIterator for EqPolynomial { - fn from_iter>(iter: I) -> Self { - let r: Vec<_> = iter.into_iter().collect(); - Self { r } - } -} - -#[cfg(test)] -mod tests { - - use super::*; - use crate::provider; - - fn test_eq_polynomial_with() { - let eq_poly = EqPolynomial::::new(vec![F::ONE, F::ZERO, F::ONE]); - let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice()); - assert_eq!(y, F::ZERO); - - let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice()); - assert_eq!(y, F::ONE); - - let eval_list = eq_poly.evals(); - for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { - if i == 5 { - assert_eq!(coeff, F::ONE); - } else { - assert_eq!(coeff, F::ZERO); - } - } - } - - #[test] - fn test_eq_polynomial() { - test_eq_polynomial_with::(); - } -} diff --git a/src/spartan/polys/identity.rs b/src/spartan/polys/identity.rs deleted file mode 100644 index f2c6068..0000000 --- a/src/spartan/polys/identity.rs +++ /dev/null @@ -1,30 +0,0 @@ -use core::marker::PhantomData; - -use ff::PrimeField; - -pub struct IdentityPolynomial { - ell: usize, - _p: 
PhantomData, -} - -impl IdentityPolynomial { - pub fn new(ell: usize) -> Self { - Self { - ell, - _p: PhantomData, - } - } - - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.ell, r.len()); - let mut power_of_two = 1_u64; - (0..self.ell) - .rev() - .map(|i| { - let result = Scalar::from(power_of_two) * r[i]; - power_of_two *= 2; - result - }) - .sum() - } -} diff --git a/src/spartan/polys/masked_eq.rs b/src/spartan/polys/masked_eq.rs deleted file mode 100644 index b882af8..0000000 --- a/src/spartan/polys/masked_eq.rs +++ /dev/null @@ -1,150 +0,0 @@ -//! `MaskedEqPolynomial`: Represents the `eq` polynomial over n variables, where -//! the first 2^m entries are 0. - -use ff::PrimeField; -use itertools::zip_eq; - -use crate::spartan::polys::eq::EqPolynomial; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $eqₘ(x,r)$ over n variables, where the first 2^m evaluations are -/// 0. -/// -/// The polynomial is defined by the formula: -/// eqₘ(x,r) = eq(x,r) - ( ∏_{0 ≤ i < n-m} (1−rᵢ)(1−xᵢ) )⋅( ∏_{n-m ≤ i < n} -/// (1−rᵢ)(1−xᵢ) + rᵢ⋅xᵢ ) -#[derive(Debug)] -pub struct MaskedEqPolynomial<'a, Scalar> { - eq: &'a EqPolynomial, - num_masked_vars: usize, -} - -impl<'a, Scalar: PrimeField> MaskedEqPolynomial<'a, Scalar> { - /// Creates a new `MaskedEqPolynomial` from a vector of Scalars `r` of size - /// n, with the number of masked variables m = `num_masked_vars`. - pub const fn new(eq: &'a EqPolynomial, num_masked_vars: usize) -> Self { - MaskedEqPolynomial { - eq, - num_masked_vars, - } - } - - /// Evaluates the `MaskedEqPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `r`. - /// - /// Panics if `rx` and `r` have different lengths. 
- pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - let r = &self.eq.r; - assert_eq!(r.len(), rx.len()); - let split_idx = r.len() - self.num_masked_vars; - - let (r_lo, r_hi) = r.split_at(split_idx); - let (rx_lo, rx_hi) = rx.split_at(split_idx); - let eq_lo = zip_eq(r_lo, rx_lo) - .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - let eq_hi = zip_eq(r_hi, rx_hi) - .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - let mask_lo = zip_eq(r_lo, rx_lo) - .map(|(r, rx)| (Scalar::ONE - r) * (Scalar::ONE - rx)) - .product::(); - - (eq_lo - mask_lo) * eq_hi - } - - /// Evaluates the `MaskedEqPolynomial` at all the `2^|r|` points in its - /// domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. - pub fn evals(&self) -> Vec { - Self::evals_from_points(&self.eq.r, self.num_masked_vars) - } - - /// Evaluates the `MaskedEqPolynomial` from the `2^|r|` points in its - /// domain, without creating an intermediate polynomial representation. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- fn evals_from_points(r: &[Scalar], num_masked_vars: usize) -> Vec { - let mut evals = EqPolynomial::evals_from_points(r); - - // replace the first 2^m evaluations with 0 - let num_masked_evals = 1 << num_masked_vars; - evals[..num_masked_evals] - .iter_mut() - .for_each(|e| *e = Scalar::ZERO); - - evals - } -} - -#[cfg(test)] -mod tests { - use rand_chacha::ChaCha20Rng; - use rand_core::{CryptoRng, RngCore, SeedableRng}; - - use super::*; - use crate::{provider, spartan::polys::eq::EqPolynomial}; - - fn test_masked_eq_polynomial_with( - num_vars: usize, - num_masked_vars: usize, - mut rng: &mut R, - ) { - let num_masked_evals = 1 << num_masked_vars; - - // random point - let r = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(num_vars) - .collect::>(); - // evaluation point - let rx = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(num_vars) - .collect::>(); - - let poly_eq = EqPolynomial::new(r); - let poly_eq_evals = poly_eq.evals(); - - let masked_eq_poly = MaskedEqPolynomial::new(&poly_eq, num_masked_vars); - let masked_eq_poly_evals = masked_eq_poly.evals(); - - // ensure the first 2^m entries are 0 - assert_eq!( - masked_eq_poly_evals[..num_masked_evals], - vec![F::ZERO; num_masked_evals] - ); - // ensure the remaining evaluations match eq(r) - assert_eq!( - masked_eq_poly_evals[num_masked_evals..], - poly_eq_evals[num_masked_evals..] 
- ); - - // compute the evaluation at rx succinctly - let masked_eq_eval = masked_eq_poly.evaluate(&rx); - - // compute the evaluation as a MLE - let rx_evals = EqPolynomial::evals_from_points(&rx); - let expected_masked_eq_eval = zip_eq(rx_evals, masked_eq_poly_evals) - .map(|(rx, r)| rx * r) - .sum(); - - assert_eq!(masked_eq_eval, expected_masked_eq_eval); - } - - #[test] - fn test_masked_eq_polynomial() { - let mut rng = ChaCha20Rng::from_seed([0u8; 32]); - let num_vars = 5; - let num_masked_vars = 2; - test_masked_eq_polynomial_with::( - num_vars, - num_masked_vars, - &mut rng, - ); - } -} diff --git a/src/spartan/polys/multilinear.rs b/src/spartan/polys/multilinear.rs deleted file mode 100644 index d7f3436..0000000 --- a/src/spartan/polys/multilinear.rs +++ /dev/null @@ -1,336 +0,0 @@ -//! Main components: -//! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, -//! represented by evaluations over all possible binary inputs. -//! - `SparsePolynomial`: Efficient representation of sparse multilinear -//! polynomials, storing only non-zero evaluations. - -use std::ops::{Add, Index}; - -use ff::PrimeField; -use itertools::Itertools as _; -use rand_core::{CryptoRng, RngCore}; -use rayon::prelude::{ - IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, -}; -use serde::{Deserialize, Serialize}; - -use crate::spartan::{math::Math, polys::eq::EqPolynomial}; - -/// A multilinear extension of a polynomial $Z(\cdot)$, denote it as -/// $\tilde{Z}(x_1, ..., x_m)$ where the degree of each variable is at most one. -/// -/// This is the dense representation of a multilinear poynomial. -/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can -/// be represented uniquely by the list of evaluations of $\mathbb{G}(\cdot)$ -/// over the Boolean hypercube $\{0, 1\}^m$. -/// -/// For example, a 3 variables multilinear polynomial can be represented by -/// evaluation at points $[0, 2^3-1]$. 
-/// -/// The implementation follows -/// $$ -/// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in {0,1}^m}Z(e) \cdot \prod_{i=1}^m(x_i -/// \cdot e_i + (1-x_i) \cdot (1-e_i)) $$ -/// -/// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct MultilinearPolynomial { - num_vars: usize, // the number of variables in the multilinear polynomial - pub(crate) Z: Vec, /* evaluations of the polynomial in all the 2^num_vars Boolean - * inputs */ -} - -impl MultilinearPolynomial { - /// Creates a new `MultilinearPolynomial` from the given evaluations. - /// - /// # Panics - /// The number of evaluations must be a power of two. - pub fn new(Z: Vec) -> Self { - let num_vars = Z.len().log_2(); - assert_eq!(Z.len(), 1 << num_vars); - Self { num_vars, Z } - } - - /// evaluations of the polynomial in all the 2^num_vars Boolean inputs - pub fn evaluations(&self) -> &[Scalar] { - &self.Z[..] - } - - /// Returns the number of variables in the multilinear polynomial - pub const fn get_num_vars(&self) -> usize { - self.num_vars - } - - /// Returns the total number of evaluations. - pub fn len(&self) -> usize { - self.Z.len() - } - - /// Returns true if no evaluations. - pub fn is_empty(&self) -> bool { - self.Z.len() == 0 - } - - /// Returns a random polynomial - pub fn random(num_vars: usize, mut rng: &mut R) -> Self { - Self::new( - std::iter::from_fn(|| Some(Scalar::random(&mut rng))) - .take(1 << num_vars) - .collect(), - ) - } - - /// Binds the polynomial's top variable using the given scalar. - /// - /// This operation modifies the polynomial in-place. 
- pub fn bind_poly_var_top(&mut self, r: &Scalar) { - assert!(self.num_vars > 0); - - let n = self.len() / 2; - - let (left, right) = self.Z.split_at_mut(n); - - left.par_iter_mut() - .zip_eq(right.par_iter()) - .for_each(|(a, b)| { - *a += *r * (*b - *a); - }); - - self.Z.resize(n, Scalar::ZERO); - self.num_vars -= 1; - } - - /// Evaluates the polynomial at the given point. - /// Returns Z(r) in O(n) time. - /// - /// The point must have a value for each variable. - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - // r must have a value for each variable - assert_eq!(r.len(), self.get_num_vars()); - Self::evaluate_with(&self.Z, r) - } - - /// Evaluates the polynomial with the given evaluations and point. - pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { - let chis = EqPolynomial::evals_from_points(r); - Self::evaluate_with_chis(Z, &chis) - } - - /// Evaluates the polynomial with the given evaluations and chi coefficients - pub fn evaluate_with_chis(Z: &[Scalar], chis: &[Scalar]) -> Scalar { - zip_with!(par_iter, (chis, Z), |a, b| *a * b).sum() - } -} - -impl Index for MultilinearPolynomial { - type Output = Scalar; - - #[inline(always)] - fn index(&self, _index: usize) -> &Scalar { - &(self.Z[_index]) - } -} - -/// Sparse multilinear polynomial, which means the $Z(\cdot)$ is zero at most -/// points. 
In our context, sparse polynomials are non-zeros over the hypercube -/// at locations that map to "small" integers We exploit this property to -/// implement a time-optimal algorithm -pub(crate) struct SparsePolynomial { - num_vars: usize, - Z: Vec, -} - -impl SparsePolynomial { - pub fn new(num_vars: usize, Z: Vec) -> Self { - Self { num_vars, Z } - } - - // a time-optimal algorithm to evaluate sparse polynomials - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.num_vars, r.len()); - - let num_vars_z = self.Z.len().next_power_of_two().log_2(); - let chis = EqPolynomial::evals_from_points(&r[self.num_vars - 1 - num_vars_z..]); - #[allow(clippy::disallowed_methods)] - let eval_partial: Scalar = self - .Z - .iter() - .zip(chis.iter()) - .map(|(z, chi)| *z * *chi) - .sum(); - - let common = (0..self.num_vars - 1 - num_vars_z) - .map(|i| (Scalar::ONE - r[i])) - .product::(); - - common * eval_partial - } -} - -/// Adds another multilinear polynomial to `self`. -/// Assumes the two polynomials have the same number of variables. -impl Add for MultilinearPolynomial { - type Output = Result; - - fn add(self, other: Self) -> Self::Output { - if self.get_num_vars() != other.get_num_vars() { - return Err("The two polynomials must have the same number of variables"); - } - - let sum: Vec = zip_with!(into_iter, (self.Z, other.Z), |a, b| a + b).collect(); - - Ok(Self::new(sum)) - } -} - -#[cfg(test)] -mod tests { - use rand_chacha::ChaCha20Rng; - use rand_core::SeedableRng; - - use super::*; - use crate::provider::bn256_grumpkin::bn256; - - fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { - MultilinearPolynomial { - num_vars: len.count_ones() as usize, - Z: vec![value; len], - } - } - - // fn test_multilinear_polynomial_with() { - // // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * - // x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, - // 0, 1, 0, 2]. 
- - // let TWO = F::from(2); - - // let Z = vec![ - // F::ZERO, - // F::ZERO, - // F::ZERO, - // F::ONE, - // F::ZERO, - // F::ONE, - // F::ZERO, - // TWO, - // ]; - // let m_poly = MultilinearPolynomial::::new(Z.clone()); - // assert_eq!(m_poly.get_num_vars(), 3); - - // let x = vec![F::ONE, F::ONE, F::ONE]; - // assert_eq!(m_poly.evaluate(x.as_slice()), TWO); - - // let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), - // x.as_slice()); assert_eq!(y, TWO); - // } - - // fn test_sparse_polynomial_with() { - // // Let the polynomial have 4 variables, but is non-zero at only 3 - // locations (out // of 2^4 = 16) over the hypercube - // let mut Z = vec![F::ONE, F::ONE, F::from(2)]; - // let m_poly = SparsePolynomial::::new(4, Z.clone()); - - // Z.resize(16, F::ZERO); // append with zeros to make it a dense polynomial - // let m_poly_dense = MultilinearPolynomial::new(Z); - - // // evaluation point - // let x = vec![F::from(5), F::from(8), F::from(5), F::from(3)]; - - // // check evaluations - // assert_eq!( - // m_poly.evaluate(x.as_slice()), - // m_poly_dense.evaluate(x.as_slice()) - // ); - // } - - fn test_mlp_add_with() { - let mlp1 = make_mlp(4, F::from(3)); - let mlp2 = make_mlp(4, F::from(7)); - - let mlp3 = mlp1.add(mlp2).unwrap(); - - assert_eq!(mlp3.Z, vec![F::from(10); 4]); - } - - #[test] - fn test_mlp_add() { - test_mlp_add_with::(); - } - - fn test_evaluation_with() { - let num_evals = 4; - let mut evals: Vec = Vec::with_capacity(num_evals); - for _ in 0..num_evals { - evals.push(F::from(8)); - } - let dense_poly: MultilinearPolynomial = MultilinearPolynomial::new(evals.clone()); - - // Evaluate at 3: - // (0, 0) = 1 - // (0, 1) = 1 - // (1, 0) = 1 - // (1, 1) = 1 - // g(x_0,x_1) => c_0*(1 - x_0)(1 - x_1) + c_1*(1-x_0)(x_1) + c_2*(x_0)(1-x_1) + - // c_3*(x_0)(x_1) g(3, 4) = 8*(1 - 3)(1 - 4) + 8*(1-3)(4) + 8*(3)(1-4) + - // 8*(3)(4) = 48 + -64 + -72 + 96 = 8 g(5, 10) = 8*(1 - 5)(1 - 10) + - // 8*(1 - 5)(10) + 8*(5)(1-10) + 8*(5)(10) = 96 + -16 + 
-72 + 96 = 8 - assert_eq!( - dense_poly.evaluate(vec![F::from(3), F::from(4)].as_slice()), - F::from(8) - ); - assert_eq!( - dense_poly.evaluate(vec![F::from(5), F::from(10)].as_slice()), - F::from(8) - ); - } - - #[test] - fn test_evaluation() { - test_evaluation_with::(); - } - - /// This binds the variables of a multilinear polynomial to a provided - /// sequence of values. - /// - /// Assuming `bind_poly_var_top` defines the "top" variable of the - /// polynomial, this aims to test whether variables should be provided - /// to the `evaluate` function in topmost-first (big endian) of - /// topmost-last (lower endian) order. - fn bind_sequence( - poly: &MultilinearPolynomial, - values: &[F], - ) -> MultilinearPolynomial { - // Assert that the size of the polynomial being evaluated is a power of 2 - // greater than (1 << values.len()) - assert!(poly.Z.len().is_power_of_two()); - assert!(poly.Z.len() >= 1 << values.len()); - - let mut tmp = poly.clone(); - for v in values.iter() { - tmp.bind_poly_var_top(v); - } - tmp - } - - fn bind_and_evaluate_with() { - for i in 0..50 { - // Initialize a random polynomial - let n = 7; - let mut rng = ChaCha20Rng::from_seed([i as u8; 32]); - let poly = MultilinearPolynomial::random(n, &mut rng); - - // draw a random point - let pt: Vec<_> = std::iter::from_fn(|| Some(F::random(&mut rng))) - .take(n) - .collect(); - // this shows the order in which coordinates are evaluated - assert_eq!(poly.evaluate(&pt), bind_sequence(&poly, &pt).Z[0]) - } - } - - #[test] - fn test_bind_and_evaluate() { - bind_and_evaluate_with::(); - } -} diff --git a/src/spartan/polys/power.rs b/src/spartan/polys/power.rs deleted file mode 100644 index 04aba49..0000000 --- a/src/spartan/polys/power.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! 
`PowPolynomial`: Represents multilinear extension of power polynomials - -use std::iter::successors; - -use ff::PrimeField; - -use crate::spartan::polys::eq::EqPolynomial; - -/// Represents the multilinear extension polynomial (MLE) of the equality -/// polynomial $pow(x,t)$, denoted as $\tilde{pow}(x, t)$. -/// -/// The polynomial is defined by the formula: -/// $$ -/// \tilde{power}(x, t) = \prod_{i=1}^m(1 + (t^{2^i} - 1) * x_i) -/// $$ -pub struct PowPolynomial { - eq: EqPolynomial, -} - -impl PowPolynomial { - /// Creates a new `PowPolynomial` from a Scalars `t`. - pub fn new(t: &Scalar, ell: usize) -> Self { - // t_pow = [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] - let t_pow = Self::squares(t, ell); - - Self { - eq: EqPolynomial::new(t_pow), - } - } - - /// Create powers the following powers of `t`: - /// [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}] - pub fn squares(t: &Scalar, ell: usize) -> Vec { - successors(Some(*t), |p: &Scalar| Some(p.square())) - .take(ell) - .collect::>() - } - - /// Creates the evals corresponding to a `PowPolynomial` from an - /// already-existing vector of powers. `t_pow.len() > ell` must be true. - pub(crate) fn evals_with_powers(powers: &[Scalar], ell: usize) -> Vec { - let t_pow = powers[..ell].to_vec(); - EqPolynomial::evals_from_points(&t_pow) - } - - /// Evaluates the `PowPolynomial` at a given point `rx`. - /// - /// This function computes the value of the polynomial at the point - /// specified by `rx`. It expects `rx` to have the same length as the - /// internal vector `t_pow`. - /// - /// Panics if `rx` and `t_pow` have different lengths. - pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - self.eq.evaluate(rx) - } - - pub fn coordinates(self) -> Vec { - self.eq.r - } - - /// Evaluates the `PowPolynomial` at all the `2^|t_pow|` points in its - /// domain. - /// - /// Returns a vector of Scalars, each corresponding to the polynomial - /// evaluation at a specific point. 
- pub fn evals(&self) -> Vec { - self.eq.evals() - } -} - -impl From> for EqPolynomial { - fn from(polynomial: PowPolynomial) -> Self { - polynomial.eq - } -} diff --git a/src/spartan/polys/univariate.rs b/src/spartan/polys/univariate.rs deleted file mode 100644 index 35a25d9..0000000 --- a/src/spartan/polys/univariate.rs +++ /dev/null @@ -1,415 +0,0 @@ -//! Main components: -//! - `UniPoly`: an univariate dense polynomial in coefficient form (big -//! endian), -//! - `CompressedUniPoly`: a univariate dense polynomial, compressed (omitted -//! linear term), in coefficient form (little endian), -use std::{ - cmp::Ordering, - ops::{AddAssign, Index, IndexMut, MulAssign, SubAssign}, -}; - -use ff::PrimeField; -use rayon::prelude::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; -use ref_cast::RefCast; -use serde::{Deserialize, Serialize}; - -use crate::{ - provider::util::iterators::DoubleEndedIteratorExt as _, - traits::{Group, TranscriptReprTrait}, -}; - -// ax^2 + bx + c stored as vec![c, b, a] -// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a] -#[derive(Debug, Clone, PartialEq, Eq, RefCast)] -#[repr(transparent)] -pub struct UniPoly { - pub coeffs: Vec, -} - -// ax^2 + bx + c stored as vec![c, a] -// ax^3 + bx^2 + cx + d stored as vec![d, c, a] -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct CompressedUniPoly { - coeffs_except_linear_term: Vec, -} - -impl UniPoly { - pub fn new(coeffs: Vec) -> Self { - let mut res = Self { coeffs }; - res.truncate_leading_zeros(); - res - } - - fn zero() -> Self { - Self::new(Vec::new()) - } - - /// Divide self by another polynomial, and returns the - /// quotient and remainder. 
- pub fn divide_with_q_and_r(&self, divisor: &Self) -> Option<(Self, Self)> { - if self.is_zero() { - Some((Self::zero(), Self::zero())) - } else if divisor.is_zero() { - None - } else if self.degree() < divisor.degree() { - Some((Self::zero(), self.clone())) - } else { - // Now we know that self.degree() >= divisor.degree(); - let mut quotient = vec![Scalar::ZERO; self.degree() - divisor.degree() + 1]; - let mut remainder: Self = self.clone(); - // Can unwrap here because we know self is not zero. - let divisor_leading_inv = divisor.leading_coefficient().unwrap().invert().unwrap(); - while !remainder.is_zero() && remainder.degree() >= divisor.degree() { - let cur_q_coeff = *remainder.leading_coefficient().unwrap() * divisor_leading_inv; - let cur_q_degree = remainder.degree() - divisor.degree(); - quotient[cur_q_degree] = cur_q_coeff; - - for (i, div_coeff) in divisor.coeffs.iter().enumerate() { - remainder.coeffs[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); - } - while let Some(true) = remainder.coeffs.last().map(|c| c == &Scalar::ZERO) { - remainder.coeffs.pop(); - } - } - Some((Self::new(quotient), remainder)) - } - } - - /// Divides f(x) by x-a and returns quotient polynomial with no reminder - /// This is a common use case for polynomial divisions in KZG-based PCS. - pub fn divide_minus_u(&self, u: Scalar) -> Self { - if self.is_zero() { - Self::zero() - } else { - // On input f(x) and u compute the witness polynomial used to prove - // that f(u) = v. The main part of this is to compute the - // division (f(x) - f(u)) / (x - u), but we don't use a general - // division algorithm, we make use of the fact that the division - // never has a remainder, and that the denominator is always a linear - // polynomial. The cost is (d-1) mults + (d-1) adds in E::Scalar, where - // d is the degree of f. - // - // We use the fact that if we compute the quotient of f(x)/(x-u), - // there will be a remainder, but it'll be v = f(u). 
Put another way - // the quotient of f(x)/(x-u) and (f(x) - f(v))/(x-u) is the - // same. One advantage is that computing f(u) could be decoupled - // from kzg_open, it could be done later or separate from computing W. - - let d = self.coeffs.len(); - - // Compute h(x) = f(x)/(x - u) - let mut h = vec![Scalar::ZERO; d]; - for i in (1..d).rev() { - h[i - 1] = self.coeffs[i] + h[i] * u; - } - Self::new(h) - } - } - - fn is_zero(&self) -> bool { - self.coeffs.is_empty() || self.coeffs.iter().all(|c| c == &Scalar::ZERO) - } - - fn truncate_leading_zeros(&mut self) { - while self.coeffs.last().map_or(false, |c| c == &Scalar::ZERO) { - self.coeffs.pop(); - } - } - - fn leading_coefficient(&self) -> Option<&Scalar> { - self.coeffs.last() - } - - pub fn from_evals(evals: &[Scalar]) -> Self { - // we only support degree-2 or degree-3 univariate polynomials - assert!(evals.len() == 3 || evals.len() == 4); - let two_inv = Scalar::from(2).invert().unwrap(); - let coeffs = if evals.len() == 3 { - // ax^2 + bx + c - let c = evals[0]; - let a = two_inv * (evals[2] - evals[1] - evals[1] + c); - let b = evals[1] - c - a; - vec![c, b, a] - } else { - // ax^3 + bx^2 + cx + d - let six_inv = Scalar::from(6).invert().unwrap(); - - let d = evals[0]; - let a = six_inv - * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - - evals[0]); - let b = two_inv - * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1] - + evals[2] - + evals[2] - + evals[2] - + evals[2] - - evals[3]); - let c = evals[1] - d - a - b; - vec![d, c, b, a] - }; - - Self { coeffs } - } - - pub fn degree(&self) -> usize { - self.coeffs.len() - 1 - } - - pub fn eval_at_zero(&self) -> Scalar { - self.coeffs[0] - } - - pub fn eval_at_one(&self) -> Scalar { - (0..self.coeffs.len()) - .into_par_iter() - .map(|i| self.coeffs[i]) - .sum() - } - - pub fn evaluate(&self, r: &Scalar) -> Scalar { - self.coeffs.iter().rlc(r) - } - - pub fn compress(&self) -> CompressedUniPoly { - let 
coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat(); - assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len()); - CompressedUniPoly { - coeffs_except_linear_term, - } - } - - #[cfg(test)] - /// Returns a random polynomial - pub fn random(num_vars: usize, mut rng: &mut R) -> Self { - Self::new( - std::iter::from_fn(|| Some(Scalar::random(&mut rng))) - .take(num_vars) - .collect(), - ) - } -} - -impl CompressedUniPoly { - // we require eval(0) + eval(1) = hint, so we can solve for the linear term as: - // linear_term = hint - 2 * constant_term - deg2 term - deg3 term - pub fn decompress(&self, hint: &Scalar) -> UniPoly { - let mut linear_term = - *hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0]; - for i in 1..self.coeffs_except_linear_term.len() { - linear_term -= self.coeffs_except_linear_term[i]; - } - - let mut coeffs: Vec = Vec::new(); - coeffs.push(self.coeffs_except_linear_term[0]); - coeffs.push(linear_term); - coeffs.extend(&self.coeffs_except_linear_term[1..]); - assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len()); - UniPoly { coeffs } - } -} - -impl TranscriptReprTrait for UniPoly { - fn to_transcript_bytes(&self) -> Vec { - let coeffs = self.compress().coeffs_except_linear_term; - coeffs - .iter() - .flat_map(|&t| t.to_repr().as_ref().to_vec()) - .collect::>() - } -} - -impl Index for UniPoly { - type Output = Scalar; - - fn index(&self, index: usize) -> &Self::Output { - &self.coeffs[index] - } -} - -impl IndexMut for UniPoly { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - &mut self.coeffs[index] - } -} - -impl AddAssign<&Scalar> for UniPoly { - fn add_assign(&mut self, rhs: &Scalar) { - self.coeffs.par_iter_mut().for_each(|c| *c += rhs); - } -} - -impl MulAssign<&Scalar> for UniPoly { - fn mul_assign(&mut self, rhs: &Scalar) { - self.coeffs.par_iter_mut().for_each(|c| *c *= rhs); - } -} - -impl AddAssign<&Self> for UniPoly { - fn add_assign(&mut self, rhs: 
&Self) { - let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); - #[allow(clippy::disallowed_methods)] - for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { - *lhs += rhs; - } - if matches!(ordering, Ordering::Less) { - self.coeffs - .extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); - } - if matches!(ordering, Ordering::Equal) { - self.truncate_leading_zeros(); - } - } -} - -impl SubAssign<&Self> for UniPoly { - fn sub_assign(&mut self, rhs: &Self) { - let ordering = self.coeffs.len().cmp(&rhs.coeffs.len()); - #[allow(clippy::disallowed_methods)] - for (lhs, rhs) in self.coeffs.iter_mut().zip(&rhs.coeffs) { - *lhs -= rhs; - } - if matches!(ordering, Ordering::Less) { - self.coeffs - .extend(rhs.coeffs[self.coeffs.len()..].iter().cloned()); - } - if matches!(ordering, Ordering::Equal) { - self.truncate_leading_zeros(); - } - } -} - -impl AsRef> for UniPoly { - fn as_ref(&self) -> &Vec { - &self.coeffs - } -} - -#[cfg(test)] -mod tests { - use rand::SeedableRng; - use rand_chacha::ChaCha20Rng; - - use super::*; - use crate::provider::bn256_grumpkin; - - fn test_from_evals_quad_with() { - // polynomial is 2x^2 + 3x + 1 - let e0 = F::ONE; - let e1 = F::from(6); - let e2 = F::from(15); - let evals = vec![e0, e1, e2]; - let poly = UniPoly::from_evals(&evals); - - assert_eq!(poly.eval_at_zero(), e0); - assert_eq!(poly.eval_at_one(), e1); - assert_eq!(poly.coeffs.len(), 3); - assert_eq!(poly.coeffs[0], F::ONE); - assert_eq!(poly.coeffs[1], F::from(3)); - assert_eq!(poly.coeffs[2], F::from(2)); - - let hint = e0 + e1; - let compressed_poly = poly.compress(); - let decompressed_poly = compressed_poly.decompress(&hint); - for i in 0..decompressed_poly.coeffs.len() { - assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); - } - - let e3 = F::from(28); - assert_eq!(poly.evaluate(&F::from(3)), e3); - } - - #[test] - fn test_from_evals_quad() { - test_from_evals_quad_with::(); - } - - fn test_from_evals_cubic_with() { - // polynomial is x^3 + 2x^2 + 3x + 1 
- let e0 = F::ONE; - let e1 = F::from(7); - let e2 = F::from(23); - let e3 = F::from(55); - let evals = vec![e0, e1, e2, e3]; - let poly = UniPoly::from_evals(&evals); - - assert_eq!(poly.eval_at_zero(), e0); - assert_eq!(poly.eval_at_one(), e1); - assert_eq!(poly.coeffs.len(), 4); - - assert_eq!(poly.coeffs[1], F::from(3)); - assert_eq!(poly.coeffs[2], F::from(2)); - assert_eq!(poly.coeffs[3], F::from(1)); - - let hint = e0 + e1; - let compressed_poly = poly.compress(); - let decompressed_poly = compressed_poly.decompress(&hint); - for i in 0..decompressed_poly.coeffs.len() { - assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); - } - - let e4 = F::from(109); - assert_eq!(poly.evaluate(&F::from(4)), e4); - } - - #[test] - fn test_from_evals_cubic() { - test_from_evals_cubic_with::(); - } - - /// Perform a naive n^2 multiplication of `self` by `other`. - pub fn naive_mul(ours: &UniPoly, other: &UniPoly) -> UniPoly { - if ours.is_zero() || other.is_zero() { - UniPoly::zero() - } else { - let mut result = vec![F::ZERO; ours.degree() + other.degree() + 1]; - for (i, self_coeff) in ours.coeffs.iter().enumerate() { - for (j, other_coeff) in other.coeffs.iter().enumerate() { - result[i + j] += &(*self_coeff * other_coeff); - } - } - UniPoly::new(result) - } - } - - fn divide_polynomials_random() { - let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); - - for a_degree in 0..50 { - for b_degree in 0..50 { - let dividend = UniPoly::::random(a_degree, rng); - let divisor = UniPoly::::random(b_degree, rng); - - if let Some((quotient, remainder)) = - UniPoly::divide_with_q_and_r(÷nd, &divisor) - { - let mut prod = naive_mul(&divisor, "ient); - prod += &remainder; - assert_eq!(dividend, prod) - } - } - } - } - - #[test] - fn test_divide_minus_u() { - fn test_inner() { - let rng = &mut ChaCha20Rng::from_seed([0u8; 32]); - let dividend = UniPoly::::random(50, rng); - let u = Fr::random(rng); - let divisor = UniPoly::new(vec![-u, Fr::ONE]); - - let (q1, _) = 
dividend.divide_with_q_and_r(&divisor).unwrap(); - let q2 = dividend.divide_minus_u(u); - - assert_eq!(q1, q2); - } - - test_inner::(); - } - - #[test] - fn test_divide_polynomials_random() { - divide_polynomials_random::(); - } -} diff --git a/src/spartan/ppsnark.rs b/src/spartan/ppsnark.rs deleted file mode 100644 index 64c27c4..0000000 --- a/src/spartan/ppsnark.rs +++ /dev/null @@ -1,1097 +0,0 @@ -//! This module implements `RelaxedR1CSSNARK` traits using a spark-based -//! approach to prove evaluations of sparse multilinear polynomials involved in -//! Spartan's sum-check protocol, thereby providing a preprocessing SNARK -//! The verifier in this preprocessing SNARK maintains a commitment to R1CS -//! matrices. This is beneficial when using a polynomial commitment scheme in -//! which the verifier's costs is succinct. This code includes experimental -//! optimizations to reduce runtimes and proof sizes. -use core::cmp::max; -use std::sync::Arc; - -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use super::polys::{masked_eq::MaskedEqPolynomial, multilinear::SparsePolynomial}; -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, - identity::IdentityPolynomial, - multilinear::MultilinearPolynomial, - power::PowPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - powers, - sumcheck::{ - engine::{ - InnerSumcheckInstance, MemorySumcheckInstance, OuterSumcheckInstance, - SumcheckEngine, WitnessBoundSumcheck, - }, - SumcheckProof, - }, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, - evaluation::EvaluationEngineTrait, - snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, TranscriptReprTrait, - }, - zip_with, Commitment, 
CommitmentKey, CompressedCommitment, -}; - -fn padded(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec { - let mut v_padded = vec![*e; n]; - v_padded[..v.len()].copy_from_slice(v); - v_padded -} - -/// A type that holds `R1CSShape` in a form amenable to memory checking -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkRepr { - pub(in crate::spartan) N: usize, // size of the vectors - - // dense representation - pub(in crate::spartan) row: Vec, - pub(in crate::spartan) col: Vec, - pub(in crate::spartan) val_A: Vec, - pub(in crate::spartan) val_B: Vec, - pub(in crate::spartan) val_C: Vec, - - // timestamp polynomials - pub(in crate::spartan) ts_row: Vec, - pub(in crate::spartan) ts_col: Vec, -} - -/// A type that holds a commitment to a sparse polynomial -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct R1CSShapeSparkCommitment { - pub(in crate::spartan) N: usize, // size of each vector - - // commitments to the dense representation - pub(in crate::spartan) comm_row: Commitment, - pub(in crate::spartan) comm_col: Commitment, - pub(in crate::spartan) comm_val_A: Commitment, - pub(in crate::spartan) comm_val_B: Commitment, - pub(in crate::spartan) comm_val_C: Commitment, - - // commitments to the timestamp polynomials - pub(in crate::spartan) comm_ts_row: Commitment, - pub(in crate::spartan) comm_ts_col: Commitment, -} - -impl TranscriptReprTrait for R1CSShapeSparkCommitment { - fn to_transcript_bytes(&self) -> Vec { - [ - self.comm_row, - self.comm_col, - self.comm_val_A, - self.comm_val_B, - self.comm_val_C, - self.comm_ts_row, - self.comm_ts_col, - ] - .as_slice() - .to_transcript_bytes() - } -} - -impl R1CSShapeSparkRepr { - /// represents `R1CSShape` in a Spark-friendly format amenable to memory - /// checking - pub fn new(S: &R1CSShape) -> Self { - let N = { - let total_nz = S.A.len() + S.B.len() + S.C.len(); - max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() - }; - 
- // we make col lookup into the last entry of z, so we commit to zeros - let (mut row, mut col, mut val_A, mut val_B, mut val_C) = ( - vec![0; N], - vec![N - 1; N], - vec![E::Scalar::ZERO; N], - vec![E::Scalar::ZERO; N], - vec![E::Scalar::ZERO; N], - ); - - for (i, entry) in S.A.iter().enumerate() { - let (r, c, v) = entry; - row[i] = r; - col[i] = c; - val_A[i] = v; - } - - let b_offset = S.A.len(); - for (i, entry) in S.B.iter().enumerate() { - let (r, c, v) = entry; - row[b_offset + i] = r; - col[b_offset + i] = c; - val_B[b_offset + i] = v; - } - - let c_offset = S.A.len() + S.B.len(); - for (i, entry) in S.C.iter().enumerate() { - let (r, c, v) = entry; - row[c_offset + i] = r; - col[c_offset + i] = c; - val_C[c_offset + i] = v; - } - - // timestamp calculation routine - let timestamp_calc = - |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> Vec { - let mut ts = vec![0usize; num_cells]; - - assert!(num_ops >= addr_trace.len()); - for addr in addr_trace { - assert!(*addr < num_cells); - ts[*addr] += 1; - } - ts - }; - - // timestamp polynomials for row - let (ts_row, ts_col) = - rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col)); - - // a routine to turn a vector of usize into a vector scalars - let to_vec_scalar = |v: &[usize]| -> Vec { - v.iter() - .map(|x| E::Scalar::from(*x as u64)) - .collect::>() - }; - - Self { - N, - - // dense representation - row: to_vec_scalar(&row), - col: to_vec_scalar(&col), - val_A, - val_B, - val_C, - - // timestamp polynomials - ts_row: to_vec_scalar(&ts_row), - ts_col: to_vec_scalar(&ts_col), - } - } - - pub(in crate::spartan) fn commit(&self, ck: &CommitmentKey) -> R1CSShapeSparkCommitment { - let comm_vec: Vec> = [ - &self.row, - &self.col, - &self.val_A, - &self.val_B, - &self.val_C, - &self.ts_row, - &self.ts_col, - ] - .par_iter() - .map(|v| E::CE::commit(ck, v)) - .collect(); - - R1CSShapeSparkCommitment { - N: self.row.len(), - comm_row: comm_vec[0], - comm_col: comm_vec[1], - 
comm_val_A: comm_vec[2], - comm_val_B: comm_vec[3], - comm_val_C: comm_vec[4], - comm_ts_row: comm_vec[5], - comm_ts_col: comm_vec[6], - } - } - - // computes evaluation oracles - fn evaluation_oracles( - &self, - S: &R1CSShape, - r_x: &E::Scalar, - z: &[E::Scalar], - ) -> ( - Vec, - Vec, - Vec, - Vec, - ) { - let mem_row = PowPolynomial::new(r_x, self.N.log_2()).evals(); - let mem_col = padded::(z, self.N, &E::Scalar::ZERO); - - let (L_row, L_col) = { - let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s - let mut L_col = vec![mem_col[self.N - 1]; self.N]; // we place mem_col[N-1] since resized col is appended with N-1 - - for (i, (val_r, val_c)) in - S.A.iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, c, _)| (mem_row[r], mem_col[c])) - .enumerate() - { - L_row[i] = val_r; - L_col[i] = val_c; - } - (L_row, L_col) - }; - - (mem_row, mem_col, L_row, L_col) - } -} - -/// A type that represents the prover's key -#[derive(Debug, Clone)] -pub struct ProverKey> { - pk_ee: EE::ProverKey, - S_repr: R1CSShapeSparkRepr, - S_comm: R1CSShapeSparkCommitment, - vk_digest: E::Scalar, // digest of verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "EE::VerifierKey: Serialize")] -pub struct VerifierKey> { - num_cons: usize, - num_vars: usize, - vk_ee: EE::VerifierKey, - S_comm: R1CSShapeSparkCommitment, - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -impl> SimpleDigestible for VerifierKey where - EE::VerifierKey: Serialize -{ -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - // commitment to oracles: the first three are for Az, Bz, Cz, - // and the last two 
are for memory reads - comm_Az: CompressedCommitment, - comm_Bz: CompressedCommitment, - comm_Cz: CompressedCommitment, - comm_L_row: CompressedCommitment, - comm_L_col: CompressedCommitment, - - // commitments to aid the memory checks - comm_t_plus_r_inv_row: CompressedCommitment, - comm_w_plus_r_inv_row: CompressedCommitment, - comm_t_plus_r_inv_col: CompressedCommitment, - comm_w_plus_r_inv_col: CompressedCommitment, - - // claims about Az, Bz, and Cz polynomials - eval_Az_at_tau: E::Scalar, - eval_Bz_at_tau: E::Scalar, - eval_Cz_at_tau: E::Scalar, - - // sum-check - sc: SumcheckProof, - - // claims from the end of sum-check - eval_Az: E::Scalar, - eval_Bz: E::Scalar, - eval_Cz: E::Scalar, - eval_E: E::Scalar, - eval_L_row: E::Scalar, - eval_L_col: E::Scalar, - eval_val_A: E::Scalar, - eval_val_B: E::Scalar, - eval_val_C: E::Scalar, - - eval_W: E::Scalar, - - eval_t_plus_r_inv_row: E::Scalar, - eval_row: E::Scalar, // address - eval_w_plus_r_inv_row: E::Scalar, - eval_ts_row: E::Scalar, - - eval_t_plus_r_inv_col: E::Scalar, - eval_col: E::Scalar, // address - eval_w_plus_r_inv_col: E::Scalar, - eval_ts_col: E::Scalar, - - // a PCS evaluation argument - eval_arg: EE::EvaluationArgument, -} - -impl> RelaxedR1CSSNARK { - fn prove_helper( - mem: &mut T1, - outer: &mut T2, - inner: &mut T3, - witness: &mut T4, - transcript: &mut E::TE, - ) -> Result< - ( - SumcheckProof, - Vec, - Vec>, - Vec>, - Vec>, - Vec>, - ), - NovaError, - > - where - T1: SumcheckEngine, - T2: SumcheckEngine, - T3: SumcheckEngine, - T4: SumcheckEngine, - { - // sanity checks - assert_eq!(mem.size(), outer.size()); - assert_eq!(mem.size(), inner.size()); - assert_eq!(mem.size(), witness.size()); - assert_eq!(mem.degree(), outer.degree()); - assert_eq!(mem.degree(), inner.degree()); - assert_eq!(mem.degree(), witness.degree()); - - // these claims are already added to the transcript, so we do not need to add - let claims = mem - .initial_claims() - .into_iter() - .chain(outer.initial_claims()) - 
.chain(inner.initial_claims()) - .chain(witness.initial_claims()) - .collect::>(); - - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, claims.len()); - - // compute the joint claim - let claim = zip_with!(iter, (claims, coeffs), |c_1, c_2| *c_1 * c_2).sum(); - - let mut e = claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); - let num_rounds = mem.size().log_2(); - for _ in 0..num_rounds { - let ((evals_mem, evals_outer), (evals_inner, evals_witness)) = rayon::join( - || rayon::join(|| mem.evaluation_points(), || outer.evaluation_points()), - || rayon::join(|| inner.evaluation_points(), || witness.evaluation_points()), - ); - - let evals: Vec> = evals_mem - .into_iter() - .chain(evals_outer.into_iter()) - .chain(evals_inner.into_iter()) - .chain(evals_witness.into_iter()) - .collect::>>(); - assert_eq!(evals.len(), claims.len()); - - let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum(); - let evals_combined_3 = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - e - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - let _ = rayon::join( - || rayon::join(|| mem.bound(&r_i), || outer.bound(&r_i)), - || rayon::join(|| inner.bound(&r_i), || witness.bound(&r_i)), - ); - - e = poly.evaluate(&r_i); - cubic_polys.push(poly.compress()); - } - - let mem_claims = mem.final_claims(); - let outer_claims = outer.final_claims(); - let inner_claims = inner.final_claims(); - let witness_claims = witness.final_claims(); - - Ok(( - SumcheckProof::new(cubic_polys), - r, - mem_claims, - outer_claims, - inner_claims, - witness_claims, - )) 
- } -} - -impl> VerifierKey { - fn new( - num_cons: usize, - num_vars: usize, - S_comm: R1CSShapeSparkCommitment, - vk_ee: EE::VerifierKey, - ) -> Self { - Self { - num_cons, - num_vars, - S_comm, - vk_ee, - digest: Default::default(), - } - } -} -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - Box::new(|shape: &R1CSShape| -> usize { - // the commitment key should be large enough to commit to the R1CS matrices - shape.A.len() + shape.B.len() + shape.C.len() - }) - } - - fn initialize_pk( - _ck: Arc>, - _vk_digest: ::Scalar, - ) -> Result { - todo!("not implemented for nova snarks"); - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - // check the provided commitment key meets minimal requirements - if ck.length() < Self::ck_floor()(S) { - return Err(NovaError::InvalidCommitmentKeyLength); - } - let (pk_ee, vk_ee) = EE::setup(ck.clone()); - - // pad the R1CS matrices - let S = S.pad(); - - let S_repr = R1CSShapeSparkRepr::new(&S); - let S_comm = S_repr.commit(&*ck); - - let vk = VerifierKey::new(S.num_cons, S.num_vars, S_comm.clone(), vk_ee); - - let pk = ProverKey { - pk_ee, - S_repr, - S_comm, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance - #[tracing::instrument(skip_all, name = "PPSNARK::prove")] - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - // pad the R1CSShape - let S = S.pad(); - // sanity check that R1CSShape has all 
required size characteristics - assert!(S.is_regular_shape()); - - let W = W.pad(&S); // pad the witness - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the verifier key (which includes commitment to R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &pk.vk_digest); - transcript.absorb(b"U", U); - - // compute the full satisfying assignment by concatenating W.W, U.u, and U.X - let z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); - - // compute Az, Bz, Cz - let (mut Az, mut Bz, mut Cz) = S.multiply_vec(&z)?; - - // commit to Az, Bz, Cz - let (comm_Az, (comm_Bz, comm_Cz)) = rayon::join( - || E::CE::commit(ck, &Az), - || rayon::join(|| E::CE::commit(ck, &Bz), || E::CE::commit(ck, &Cz)), - ); - - transcript.absorb(b"c", &[comm_Az, comm_Bz, comm_Cz].as_slice()); - - // number of rounds of sum-check - let num_rounds_sc = pk.S_repr.N.log_2(); - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); - - // (1) send commitments to Az, Bz, and Cz along with their evaluations at tau - let (Az, Bz, Cz, W, E) = { - Az.resize(pk.S_repr.N, E::Scalar::ZERO); - Bz.resize(pk.S_repr.N, E::Scalar::ZERO); - Cz.resize(pk.S_repr.N, E::Scalar::ZERO); - let E = padded::(&W.E, pk.S_repr.N, &E::Scalar::ZERO); - let W = padded::(&W.W, pk.S_repr.N, &E::Scalar::ZERO); - - (Az, Bz, Cz, W, E) - }; - let chis_taus = EqPolynomial::evals_from_points(&tau_coords); - let (eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau) = { - let evals_at_tau = [&Az, &Bz, &Cz] - .into_par_iter() - .map(|p| MultilinearPolynomial::evaluate_with_chis(p, &chis_taus)) - .collect::>(); - (evals_at_tau[0], evals_at_tau[1], evals_at_tau[2]) - }; - - // (2) send commitments to the following two oracles - // L_row(i) = eq(tau, row(i)) for all i - // L_col(i) = z(col(i)) for all i - let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &tau, &z); - let (comm_L_row, comm_L_col) = - 
rayon::join(|| E::CE::commit(ck, &L_row), || E::CE::commit(ck, &L_col)); - - // since all the three polynomials are opened at tau, - // we can combine them into a single polynomial opened at tau - let eval_vec = vec![eval_Az_at_tau, eval_Bz_at_tau, eval_Cz_at_tau]; - - // absorb the claimed evaluations into the transcript - transcript.absorb(b"e", &eval_vec.as_slice()); - // absorb commitments to L_row and L_col in the transcript - transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); - let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; - let poly_vec = vec![&Az, &Bz, &Cz]; - let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); - - // we now need to prove four claims - // (1) 0 = \sum_x poly_tau(x) * (poly_Az(x) * poly_Bz(x) - poly_uCz_E(x)), and - // eval_Az_at_tau + r * eval_Bz_at_tau + r^2 * eval_Cz_at_tau = - // (Az+r*Bz+r^2*Cz)(tau) (2) eval_Az_at_tau + c * eval_Bz_at_tau + c^2 * - // eval_Cz_at_tau = \sum_y L_row(y) * (val_A(y) + c * val_B(y) + c^2 * val_C(y)) - // * L_col(y) (3) L_row(i) = eq(tau, row(i)) and L_col(i) = z(col(i)) - // (4) Check that the witness polynomial W is well-formed e.g., it is padded - // with only zeros - let gamma = transcript.squeeze(b"g")?; - let r = transcript.squeeze(b"r")?; - - let ((mut outer_sc_inst, mut inner_sc_inst), mem_res) = rayon::join( - || { - // a sum-check instance to prove the first claim - let outer_sc_inst = OuterSumcheckInstance::new( - PowPolynomial::new(&tau, num_rounds_sc).evals(), - Az.clone(), - Bz.clone(), - (0..Cz.len()) - .map(|i| U.u * Cz[i] + E[i]) - .collect::>(), - w.p.clone(), // Mz = Az + r * Bz + r^2 * Cz - &u.e, // eval_Az_at_tau + r * eval_Az_at_tau + r^2 * eval_Cz_at_tau - ); - - // a sum-check instance to prove the second claim - let val = zip_with!( - par_iter, - (pk.S_repr.val_A, pk.S_repr.val_B, pk.S_repr.val_C), - |v_a, v_b, v_c| 
*v_a + c * *v_b + c * c * *v_c - ) - .collect::>(); - let inner_sc_inst = InnerSumcheckInstance { - claim: eval_Az_at_tau + c * eval_Bz_at_tau + c * c * eval_Cz_at_tau, - poly_L_row: MultilinearPolynomial::new(L_row.clone()), - poly_L_col: MultilinearPolynomial::new(L_col.clone()), - poly_val: MultilinearPolynomial::new(val), - }; - - (outer_sc_inst, inner_sc_inst) - }, - || { - // a third sum-check instance to prove the read-only memory claim - // we now need to prove that L_row and L_col are well-formed - - // hash the tuples of (addr,val) memory contents and read responses into a - // single field element using `hash_func` - - let (comm_mem_oracles, mem_oracles, mem_aux) = - MemorySumcheckInstance::::compute_oracles( - ck, - &r, - &gamma, - &mem_row, - &pk.S_repr.row, - &L_row, - &pk.S_repr.ts_row, - &mem_col, - &pk.S_repr.col, - &L_col, - &pk.S_repr.ts_col, - )?; - // absorb the commitments - transcript.absorb(b"l", &comm_mem_oracles.as_slice()); - - let rho = transcript.squeeze(b"r")?; - let poly_eq = - MultilinearPolynomial::new(PowPolynomial::new(&rho, num_rounds_sc).evals()); - - Ok::<_, NovaError>(( - MemorySumcheckInstance::new( - mem_oracles.clone(), - mem_aux, - poly_eq.Z, - pk.S_repr.ts_row.clone(), - pk.S_repr.ts_col.clone(), - ), - comm_mem_oracles, - mem_oracles, - )) - }, - ); - - let (mut mem_sc_inst, comm_mem_oracles, mem_oracles) = mem_res?; - - let mut witness_sc_inst = WitnessBoundSumcheck::new(tau, W.clone(), S.num_vars); - - let (sc, rand_sc, claims_mem, claims_outer, claims_inner, claims_witness) = - Self::prove_helper( - &mut mem_sc_inst, - &mut outer_sc_inst, - &mut inner_sc_inst, - &mut witness_sc_inst, - &mut transcript, - )?; - - // claims from the end of the sum-check - let eval_Az = claims_outer[0][0]; - let eval_Bz = claims_outer[0][1]; - - let eval_L_row = claims_inner[0][0]; - let eval_L_col = claims_inner[0][1]; - - let eval_t_plus_r_inv_row = claims_mem[0][0]; - let eval_w_plus_r_inv_row = claims_mem[0][1]; - let eval_ts_row = 
claims_mem[0][2]; - - let eval_t_plus_r_inv_col = claims_mem[1][0]; - let eval_w_plus_r_inv_col = claims_mem[1][1]; - let eval_ts_col = claims_mem[1][2]; - let eval_W = claims_witness[0][0]; - - // compute the remaining claims that did not come for free from the sum-check - // prover - let (eval_Cz, eval_E, eval_val_A, eval_val_B, eval_val_C, eval_row, eval_col) = { - let e = [ - &Cz, - &E, - &pk.S_repr.val_A, - &pk.S_repr.val_B, - &pk.S_repr.val_C, - &pk.S_repr.row, - &pk.S_repr.col, - ] - .into_par_iter() - .map(|p| MultilinearPolynomial::evaluate_with(p, &rand_sc)) - .collect::>(); - (e[0], e[1], e[2], e[3], e[4], e[5], e[6]) - }; - - // all the evaluations are at rand_sc, we can fold them into one claim - let eval_vec = vec![ - eval_W, - eval_Az, - eval_Bz, - eval_Cz, - eval_E, - eval_L_row, - eval_L_col, - eval_val_A, - eval_val_B, - eval_val_C, - eval_t_plus_r_inv_row, - eval_row, - eval_w_plus_r_inv_row, - eval_ts_row, - eval_t_plus_r_inv_col, - eval_col, - eval_w_plus_r_inv_col, - eval_ts_col, - ]; - - let comm_vec = [ - U.comm_W, - comm_Az, - comm_Bz, - comm_Cz, - U.comm_E, - comm_L_row, - comm_L_col, - pk.S_comm.comm_val_A, - pk.S_comm.comm_val_B, - pk.S_comm.comm_val_C, - comm_mem_oracles[0], - pk.S_comm.comm_row, - comm_mem_oracles[1], - pk.S_comm.comm_ts_row, - comm_mem_oracles[2], - pk.S_comm.comm_col, - comm_mem_oracles[3], - pk.S_comm.comm_ts_col, - ]; - let poly_vec = [ - &W, - &Az, - &Bz, - &Cz, - &E, - &L_row, - &L_col, - &pk.S_repr.val_A, - &pk.S_repr.val_B, - &pk.S_repr.val_C, - mem_oracles[0].as_ref(), - &pk.S_repr.row, - mem_oracles[1].as_ref(), - &pk.S_repr.ts_row, - mem_oracles[2].as_ref(), - &pk.S_repr.col, - mem_oracles[3].as_ref(), - &pk.S_repr.ts_col, - ]; - transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript - let c = transcript.squeeze(b"c")?; - let w: PolyEvalWitness = PolyEvalWitness::batch(&poly_vec, &c); - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, 
&c); - - let eval_arg = EE::prove(ck, &pk.pk_ee, &mut transcript, &u.c, &w.p, &rand_sc, &u.e)?; - - Ok(Self { - comm_Az: comm_Az.compress(), - comm_Bz: comm_Bz.compress(), - comm_Cz: comm_Cz.compress(), - comm_L_row: comm_L_row.compress(), - comm_L_col: comm_L_col.compress(), - - comm_t_plus_r_inv_row: comm_mem_oracles[0].compress(), - comm_w_plus_r_inv_row: comm_mem_oracles[1].compress(), - comm_t_plus_r_inv_col: comm_mem_oracles[2].compress(), - comm_w_plus_r_inv_col: comm_mem_oracles[3].compress(), - - eval_Az_at_tau, - eval_Bz_at_tau, - eval_Cz_at_tau, - - sc, - - eval_Az, - eval_Bz, - eval_Cz, - eval_E, - eval_L_row, - eval_L_col, - eval_val_A, - eval_val_B, - eval_val_C, - - eval_W, - - eval_t_plus_r_inv_row, - eval_row, - eval_w_plus_r_inv_row, - eval_ts_row, - - eval_col, - eval_t_plus_r_inv_col, - eval_w_plus_r_inv_col, - eval_ts_col, - - eval_arg, - }) - } - - /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the verifier key (including commitment to R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &vk.digest()); - transcript.absorb(b"U", U); - - let comm_Az = Commitment::::decompress(&self.comm_Az)?; - let comm_Bz = Commitment::::decompress(&self.comm_Bz)?; - let comm_Cz = Commitment::::decompress(&self.comm_Cz)?; - let comm_L_row = Commitment::::decompress(&self.comm_L_row)?; - let comm_L_col = Commitment::::decompress(&self.comm_L_col)?; - let comm_t_plus_r_inv_row = Commitment::::decompress(&self.comm_t_plus_r_inv_row)?; - let comm_w_plus_r_inv_row = Commitment::::decompress(&self.comm_w_plus_r_inv_row)?; - let comm_t_plus_r_inv_col = Commitment::::decompress(&self.comm_t_plus_r_inv_col)?; - let comm_w_plus_r_inv_col = Commitment::::decompress(&self.comm_w_plus_r_inv_col)?; - - transcript.absorb(b"c", &[comm_Az, comm_Bz, 
comm_Cz].as_slice()); - - let num_rounds_sc = vk.S_comm.N.log_2(); - let tau = transcript.squeeze(b"t")?; - let tau_coords = PowPolynomial::new(&tau, num_rounds_sc).coordinates(); - - // add claims about Az, Bz, and Cz to be checked later - // since all the three polynomials are opened at tau, - // we can combine them into a single polynomial opened at tau - let eval_vec = vec![ - self.eval_Az_at_tau, - self.eval_Bz_at_tau, - self.eval_Cz_at_tau, - ]; - - transcript.absorb(b"e", &eval_vec.as_slice()); - - transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice()); - let comm_vec = vec![comm_Az, comm_Bz, comm_Cz]; - let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, tau_coords.clone(), &eval_vec, &c); - let claim = u.e; - - let gamma = transcript.squeeze(b"g")?; - - let r = transcript.squeeze(b"r")?; - - transcript.absorb( - b"l", - &vec![ - comm_t_plus_r_inv_row, - comm_w_plus_r_inv_row, - comm_t_plus_r_inv_col, - comm_w_plus_r_inv_col, - ] - .as_slice(), - ); - - let rho = transcript.squeeze(b"r")?; - - let num_claims = 10; - let s = transcript.squeeze(b"r")?; - let coeffs = powers(&s, num_claims); - let claim = (coeffs[7] + coeffs[8]) * claim; // rest are zeros - - // verify sc - let (claim_sc_final, rand_sc) = self.sc.verify(claim, num_rounds_sc, 3, &mut transcript)?; - - // verify claim_sc_final - let claim_sc_final_expected = { - let rand_eq_bound_rand_sc = PowPolynomial::new(&rho, num_rounds_sc).evaluate(&rand_sc); - let eq_tau: EqPolynomial<_> = PowPolynomial::new(&tau, num_rounds_sc).into(); - - let taus_bound_rand_sc = eq_tau.evaluate(&rand_sc); - let taus_masked_bound_rand_sc = - MaskedEqPolynomial::new(&eq_tau, vk.num_vars.log_2()).evaluate(&rand_sc); - - let eval_t_plus_r_row = { - let eval_addr_row = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); - let eval_val_row = taus_bound_rand_sc; - let eval_t = eval_addr_row + gamma * eval_val_row; - eval_t + r - }; - - let eval_w_plus_r_row = { 
- let eval_addr_row = self.eval_row; - let eval_val_row = self.eval_L_row; - let eval_w = eval_addr_row + gamma * eval_val_row; - eval_w + r - }; - - let eval_t_plus_r_col = { - let eval_addr_col = IdentityPolynomial::new(num_rounds_sc).evaluate(&rand_sc); - - // memory contents is z, so we compute eval_Z from eval_W and eval_X - let eval_val_col = { - // rand_sc was padded, so we now remove the padding - let (factor, rand_sc_unpad) = { - let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2(); - - let mut factor = E::Scalar::ONE; - for r_p in rand_sc.iter().take(l) { - factor *= E::Scalar::ONE - r_p - } - - let rand_sc_unpad = rand_sc[l..].to_vec(); - - (factor, rand_sc_unpad) - }; - - let eval_X = { - // public IO is (u, X) - let X = vec![U.u] - .into_iter() - .chain(U.X.iter().cloned()) - .collect::>(); - - // evaluate the sparse polynomial at rand_sc_unpad[1..] - let poly_X = SparsePolynomial::new(rand_sc_unpad.len() - 1, X); - poly_X.evaluate(&rand_sc_unpad[1..]) - }; - - self.eval_W + factor * rand_sc_unpad[0] * eval_X - }; - let eval_t = eval_addr_col + gamma * eval_val_col; - eval_t + r - }; - - let eval_w_plus_r_col = { - let eval_addr_col = self.eval_col; - let eval_val_col = self.eval_L_col; - let eval_w = eval_addr_col + gamma * eval_val_col; - eval_w + r - }; - - let claim_mem_final_expected: E::Scalar = coeffs[0] - * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row) - + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col) - + coeffs[2] - * (rand_eq_bound_rand_sc - * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row)) - + coeffs[3] - * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE)) - + coeffs[4] - * (rand_eq_bound_rand_sc - * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col)) - + coeffs[5] - * (rand_eq_bound_rand_sc - * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE)); - - let claim_outer_final_expected = coeffs[6] - * 
taus_bound_rand_sc - * (self.eval_Az * self.eval_Bz - U.u * self.eval_Cz - self.eval_E) - + coeffs[7] - * taus_bound_rand_sc - * (self.eval_Az + c * self.eval_Bz + c * c * self.eval_Cz); - let claim_inner_final_expected = coeffs[8] - * self.eval_L_row - * self.eval_L_col - * (self.eval_val_A + c * self.eval_val_B + c * c * self.eval_val_C); - - let claim_witness_final_expected = coeffs[9] * taus_masked_bound_rand_sc * self.eval_W; - - claim_mem_final_expected - + claim_outer_final_expected - + claim_inner_final_expected - + claim_witness_final_expected - }; - - if claim_sc_final_expected != claim_sc_final { - return Err(NovaError::InvalidSumcheckProof); - } - - let eval_vec = vec![ - self.eval_W, - self.eval_Az, - self.eval_Bz, - self.eval_Cz, - self.eval_E, - self.eval_L_row, - self.eval_L_col, - self.eval_val_A, - self.eval_val_B, - self.eval_val_C, - self.eval_t_plus_r_inv_row, - self.eval_row, - self.eval_w_plus_r_inv_row, - self.eval_ts_row, - self.eval_t_plus_r_inv_col, - self.eval_col, - self.eval_w_plus_r_inv_col, - self.eval_ts_col, - ]; - - let comm_vec = [ - U.comm_W, - comm_Az, - comm_Bz, - comm_Cz, - U.comm_E, - comm_L_row, - comm_L_col, - vk.S_comm.comm_val_A, - vk.S_comm.comm_val_B, - vk.S_comm.comm_val_C, - comm_t_plus_r_inv_row, - vk.S_comm.comm_row, - comm_w_plus_r_inv_row, - vk.S_comm.comm_ts_row, - comm_t_plus_r_inv_col, - vk.S_comm.comm_col, - comm_w_plus_r_inv_col, - vk.S_comm.comm_ts_col, - ]; - transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript - let c = transcript.squeeze(b"c")?; - let u: PolyEvalInstance = - PolyEvalInstance::batch(&comm_vec, rand_sc.clone(), &eval_vec, &c); - - // verify - EE::verify( - &vk.vk_ee, - &mut transcript, - &u.c, - &rand_sc, - &u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -// #[cfg(test)] -// mod tests { -// use ff::Field; -// use pasta_curves::Fq as Scalar; - -// use super::*; -// use crate::provider::PallasEngine; - -// #[test] -// fn test_padded() { -// let mut rng = 
rand::thread_rng(); -// let e = Scalar::random(&mut rng); -// let v: Vec = (0..10).map(|_| Scalar::random(&mut -// rng)).collect(); let n = 20; - -// let result = padded::(&v, n, &e); - -// assert_eq!(result.len(), n); -// assert_eq!(&result[..10], &v[..]); -// assert!(result[10..].iter().all(|&i| i == e)); -// } -// } diff --git a/src/spartan/snark.rs b/src/spartan/snark.rs deleted file mode 100644 index 1973615..0000000 --- a/src/spartan/snark.rs +++ /dev/null @@ -1,560 +0,0 @@ -//! This module implements `RelaxedR1CSSNARKTrait` using Spartan that is generic -//! over the polynomial commitment and evaluation argument (i.e., a PCS) -//! This version of Spartan does not use preprocessing so the verifier keeps the -//! entire description of R1CS matrices. This is essentially optimal for the -//! verifier when using an IPA-based polynomial commitment scheme. - -use std::sync::Arc; - -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness, SparseMatrix}, - spartan::{ - compute_eval_table_sparse, - polys::{ - eq::EqPolynomial, - multilinear::{MultilinearPolynomial, SparsePolynomial}, - power::PowPolynomial, - }, - powers, - sumcheck::SumcheckProof, - PolyEvalInstance, PolyEvalWitness, - }, - traits::{ - evaluation::EvaluationEngineTrait, - snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait}, - Engine, TranscriptEngineTrait, - }, - CommitmentKey, -}; - -/// A type that represents the prover's key -#[derive(Debug, Clone)] -pub struct ProverKey> { - pub pk_ee: EE::ProverKey, - pub vk_digest: E::Scalar, // digest of the verifier's key -} - -/// A type that represents the verifier's key -#[derive(Debug, Clone, Serialize)] -#[serde(bound = "")] -pub struct VerifierKey> { - vk_ee: EE::VerifierKey, - S: R1CSShape, - #[serde(skip, default = 
"OnceCell::new")] - digest: OnceCell, -} - -impl> SimpleDigestible for VerifierKey {} - -impl> VerifierKey { - fn new(shape: R1CSShape, vk_ee: EE::VerifierKey) -> Self { - Self { - vk_ee, - S: shape, - digest: OnceCell::new(), - } - } -} - -impl> DigestHelperTrait for VerifierKey { - /// Returns the digest of the verifier's key. - fn digest(&self) -> E::Scalar { - self.digest - .get_or_try_init(|| { - let dc = DigestComputer::::new(self); - dc.digest() - }) - .cloned() - .expect("Failure to retrieve digest!") - } -} - -/// A succinct proof of knowledge of a witness to a relaxed R1CS instance -/// The proof is produced using Spartan's combination of the sum-check and -/// the commitment to a vector viewed as a polynomial commitment -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RelaxedR1CSSNARK> { - sc_proof_outer: SumcheckProof, - claims_outer: (E::Scalar, E::Scalar, E::Scalar), - eval_E: E::Scalar, - sc_proof_inner: SumcheckProof, - eval_W: E::Scalar, - sc_proof_batch: SumcheckProof, - evals_batch: Vec, - eval_arg: EE::EvaluationArgument, -} - -impl> RelaxedR1CSSNARKTrait for RelaxedR1CSSNARK { - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn initialize_pk( - ck: Arc>, - vk_digest: ::Scalar, - ) -> Result { - todo!("not implemented for nova snarks"); - } - - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> { - let (pk_ee, vk_ee) = EE::setup(ck); - - let S = S.pad(); - - let vk: VerifierKey = VerifierKey::new(S, vk_ee); - - let pk = ProverKey { - pk_ee, - vk_digest: vk.digest(), - }; - - Ok((pk, vk)) - } - - /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance - #[tracing::instrument(skip_all, name = "SNARK::prove")] - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result { - // pad the R1CSShape - let S = S.pad(); - // sanity check that R1CSShape 
has all required size characteristics - assert!(S.is_regular_shape()); - - let W = W.pad(&S); // pad the witness - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the digest of vk (which includes R1CS matrices) and the - // RelaxedR1CSInstance to the transcript - transcript.absorb(b"vk", &pk.vk_digest); - transcript.absorb(b"U", U); - - // compute the full satisfying assignment by concatenating W.W, U.u, and U.X - let mut z = [W.W.clone(), vec![U.u], U.X.clone()].concat(); - - let (num_rounds_x, num_rounds_y) = ( - usize::try_from(S.num_cons.ilog2()).unwrap(), - (usize::try_from(S.num_vars.ilog2()).unwrap() + 1), - ); - - // outer sum-check - let tau: EqPolynomial<_> = - PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); - - let mut poly_tau = MultilinearPolynomial::new(tau.evals()); - let (mut poly_Az, mut poly_Bz, poly_Cz, mut poly_uCz_E) = { - let (poly_Az, poly_Bz, poly_Cz) = S.multiply_vec(&z)?; - let poly_uCz_E = (0..S.num_cons) - .into_par_iter() - .map(|i| U.u * poly_Cz[i] + W.E[i]) - .collect::>(); - ( - MultilinearPolynomial::new(poly_Az), - MultilinearPolynomial::new(poly_Bz), - MultilinearPolynomial::new(poly_Cz), - MultilinearPolynomial::new(poly_uCz_E), - ) - }; - - let comb_func_outer = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - let (sc_proof_outer, r_x, claims_outer) = SumcheckProof::prove_cubic_with_additive_term( - &E::Scalar::ZERO, // claim is zero - num_rounds_x, - &mut poly_tau, - &mut poly_Az, - &mut poly_Bz, - &mut poly_uCz_E, - comb_func_outer, - &mut transcript, - )?; - - // claims from the end of sum-check - let (claim_Az, claim_Bz): (E::Scalar, E::Scalar) = (claims_outer[1], claims_outer[2]); - let chis_r_x = EqPolynomial::evals_from_points(&r_x); - - let claim_Cz = MultilinearPolynomial::evaluate_with_chis(poly_Cz.evaluations(), &chis_r_x); - let eval_E 
= MultilinearPolynomial::evaluate_with_chis(&W.E, &chis_r_x); - transcript.absorb( - b"claims_outer", - &[claim_Az, claim_Bz, claim_Cz, eval_E].as_slice(), - ); - - // inner sum-check - let r = transcript.squeeze(b"r")?; - let claim_inner_joint = claim_Az + r * claim_Bz + r * r * claim_Cz; - - let poly_ABC = { - // compute the initial evaluation table for R(\tau, x) - let evals_rx = EqPolynomial::evals_from_points(&r_x.clone()); - - let (evals_A, evals_B, evals_C) = compute_eval_table_sparse(&S, &evals_rx); - - assert_eq!(evals_A.len(), evals_B.len()); - assert_eq!(evals_A.len(), evals_C.len()); - (0..evals_A.len()) - .into_par_iter() - .map(|i| evals_A[i] + r * evals_B[i] + r * r * evals_C[i]) - .collect::>() - }; - - let poly_z = { - z.resize(S.num_vars * 2, E::Scalar::ZERO); - z - }; - - let comb_func = |poly_A_comp: &E::Scalar, poly_B_comp: &E::Scalar| -> E::Scalar { - *poly_A_comp * *poly_B_comp - }; - let (sc_proof_inner, r_y, _claims_inner) = SumcheckProof::prove_quad( - &claim_inner_joint, - num_rounds_y, - &mut MultilinearPolynomial::new(poly_ABC), - &mut MultilinearPolynomial::new(poly_z), - comb_func, - &mut transcript, - )?; - - // Add additional claims about W and E polynomials to the list from CC - // We will reduce a vector of claims of evaluations at different points into - // claims about them at the same point. For example, eval_W =? - // W(r_y[1..]) and eval_E =? E(r_x) into two claims: eval_W_prime =? - // W(rz) and eval_E_prime =? E(rz) We can them combine the two into one: - // eval_W_prime + gamma * eval_E_prime =? (W + gamma*E)(rz), where gamma - // is a public challenge Since commitments to W and E are homomorphic, - // the verifier can compute a commitment to the batched polynomial. 
- let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_y[1..]); - - let w_vec = vec![PolyEvalWitness { p: W.W }, PolyEvalWitness { p: W.E }]; - let u_vec = vec![ - PolyEvalInstance { - c: U.comm_W, - x: r_y[1..].to_vec(), - e: eval_W, - }, - PolyEvalInstance { - c: U.comm_E, - x: r_x, - e: eval_E, - }, - ]; - - let (batched_u, batched_w, sc_proof_batch, claims_batch_left) = - batch_eval_reduce(u_vec, &w_vec, &mut transcript)?; - - let eval_arg = EE::prove( - ck, - &pk.pk_ee, - &mut transcript, - &batched_u.c, - &batched_w.p, - &batched_u.x, - &batched_u.e, - )?; - - Ok(Self { - sc_proof_outer, - claims_outer: (claim_Az, claim_Bz, claim_Cz), - eval_E, - sc_proof_inner, - eval_W, - sc_proof_batch, - evals_batch: claims_batch_left, - eval_arg, - }) - } - - /// verifies a proof of satisfiability of a `RelaxedR1CS` instance - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError> { - let mut transcript = E::TE::new(b"RelaxedR1CSSNARK"); - - // append the digest of R1CS matrices and the RelaxedR1CSInstance to the - // transcript - transcript.absorb(b"vk", &vk.digest()); - transcript.absorb(b"U", U); - - let (num_rounds_x, num_rounds_y) = ( - usize::try_from(vk.S.num_cons.ilog2()).unwrap(), - (usize::try_from(vk.S.num_vars.ilog2()).unwrap() + 1), - ); - - // outer sum-check - let tau: EqPolynomial<_> = - PowPolynomial::new(&transcript.squeeze(b"t")?, num_rounds_x).into(); - - let (claim_outer_final, r_x) = - self.sc_proof_outer - .verify(E::Scalar::ZERO, num_rounds_x, 3, &mut transcript)?; - - // verify claim_outer_final - let (claim_Az, claim_Bz, claim_Cz) = self.claims_outer; - let taus_bound_rx = tau.evaluate(&r_x); - let claim_outer_final_expected = - taus_bound_rx * (claim_Az * claim_Bz - U.u * claim_Cz - self.eval_E); - if claim_outer_final != claim_outer_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb( - b"claims_outer", - &[ - self.claims_outer.0, - self.claims_outer.1, - 
self.claims_outer.2, - self.eval_E, - ] - .as_slice(), - ); - - // inner sum-check - let r = transcript.squeeze(b"r")?; - let claim_inner_joint = - self.claims_outer.0 + r * self.claims_outer.1 + r * r * self.claims_outer.2; - - let (claim_inner_final, r_y) = - self.sc_proof_inner - .verify(claim_inner_joint, num_rounds_y, 2, &mut transcript)?; - - // verify claim_inner_final - let eval_Z = { - let eval_X = { - // public IO is (u, X) - let X = vec![U.u] - .into_iter() - .chain(U.X.iter().cloned()) - .collect::>(); - SparsePolynomial::new(usize::try_from(vk.S.num_vars.ilog2()).unwrap(), X) - .evaluate(&r_y[1..]) - }; - (E::Scalar::ONE - r_y[0]) * self.eval_W + r_y[0] * eval_X - }; - - // compute evaluations of R1CS matrices - let multi_evaluate = |M_vec: &[&SparseMatrix], - r_x: &[E::Scalar], - r_y: &[E::Scalar]| - -> Vec { - let evaluate_with_table = - |M: &SparseMatrix, T_x: &[E::Scalar], T_y: &[E::Scalar]| -> E::Scalar { - M.par_iter_rows() - .enumerate() - .map(|(row_idx, row)| { - M.get_row(row) - .map(|(val, col_idx)| T_x[row_idx] * T_y[*col_idx] * val) - .sum::() - }) - .sum() - }; - - let (T_x, T_y) = rayon::join( - || EqPolynomial::evals_from_points(r_x), - || EqPolynomial::evals_from_points(r_y), - ); - - (0..M_vec.len()) - .into_par_iter() - .map(|i| evaluate_with_table(M_vec[i], &T_x, &T_y)) - .collect() - }; - - let evals = multi_evaluate(&[&vk.S.A, &vk.S.B, &vk.S.C], &r_x, &r_y); - - let claim_inner_final_expected = (evals[0] + r * evals[1] + r * r * evals[2]) * eval_Z; - if claim_inner_final != claim_inner_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - // add claims about W and E polynomials - let u_vec: Vec> = vec![ - PolyEvalInstance { - c: U.comm_W, - x: r_y[1..].to_vec(), - e: self.eval_W, - }, - PolyEvalInstance { - c: U.comm_E, - x: r_x, - e: self.eval_E, - }, - ]; - - let batched_u = batch_eval_verify( - u_vec, - &mut transcript, - &self.sc_proof_batch, - &self.evals_batch, - )?; - - // verify - EE::verify( - &vk.vk_ee, 
- &mut transcript, - &batched_u.c, - &batched_u.x, - &batched_u.e, - &self.eval_arg, - )?; - - Ok(()) - } -} - -/// Reduces a batch of polynomial evaluation claims using Sumcheck -/// to a single claim at the same point. -/// -/// # Details -/// -/// We are given as input a list of instance/witness pairs -/// u = [(Cᵢ, xᵢ, eᵢ)], w = [Pᵢ], such that -/// - nᵢ = |xᵢ| -/// - Cᵢ = Commit(Pᵢ) -/// - eᵢ = Pᵢ(xᵢ) -/// - |Pᵢ| = 2^nᵢ -/// -/// We allow the polynomial Pᵢ to have different sizes, by appropriately scaling -/// the claims and resulting evaluations from Sumcheck. -pub(in crate::spartan) fn batch_eval_reduce( - u_vec: Vec>, - w_vec: &[PolyEvalWitness], - transcript: &mut E::TE, -) -> Result< - ( - PolyEvalInstance, - PolyEvalWitness, - SumcheckProof, - Vec, - ), - NovaError, -> { - let num_claims = u_vec.len(); - assert_eq!(w_vec.len(), num_claims); - - // Compute nᵢ and n = maxᵢ{nᵢ} - let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); - - // Check polynomials match number of variables, i.e. 
|Pᵢ| = 2^nᵢ - zip_with_for_each!(iter, (w_vec, num_rounds), |w, num_vars| assert_eq!( - w.p.len(), - 1 << num_vars - )); - - // generate a challenge, and powers of it for random linear combination - let rho = transcript.squeeze(b"r")?; - let powers_of_rho = powers(&rho, num_claims); - - let (claims, u_xs, comms): (Vec<_>, Vec<_>, Vec<_>) = - u_vec.into_iter().map(|u| (u.e, u.x, u.c)).multiunzip(); - - // Create clones of polynomials to be given to Sumcheck - // Pᵢ(X) - let polys_P: Vec> = w_vec - .iter() - .map(|w| MultilinearPolynomial::new(w.p.clone())) - .collect(); - // eq(xᵢ, X) - let polys_eq: Vec> = u_xs - .into_iter() - .map(|ux| MultilinearPolynomial::new(EqPolynomial::evals_from_points(&ux))) - .collect(); - - // For each i, check eᵢ = ∑ₓ Pᵢ(x)eq(xᵢ,x), where x ∈ {0,1}^nᵢ - let comb_func = |poly_P: &E::Scalar, poly_eq: &E::Scalar| -> E::Scalar { *poly_P * *poly_eq }; - let (sc_proof_batch, r, claims_batch) = SumcheckProof::prove_quad_batch( - &claims, - &num_rounds, - polys_P, - polys_eq, - &powers_of_rho, - comb_func, - transcript, - )?; - - let (claims_batch_left, _): (Vec, Vec) = claims_batch; - - transcript.absorb(b"l", &claims_batch_left.as_slice()); - - // we now combine evaluation claims at the same point r into one - let gamma = transcript.squeeze(b"g")?; - - let u_joint = - PolyEvalInstance::batch_diff_size(&comms, &claims_batch_left, &num_rounds, r, gamma); - - // P = ∑ᵢ γⁱ⋅Pᵢ - let w_joint = - PolyEvalWitness::batch_diff_size(&w_vec.iter().by_ref().collect::>(), gamma); - - Ok((u_joint, w_joint, sc_proof_batch, claims_batch_left)) -} - -/// Verifies a batch of polynomial evaluation claims using Sumcheck -/// reducing them to a single claim at the same point. 
-pub(in crate::spartan) fn batch_eval_verify( - u_vec: Vec>, - transcript: &mut E::TE, - sc_proof_batch: &SumcheckProof, - evals_batch: &[E::Scalar], -) -> Result, NovaError> { - let num_claims = u_vec.len(); - assert_eq!(evals_batch.len(), num_claims); - - // generate a challenge - let rho = transcript.squeeze(b"r")?; - let powers_of_rho = powers(&rho, num_claims); - - // Compute nᵢ and n = maxᵢ{nᵢ} - let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::>(); - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let claims = u_vec.iter().map(|u| u.e).collect::>(); - - let (claim_batch_final, r) = - sc_proof_batch.verify_batch(&claims, &num_rounds, &powers_of_rho, 2, transcript)?; - - let claim_batch_final_expected = { - let evals_r = u_vec.iter().map(|u| { - let (_, r_hi) = r.split_at(num_rounds_max - u.x.len()); - EqPolynomial::new(r_hi.to_vec()).evaluate(&u.x) - }); - - zip_with!( - (evals_r, evals_batch.iter(), powers_of_rho.iter()), - |e_i, p_i, rho_i| e_i * *p_i * rho_i - ) - .sum() - }; - - if claim_batch_final != claim_batch_final_expected { - return Err(NovaError::InvalidSumcheckProof); - } - - transcript.absorb(b"l", &evals_batch); - - // we now combine evaluation claims at the same point r into one - let gamma = transcript.squeeze(b"g")?; - - let comms = u_vec.into_iter().map(|u| u.c).collect::>(); - - let u_joint = PolyEvalInstance::batch_diff_size(&comms, evals_batch, &num_rounds, r, gamma); - - Ok(u_joint) -} diff --git a/src/spartan/sumcheck/engine.rs b/src/spartan/sumcheck/engine.rs deleted file mode 100644 index 5e41d03..0000000 --- a/src/spartan/sumcheck/engine.rs +++ /dev/null @@ -1,630 +0,0 @@ -use ff::Field; -use rayon::prelude::*; - -use crate::{ - provider::util::field::batch_invert, - spartan::{ - math::Math, - polys::{ - eq::EqPolynomial, masked_eq::MaskedEqPolynomial, multilinear::MultilinearPolynomial, - power::PowPolynomial, - }, - sumcheck::SumcheckProof, - }, - traits::commitment::CommitmentEngineTrait, - Commitment, 
CommitmentKey, Engine, NovaError, -}; - -/// Defines a trait for implementing sum-check in a generic manner -pub trait SumcheckEngine: Send + Sync { - /// returns the initial claims - fn initial_claims(&self) -> Vec; - - /// degree of the sum-check polynomial - fn degree(&self) -> usize; - - /// the size of the polynomials - fn size(&self) -> usize; - - /// returns evaluation points at 0, 2, d-1 (where d is the degree of the - /// sum-check polynomial) - fn evaluation_points(&self) -> Vec>; - - /// bounds a variable in the constituent polynomials - fn bound(&mut self, r: &E::Scalar); - - /// returns the final claims - fn final_claims(&self) -> Vec>; -} - -/// The [`WitnessBoundSumcheck`] ensures that the witness polynomial W defined -/// over n = log(N) variables, is zero outside of the first `num_vars = 2^m` -/// entries. -/// -/// # Details -/// -/// The `W` polynomial is padded with zeros to size N = 2^n. -/// The `masked_eq` polynomials is defined as with regards to a random challenge -/// `tau` as the eq(tau) polynomial, where the first 2^m evaluations to 0. -/// -/// The instance is given by -/// `0 = ∑_{0≤i<2^n} masked_eq[i] * W[i]`. -/// It is equivalent to the expression -/// `0 = ∑_{2^m≤i<2^n} eq[i] * W[i]` -/// Since `eq` is random, the instance is only satisfied if `W[2^{m}..] = 0`. 
-pub(in crate::spartan) struct WitnessBoundSumcheck { - poly_W: MultilinearPolynomial, - poly_masked_eq: MultilinearPolynomial, -} - -impl WitnessBoundSumcheck { - pub fn new(tau: E::Scalar, poly_W_padded: Vec, num_vars: usize) -> Self { - let num_vars_log = num_vars.log_2(); - // When num_vars = num_rounds, we shouldn't have to prove anything - // but we still want this instance to compute the evaluation of W - let num_rounds = poly_W_padded.len().log_2(); - assert!(num_vars_log < num_rounds); - - let tau_coords = PowPolynomial::new(&tau, num_rounds).coordinates(); - let poly_masked_eq_evals = - MaskedEqPolynomial::new(&EqPolynomial::new(tau_coords), num_vars_log).evals(); - - Self { - poly_W: MultilinearPolynomial::new(poly_W_padded), - poly_masked_eq: MultilinearPolynomial::new(poly_masked_eq_evals), - } - } -} -impl SumcheckEngine for WitnessBoundSumcheck { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_W.len(), self.poly_masked_eq.len()); - self.poly_W.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp }; - - let (eval_point_0, eval_point_2, eval_point_3) = - SumcheckProof::::compute_eval_points_cubic( - &self.poly_masked_eq, - &self.poly_W, - &self.poly_W, // unused - &comb_func, - ); - - vec![vec![eval_point_0, eval_point_2, eval_point_3]] - } - - fn bound(&mut self, r: &E::Scalar) { - [&mut self.poly_W, &mut self.poly_masked_eq] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_W[0], self.poly_masked_eq[0]]] - } -} - -pub(in crate::spartan) struct MemorySumcheckInstance { - // row - w_plus_r_row: MultilinearPolynomial, - t_plus_r_row: MultilinearPolynomial, - t_plus_r_inv_row: MultilinearPolynomial, - w_plus_r_inv_row: MultilinearPolynomial, 
- ts_row: MultilinearPolynomial, - - // col - w_plus_r_col: MultilinearPolynomial, - t_plus_r_col: MultilinearPolynomial, - t_plus_r_inv_col: MultilinearPolynomial, - w_plus_r_inv_col: MultilinearPolynomial, - ts_col: MultilinearPolynomial, - - // eq - poly_eq: MultilinearPolynomial, - - // zero polynomial - poly_zero: MultilinearPolynomial, -} - -impl MemorySumcheckInstance { - /// Computes witnesses for `MemoryInstanceSumcheck` - /// - /// # Description - /// We use the logUp protocol to prove that - /// ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) = 0 - /// where - /// T_row[i] = mem_row[i] * gamma + i - /// = eq(tau)[i] * gamma + i - /// W_row[i] = L_row[i] * gamma + addr_row[i] - /// = eq(tau)[row[i]] * gamma + addr_row[i] - /// T_col[i] = mem_col[i] * gamma + i - /// = z[i] * gamma + i - /// W_col[i] = addr_col[i] * gamma + addr_col[i] - /// = z[col[i]] * gamma + addr_col[i] - /// and - /// `TS_row`, `TS_col` are integer-valued vectors representing the number - /// of reads to each memory cell of `L_row`, `L_col` - /// - /// The function returns oracles for the polynomials TS[i]/(T[i] + r), - /// 1/(W[i] + r), as well as auxiliary polynomials T[i] + r, W[i] + r - pub fn compute_oracles( - ck: &CommitmentKey, - r: &E::Scalar, - gamma: &E::Scalar, - mem_row: &[E::Scalar], - addr_row: &[E::Scalar], - L_row: &[E::Scalar], - ts_row: &[E::Scalar], - mem_col: &[E::Scalar], - addr_col: &[E::Scalar], - L_col: &[E::Scalar], - ts_col: &[E::Scalar], - ) -> Result<([Commitment; 4], [Vec; 4], [Vec; 4]), NovaError> { - // hash the tuples of (addr,val) memory contents and read responses into a - // single field element using `hash_func` - let hash_func_vec = |mem: &[E::Scalar], - addr: &[E::Scalar], - lookups: &[E::Scalar]| - -> (Vec, Vec) { - let hash_func = - |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr }; - assert_eq!(addr.len(), lookups.len()); - rayon::join( - || { - (0..mem.len()) - .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i])) - 
.collect::>() - }, - || { - (0..addr.len()) - .map(|i| hash_func(&addr[i], &lookups[i])) - .collect::>() - }, - ) - }; - - let ((T_row, W_row), (T_col, W_col)) = rayon::join( - || hash_func_vec(mem_row, addr_row, L_row), - || hash_func_vec(mem_col, addr_col, L_col), - ); - - // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r) - let helper = |T: &[E::Scalar], - W: &[E::Scalar], - TS: &[E::Scalar], - r: &E::Scalar| - -> ( - ( - Result, NovaError>, - Result, NovaError>, - ), - (Vec, Vec), - ) { - rayon::join( - || { - rayon::join( - || { - let inv = - batch_invert(T.par_iter().map(|e| *e + *r).collect::>())?; - - // compute inv[i] * TS[i] in parallel - Ok( - zip_with!((inv.into_par_iter(), TS.par_iter()), |e1, e2| e1 * *e2) - .collect::>(), - ) - }, - || batch_invert(W.par_iter().map(|e| *e + *r).collect::>()), - ) - }, - || { - rayon::join( - || T.par_iter().map(|e| *e + *r).collect(), - || W.par_iter().map(|e| *e + *r).collect(), - ) - }, - ) - }; - - let ( - ((t_plus_r_inv_row, w_plus_r_inv_row), (t_plus_r_row, w_plus_r_row)), - ((t_plus_r_inv_col, w_plus_r_inv_col), (t_plus_r_col, w_plus_r_col)), - ) = rayon::join( - || helper(&T_row, &W_row, ts_row, r), - || helper(&T_col, &W_col, ts_col, r), - ); - - let t_plus_r_inv_row = t_plus_r_inv_row?; - let w_plus_r_inv_row = w_plus_r_inv_row?; - let t_plus_r_inv_col = t_plus_r_inv_col?; - let w_plus_r_inv_col = w_plus_r_inv_col?; - - let ( - (comm_t_plus_r_inv_row, comm_w_plus_r_inv_row), - (comm_t_plus_r_inv_col, comm_w_plus_r_inv_col), - ) = rayon::join( - || { - rayon::join( - || E::CE::commit(ck, &t_plus_r_inv_row), - || E::CE::commit(ck, &w_plus_r_inv_row), - ) - }, - || { - rayon::join( - || E::CE::commit(ck, &t_plus_r_inv_col), - || E::CE::commit(ck, &w_plus_r_inv_col), - ) - }, - ); - - let comm_vec = [ - comm_t_plus_r_inv_row, - comm_w_plus_r_inv_row, - comm_t_plus_r_inv_col, - comm_w_plus_r_inv_col, - ]; - - let poly_vec = [ - t_plus_r_inv_row, - w_plus_r_inv_row, - t_plus_r_inv_col, - w_plus_r_inv_col, - ]; - 
- let aux_poly_vec = [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col]; - - Ok((comm_vec, poly_vec, aux_poly_vec)) - } - - pub fn new( - polys_oracle: [Vec; 4], - polys_aux: [Vec; 4], - poly_eq: Vec, - ts_row: Vec, - ts_col: Vec, - ) -> Self { - let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = polys_oracle; - let [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col] = polys_aux; - - let zero = vec![E::Scalar::ZERO; poly_eq.len()]; - - Self { - w_plus_r_row: MultilinearPolynomial::new(w_plus_r_row), - t_plus_r_row: MultilinearPolynomial::new(t_plus_r_row), - t_plus_r_inv_row: MultilinearPolynomial::new(t_plus_r_inv_row), - w_plus_r_inv_row: MultilinearPolynomial::new(w_plus_r_inv_row), - ts_row: MultilinearPolynomial::new(ts_row), - w_plus_r_col: MultilinearPolynomial::new(w_plus_r_col), - t_plus_r_col: MultilinearPolynomial::new(t_plus_r_col), - t_plus_r_inv_col: MultilinearPolynomial::new(t_plus_r_inv_col), - w_plus_r_inv_col: MultilinearPolynomial::new(w_plus_r_inv_col), - ts_col: MultilinearPolynomial::new(ts_col), - poly_eq: MultilinearPolynomial::new(poly_eq), - poly_zero: MultilinearPolynomial::new(zero), - } - } -} - -impl SumcheckEngine for MemorySumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO; 6] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - // sanity checks - assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_row.len()); - assert_eq!(self.w_plus_r_row.len(), self.ts_row.len()); - assert_eq!(self.w_plus_r_row.len(), self.w_plus_r_col.len()); - assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_col.len()); - assert_eq!(self.w_plus_r_row.len(), self.ts_col.len()); - - self.w_plus_r_row.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp - *poly_B_comp }; - - let comb_func2 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - 
poly_C_comp: &E::Scalar, - _poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - E::Scalar::ONE) - }; - - let comb_func3 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - // inv related evaluation points - // 0 = ∑ TS[i]/(T[i] + r) - 1/(W[i] + r) - let (eval_inv_0_row, eval_inv_2_row, eval_inv_3_row) = - SumcheckProof::::compute_eval_points_cubic( - &self.t_plus_r_inv_row, - &self.w_plus_r_inv_row, - &self.poly_zero, - &comb_func, - ); - - let (eval_inv_0_col, eval_inv_2_col, eval_inv_3_col) = - SumcheckProof::::compute_eval_points_cubic( - &self.t_plus_r_inv_col, - &self.w_plus_r_inv_col, - &self.poly_zero, - &comb_func, - ); - - // row related evaluation points - // 0 = ∑ eq[i] * (inv_T[i] * (T[i] + r) - TS[i])) - let (eval_T_0_row, eval_T_2_row, eval_T_3_row) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.t_plus_r_inv_row, - &self.t_plus_r_row, - &self.ts_row, - &comb_func3, - ); - // 0 = ∑ eq[i] * (inv_W[i] * (T[i] + r) - 1)) - let (eval_W_0_row, eval_W_2_row, eval_W_3_row) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.w_plus_r_inv_row, - &self.w_plus_r_row, - &self.poly_zero, - &comb_func2, - ); - - // column related evaluation points - let (eval_T_0_col, eval_T_2_col, eval_T_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.t_plus_r_inv_col, - &self.t_plus_r_col, - &self.ts_col, - &comb_func3, - ); - let (eval_W_0_col, eval_W_2_col, eval_W_3_col) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_eq, - &self.w_plus_r_inv_col, - &self.w_plus_r_col, - &self.poly_zero, - &comb_func2, - ); - - vec![ - vec![eval_inv_0_row, eval_inv_2_row, eval_inv_3_row], - vec![eval_inv_0_col, eval_inv_2_col, eval_inv_3_col], - 
vec![eval_T_0_row, eval_T_2_row, eval_T_3_row], - vec![eval_W_0_row, eval_W_2_row, eval_W_3_row], - vec![eval_T_0_col, eval_T_2_col, eval_T_3_col], - vec![eval_W_0_col, eval_W_2_col, eval_W_3_col], - ] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.t_plus_r_row, - &mut self.t_plus_r_inv_row, - &mut self.w_plus_r_row, - &mut self.w_plus_r_inv_row, - &mut self.ts_row, - &mut self.t_plus_r_col, - &mut self.t_plus_r_inv_col, - &mut self.w_plus_r_col, - &mut self.w_plus_r_inv_col, - &mut self.ts_col, - &mut self.poly_eq, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - let poly_row_final = vec![ - self.t_plus_r_inv_row[0], - self.w_plus_r_inv_row[0], - self.ts_row[0], - ]; - - let poly_col_final = vec![ - self.t_plus_r_inv_col[0], - self.w_plus_r_inv_col[0], - self.ts_col[0], - ]; - - vec![poly_row_final, poly_col_final] - } -} - -pub(in crate::spartan) struct OuterSumcheckInstance { - poly_tau: MultilinearPolynomial, - poly_Az: MultilinearPolynomial, - poly_Bz: MultilinearPolynomial, - poly_uCz_E: MultilinearPolynomial, - - poly_Mz: MultilinearPolynomial, - eval_Mz_at_tau: E::Scalar, - - poly_zero: MultilinearPolynomial, -} - -impl OuterSumcheckInstance { - pub fn new( - tau: Vec, - Az: Vec, - Bz: Vec, - uCz_E: Vec, - Mz: Vec, - eval_Mz_at_tau: &E::Scalar, - ) -> Self { - let zero = vec![E::Scalar::ZERO; tau.len()]; - Self { - poly_tau: MultilinearPolynomial::new(tau), - poly_Az: MultilinearPolynomial::new(Az), - poly_Bz: MultilinearPolynomial::new(Bz), - poly_uCz_E: MultilinearPolynomial::new(uCz_E), - poly_Mz: MultilinearPolynomial::new(Mz), - eval_Mz_at_tau: *eval_Mz_at_tau, - poly_zero: MultilinearPolynomial::new(zero), - } - } -} - -impl SumcheckEngine for OuterSumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![E::Scalar::ZERO, self.eval_Mz_at_tau] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_tau.len(), self.poly_Az.len()); - 
assert_eq!(self.poly_tau.len(), self.poly_Bz.len()); - assert_eq!(self.poly_tau.len(), self.poly_uCz_E.len()); - assert_eq!(self.poly_tau.len(), self.poly_Mz.len()); - self.poly_tau.len() - } - - fn evaluation_points(&self) -> Vec> { - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar, - poly_D_comp: &E::Scalar| - -> E::Scalar { - *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) - }; - - let (eval_point_h_0, eval_point_h_2, eval_point_h_3) = - SumcheckProof::::compute_eval_points_cubic_with_additive_term( - &self.poly_tau, - &self.poly_Az, - &self.poly_Bz, - &self.poly_uCz_E, - &comb_func, - ); - - let comb_func2 = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - _poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp }; - - let (eval_point_e_0, eval_point_e_2, eval_point_e_3) = - SumcheckProof::::compute_eval_points_cubic( - &self.poly_tau, - &self.poly_Mz, - &self.poly_zero, - &comb_func2, - ); - - vec![ - vec![eval_point_h_0, eval_point_h_2, eval_point_h_3], - vec![eval_point_e_0, eval_point_e_2, eval_point_e_3], - ] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.poly_tau, - &mut self.poly_Az, - &mut self.poly_Bz, - &mut self.poly_uCz_E, - &mut self.poly_Mz, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_Az[0], self.poly_Bz[0]]] - } -} - -pub(in crate::spartan) struct InnerSumcheckInstance { - pub(in crate::spartan) claim: E::Scalar, - pub(in crate::spartan) poly_L_row: MultilinearPolynomial, - pub(in crate::spartan) poly_L_col: MultilinearPolynomial, - pub(in crate::spartan) poly_val: MultilinearPolynomial, -} -impl InnerSumcheckInstance { - pub fn new( - claim: E::Scalar, - poly_L_row: MultilinearPolynomial, - poly_L_col: MultilinearPolynomial, - poly_val: MultilinearPolynomial, - ) -> Self { - Self { - claim, - poly_L_row, - poly_L_col, - poly_val, - } - } -} -impl SumcheckEngine for 
InnerSumcheckInstance { - fn initial_claims(&self) -> Vec { - vec![self.claim] - } - - fn degree(&self) -> usize { - 3 - } - - fn size(&self) -> usize { - assert_eq!(self.poly_L_row.len(), self.poly_val.len()); - assert_eq!(self.poly_L_row.len(), self.poly_L_col.len()); - self.poly_L_row.len() - } - - fn evaluation_points(&self) -> Vec> { - let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val); - let comb_func = |poly_A_comp: &E::Scalar, - poly_B_comp: &E::Scalar, - poly_C_comp: &E::Scalar| - -> E::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - - let (eval_point_0, eval_point_2, eval_point_3) = - SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, &comb_func); - - vec![vec![eval_point_0, eval_point_2, eval_point_3]] - } - - fn bound(&mut self, r: &E::Scalar) { - [ - &mut self.poly_L_row, - &mut self.poly_L_col, - &mut self.poly_val, - ] - .par_iter_mut() - .for_each(|poly| poly.bind_poly_var_top(r)); - } - - fn final_claims(&self) -> Vec> { - vec![vec![self.poly_L_row[0], self.poly_L_col[0]]] - } -} diff --git a/src/spartan/sumcheck/mod.rs b/src/spartan/sumcheck/mod.rs deleted file mode 100644 index 3a0cde4..0000000 --- a/src/spartan/sumcheck/mod.rs +++ /dev/null @@ -1,632 +0,0 @@ -use ff::Field; -use itertools::Itertools as _; -use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - spartan::polys::{ - multilinear::MultilinearPolynomial, - univariate::{CompressedUniPoly, UniPoly}, - }, - traits::{Engine, TranscriptEngineTrait}, -}; - -pub(in crate::spartan) mod engine; - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub(crate) struct SumcheckProof { - compressed_polys: Vec>, -} - -impl SumcheckProof { - pub fn new(compressed_polys: Vec>) -> Self { - Self { compressed_polys } - } - - pub fn verify( - &self, - claim: E::Scalar, - num_rounds: usize, - degree_bound: usize, - transcript: &mut E::TE, - ) -> Result<(E::Scalar, Vec), NovaError> { - 
let mut e = claim; - let mut r: Vec = Vec::new(); - - // verify that there is a univariate polynomial for each round - if self.compressed_polys.len() != num_rounds { - return Err(NovaError::InvalidSumcheckProof); - } - - for i in 0..self.compressed_polys.len() { - let poly = self.compressed_polys[i].decompress(&e); - - // verify degree bound - if poly.degree() != degree_bound { - return Err(NovaError::InvalidSumcheckProof); - } - - // we do not need to check if poly(0) + poly(1) = e, as - // decompress() call above already ensures that holds - debug_assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - - r.push(r_i); - - // evaluate the claimed degree-ell polynomial at r_i - e = poly.evaluate(&r_i); - } - - Ok((e, r)) - } - - pub fn verify_batch( - &self, - claims: &[E::Scalar], - num_rounds: &[usize], - coeffs: &[E::Scalar], - degree_bound: usize, - transcript: &mut E::TE, - ) -> Result<(E::Scalar, Vec), NovaError> { - let num_instances = claims.len(); - assert_eq!(num_rounds.len(), num_instances); - assert_eq!(coeffs.len(), num_instances); - - // n = maxᵢ{nᵢ} - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - // Random linear combination of claims, - // where each claim is scaled by 2^{n-nᵢ} to account for the padding. 
- // - // claim = ∑ᵢ coeffᵢ⋅2^{n-nᵢ}⋅cᵢ - let claim = zip_with!( - ( - zip_with!(iter, (claims, num_rounds), |claim, num_rounds| { - let scaling_factor = 1 << (num_rounds_max - num_rounds); - E::Scalar::from(scaling_factor as u64) * claim - }), - coeffs.iter() - ), - |scaled_claim, coeff| scaled_claim * coeff - ) - .sum(); - - self.verify(claim, num_rounds_max, degree_bound, transcript) - } - - #[inline] - fn compute_eval_points_quad( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point); - (eval_point_0, eval_point_2) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1), - ) - } - - pub fn prove_quad( - claim: &E::Scalar, - num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut MultilinearPolynomial, - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = *claim; - for _ in 0..num_rounds { - let poly = { - let (eval_point_0, eval_point_2) = - Self::compute_eval_points_quad(poly_A, poly_B, &comb_func); - - let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; - UniPoly::from_evals(&evals) - }; - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = 
transcript.squeeze(b"c")?; - r.push(r_i); - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bind all tables to the verifier's challenge - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ); - } - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A[0], poly_B[0]], - )) - } - - pub fn prove_quad_batch( - claims: &[E::Scalar], - num_rounds: &[usize], - mut poly_A_vec: Vec>, - mut poly_B_vec: Vec>, - coeffs: &[E::Scalar], - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, (Vec, Vec)), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let num_claims = claims.len(); - - assert_eq!(num_rounds.len(), num_claims); - assert_eq!(poly_A_vec.len(), num_claims); - assert_eq!(poly_B_vec.len(), num_claims); - assert_eq!(coeffs.len(), num_claims); - - for (i, &num_rounds) in num_rounds.iter().enumerate() { - let expected_size = 1 << num_rounds; - - // Direct indexing with the assumption that the index will always be in bounds - let a = &poly_A_vec[i]; - let b = &poly_B_vec[i]; - - for (l, polyname) in [(a.len(), "poly_A_vec"), (b.len(), "poly_B_vec")].iter() { - assert_eq!( - *l, expected_size, - "Mismatch in size for {} at index {}", - polyname, i - ); - } - } - - let num_rounds_max = *num_rounds.iter().max().unwrap(); - let mut e = zip_with!( - iter, - (claims, num_rounds, coeffs), - |claim, num_rounds, coeff| { - let scaled_claim = - E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; - scaled_claim * coeff - } - ) - .sum(); - let mut r: Vec = Vec::new(); - let mut quad_polys: Vec> = Vec::new(); - - for current_round in 0..num_rounds_max { - let remaining_rounds = num_rounds_max - current_round; - let evals: Vec<(E::Scalar, E::Scalar)> = zip_with!( - par_iter, - (num_rounds, claims, poly_A_vec, poly_B_vec), - |num_rounds, claim, poly_A, poly_B| { - if remaining_rounds <= *num_rounds { - 
Self::compute_eval_points_quad(poly_A, poly_B, &comb_func) - } else { - let remaining_variables = remaining_rounds - num_rounds - 1; - let scaled_claim = - E::Scalar::from((1 << remaining_variables) as u64) * claim; - (scaled_claim, scaled_claim) - } - } - ) - .collect(); - - let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); - - let evals = vec![evals_combined_0, e - evals_combined_0, evals_combined_2]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - // bound all tables to the verifier's challenge - zip_with_for_each!( - ( - num_rounds.par_iter(), - poly_A_vec.par_iter_mut(), - poly_B_vec.par_iter_mut() - ), - |num_rounds, poly_A, poly_B| { - if remaining_rounds <= *num_rounds { - let _ = rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ); - } - } - ); - - e = poly.evaluate(&r_i); - quad_polys.push(poly.compress()); - } - poly_A_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); - poly_B_vec.iter().for_each(|p| assert_eq!(p.len(), 1)); - - let poly_A_final = poly_A_vec - .into_iter() - .map(|poly| poly[0]) - .collect::>(); - let poly_B_final = poly_B_vec - .into_iter() - .map(|poly| poly[0]) - .collect::>(); - - let eval_expected = zip_with!( - iter, - (poly_A_final, poly_B_final, coeffs), - |eA, eB, coeff| comb_func(eA, eB) * coeff - ) - .sum::(); - assert_eq!(e, eval_expected); - - let claims_prod = (poly_A_final, poly_B_final); - - Ok((Self::new(quad_polys), r, claims_prod)) - } - - #[inline] - fn compute_eval_points_cubic( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar, 
&E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); - - let poly_A_right_term = poly_A[len + i] - poly_A[i]; - let poly_B_right_term = poly_B[len + i] - poly_B[i]; - let poly_C_right_term = poly_C[len + i] - poly_C[i]; - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; - let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; - let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with - // bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; - let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; - let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ) - } - - #[inline] - fn compute_eval_points_cubic_with_additive_term( - poly_A: &MultilinearPolynomial, - poly_B: &MultilinearPolynomial, - poly_C: &MultilinearPolynomial, - poly_D: &MultilinearPolynomial, - comb_func: &F, - ) -> (E::Scalar, E::Scalar, E::Scalar) - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let len = poly_A.len() / 2; - (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); - - let poly_A_right_term = poly_A[len + i] - poly_A[i]; - let poly_B_right_term = poly_B[len + i] - poly_B[i]; - let poly_C_right_term = 
poly_C[len + i] - poly_C[i]; - let poly_D_right_term = poly_D[len + i] - poly_D[i]; - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A_right_term; - let poly_B_bound_point = poly_B[len + i] + poly_B_right_term; - let poly_C_bound_point = poly_C[len + i] + poly_C_right_term; - let poly_D_bound_point = poly_D[len + i] + poly_D_right_term; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with - // bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A_right_term; - let poly_B_bound_point = poly_B_bound_point + poly_B_right_term; - let poly_C_bound_point = poly_C_bound_point + poly_C_right_term; - let poly_D_bound_point = poly_D_bound_point + poly_D_right_term; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (E::Scalar::ZERO, E::Scalar::ZERO, E::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ) - } - - pub fn prove_cubic_with_additive_term( - claim: &E::Scalar, - num_rounds: usize, - poly_A: &mut MultilinearPolynomial, - poly_B: &mut MultilinearPolynomial, - poly_C: &mut MultilinearPolynomial, - poly_D: &mut MultilinearPolynomial, - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = *claim; - - for _ in 0..num_rounds { - let poly = { - // Make an iterator returning the contributions to the evaluations - let (eval_point_0, eval_point_2, eval_point_3) = - Self::compute_eval_points_cubic_with_additive_term( - poly_A, poly_B, poly_C, poly_D, &comb_func, - ); - - let evals = 
vec![ - eval_point_0, - claim_per_round - eval_point_0, - eval_point_2, - eval_point_3, - ]; - UniPoly::from_evals(&evals) - }; - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bound all tables to the verifier's challenge - rayon::join( - || { - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ) - }, - || { - rayon::join( - || poly_C.bind_poly_var_top(&r_i), - || poly_D.bind_poly_var_top(&r_i), - ) - }, - ); - } - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]], - )) - } - - pub fn prove_cubic_with_additive_term_batch( - claims: &[E::Scalar], - num_rounds: &[usize], - mut poly_A_vec: Vec>, - mut poly_B_vec: Vec>, - mut poly_C_vec: Vec>, - mut poly_D_vec: Vec>, - coeffs: &[E::Scalar], - comb_func: F, - transcript: &mut E::TE, - ) -> Result<(Self, Vec, Vec>), NovaError> - where - F: Fn(&E::Scalar, &E::Scalar, &E::Scalar, &E::Scalar) -> E::Scalar + Sync, - { - let num_instances = claims.len(); - assert_eq!(num_rounds.len(), num_instances); - assert_eq!(coeffs.len(), num_instances); - assert_eq!(poly_A_vec.len(), num_instances); - assert_eq!(poly_B_vec.len(), num_instances); - assert_eq!(poly_C_vec.len(), num_instances); - assert_eq!(poly_D_vec.len(), num_instances); - - for (i, &num_rounds) in num_rounds.iter().enumerate() { - let expected_size = 1 << num_rounds; - - // Direct indexing with the assumption that the index will always be in bounds - let a = &poly_A_vec[i]; - let b = &poly_B_vec[i]; - let c = &poly_C_vec[i]; - let d = &poly_D_vec[i]; - - for (l, polyname) in [ - (a.len(), "poly_A"), - (b.len(), "poly_B"), - (c.len(), "poly_C"), - (d.len(), "poly_D"), - ] - .iter() - { - assert_eq!( - *l, expected_size, - 
"Mismatch in size for {} at index {}", - polyname, i - ); - } - } - - let num_rounds_max = *num_rounds.iter().max().unwrap(); - - let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); - let mut claim_per_round = zip_with!( - iter, - (claims, num_rounds, coeffs), - |claim, num_rounds, coeff| { - let scaled_claim = - E::Scalar::from((1 << (num_rounds_max - num_rounds)) as u64) * claim; - scaled_claim * *coeff - } - ) - .sum(); - - for current_round in 0..num_rounds_max { - let remaining_rounds = num_rounds_max - current_round; - let evals: Vec<(E::Scalar, E::Scalar, E::Scalar)> = zip_with!( - par_iter, - (num_rounds, claims, poly_A_vec, poly_B_vec, poly_C_vec, poly_D_vec), - |num_rounds, claim, poly_A, poly_B, poly_C, poly_D| { - if remaining_rounds <= *num_rounds { - Self::compute_eval_points_cubic_with_additive_term( - poly_A, poly_B, poly_C, poly_D, &comb_func, - ) - } else { - let remaining_variables = remaining_rounds - num_rounds - 1; - let scaled_claim = - E::Scalar::from((1 << remaining_variables) as u64) * claim; - (scaled_claim, scaled_claim, scaled_claim) - } - } - ) - .collect(); - - let evals_combined_0 = (0..num_instances).map(|i| evals[i].0 * coeffs[i]).sum(); - let evals_combined_2 = (0..num_instances).map(|i| evals[i].1 * coeffs[i]).sum(); - let evals_combined_3 = (0..num_instances).map(|i| evals[i].2 * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - claim_per_round - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - transcript.absorb(b"p", &poly); - - // derive the verifier's challenge for the next round - let r_i = transcript.squeeze(b"c")?; - r.push(r_i); - - polys.push(poly.compress()); - - // Set up next round - claim_per_round = poly.evaluate(&r_i); - - // bound all the tables to the verifier's challenge - - zip_with_for_each!( - ( - num_rounds.par_iter(), - poly_A_vec.par_iter_mut(), - 
poly_B_vec.par_iter_mut(), - poly_C_vec.par_iter_mut(), - poly_D_vec.par_iter_mut() - ), - |num_rounds, poly_A, poly_B, poly_C, poly_D| { - if remaining_rounds <= *num_rounds { - let _ = rayon::join( - || { - rayon::join( - || poly_A.bind_poly_var_top(&r_i), - || poly_B.bind_poly_var_top(&r_i), - ) - }, - || { - rayon::join( - || poly_C.bind_poly_var_top(&r_i), - || poly_D.bind_poly_var_top(&r_i), - ) - }, - ); - } - } - ); - } - - let poly_A_final = poly_A_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_B_final = poly_B_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_C_final = poly_C_vec.into_iter().map(|poly| poly[0]).collect(); - let poly_D_final = poly_D_vec.into_iter().map(|poly| poly[0]).collect(); - - Ok(( - Self { - compressed_polys: polys, - }, - r, - vec![poly_A_final, poly_B_final, poly_C_final, poly_D_final], - )) - } -} diff --git a/src/supernova/circuit.rs b/src/supernova/circuit.rs deleted file mode 100644 index f9a3c05..0000000 --- a/src/supernova/circuit.rs +++ /dev/null @@ -1,838 +0,0 @@ -//! Supernova implementation support arbitrary argumented circuits and running -//! instances. There are two Verification Circuits for each argumented circuit: -//! The primary and the secondary. Each of them is over a cycle curve but -//! only the primary executes the next step of the computation. -//! Each circuit takes as input 2 hashes. -//! Each circuit folds the last invocation of the other into the respective -//! running instance, specified by `augmented_circuit_index` -//! -//! The augmented circuit F' for `SuperNova` that includes everything from Nova -//! and additionally checks: -//! 1. Ui[] are contained in X[0] hash pre-image. -//! 2. R1CS Instance u is folded into Ui[augmented_circuit_index] correctly; -//! just like Nova IVC. -//! 3. (optional by F logic) F circuit might check `program_counter_{i}` -//! invoked current F circuit is legal or not. -//! 3. F circuit produce `program_counter_{i+1}` and sent to next round to -//! 
optionally constraint the next F' argumented circuit. -use std::marker::PhantomData; - -use bellpepper::gadgets::{boolean_utils::conditionally_select_slice, Assignment}; -use bellpepper_core::{ - boolean::{AllocatedBit, Boolean}, - num::AllocatedNum, - ConstraintSystem, SynthesisError, -}; -use ff::{Field, PrimeField}; -use itertools::Itertools as _; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{NIO_NOVA_FOLD, NUM_HASH_BITS}, - gadgets::{ - alloc_num_equals, alloc_scalar_as_base, alloc_zero, - conditionally_select_alloc_relaxed_r1cs, - conditionally_select_vec_allocated_relaxed_r1cs_instance, le_bits_to_num, AllocatedPoint, - AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance, - }, - r1cs::{R1CSInstance, RelaxedR1CSInstance}, - supernova::{ - num_ro_inputs, - utils::{get_from_vec_alloc_relaxed_r1cs, get_selector_vec_from_index}, - }, - traits::{commitment::CommitmentTrait, Engine, ROCircuitTrait, ROConstantsCircuit}, - zip_with, Commitment, -}; - -// NOTE: This trait below is actually useful outside of this if you want to -// implement a step circuit on your own type. We use it in our prover code. -// However, there is a conflicting "StepCircuit" in -// `crate::traits::circuit::StepCircuit` which I deleted. We should likely have -// a supertrait here for NIVC that provides the circuit index because we only -// want that when we are using NIVC. Program counter should be able to be put to -// `None` otherwise, or we could handle that slightly differently too - -/// A helper trait for a step of the incremental computation for `SuperNova` -/// (i.e., circuit for F) -- to be implemented by applications. 
-pub trait StepCircuit: Send + Sync + Clone { - /// Return the the number of inputs or outputs of each step - /// (this method is called only at circuit synthesis time) - /// `synthesize` and `output` methods are expected to take as - /// input a vector of size equal to arity and output a vector of size equal - /// to arity - fn arity(&self) -> usize; - - /// Return this `StepCircuit`'s assigned index, for use when enforcing the - /// program counter. - fn circuit_index(&self) -> usize; - - /// Synthesize the circuit for a computation step and return variable - /// that corresponds to the output of the step `pc_{i+1}` and `z_{i+1}` - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError>; -} - -// NOTES: This seems to just enforce that when we call a circuit at a given -// step, it matches the set program counter. - -/// A helper trait for a step of the incremental computation for `SuperNova` -/// (i.e., circuit for F) -- automatically implemented for `StepCircuit` and -/// used internally to enforce that the circuit selected by the program counter -/// is used at each step. -pub trait EnforcingStepCircuit: Send + Sync + Clone + StepCircuit { - /// Delegate synthesis to `StepCircuit::synthesize`, and additionally, - /// enforce the constraint that program counter `pc`, if supplied, is - /// equal to the circuit's assigned index. 
- fn enforcing_synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - if let Some(pc) = pc { - let circuit_index = F::from(self.circuit_index() as u64); - - // pc * 1 = circuit_index - cs.enforce( - || "pc matches circuit index", - |lc| lc + pc.get_variable(), - |lc| lc + CS::one(), - |lc| lc + (circuit_index, CS::one()), - ); - } - self.synthesize(cs, pc, z) - } -} - -impl> EnforcingStepCircuit for S {} - -/// A trivial step circuit that simply returns the input -/// NOTE: Should only be used as secondary circuit!!! -#[derive(Clone, Debug, Default)] -pub struct TrivialCircuit { - _p: PhantomData, -} - -impl StepCircuit for TrivialCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - /// This will not interfere with other circuit indices in the primary - /// circuit. - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - _cs: &mut CS, - program_counter: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - Ok((program_counter.cloned(), z.to_vec())) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SuperNovaAugmentedCircuitParams { - limb_width: usize, - n_limbs: usize, - is_primary_circuit: bool, // A boolean indicating if this is the primary circuit -} - -impl SuperNovaAugmentedCircuitParams { - pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { - Self { - limb_width, - n_limbs, - is_primary_circuit, - } - } - - pub fn get_n_limbs(&self) -> usize { - self.n_limbs - } -} - -#[derive(Debug)] -pub struct SuperNovaAugmentedCircuitInputs<'a, E: Engine> { - pp_digest: E::Scalar, - i: E::Base, - /// Input to the circuit for the base case - z0: &'a [E::Base], - /// Input to the circuit for the non-base case - zi: Option<&'a [E::Base]>, - /// List of `RelaxedR1CSInstance`. - /// `None` if this is the base case. 
- /// Elements are `None` if the circuit at that index was not yet executed. - U: Option<&'a [Option>]>, - /// R1CS proof to be folded into U - u: Option<&'a R1CSInstance>, - /// Nova folding proof for accumulating u into U[j] - T: Option<&'a Commitment>, - /// Index of the current circuit - program_counter: Option, - /// Index j of circuit being folded into U[j] - last_augmented_circuit_index: E::Base, -} - -impl<'a, E: Engine> SuperNovaAugmentedCircuitInputs<'a, E> { - /// Create new inputs/witness for the verification circuit - pub fn new( - pp_digest: E::Scalar, - i: E::Base, - z0: &'a [E::Base], - zi: Option<&'a [E::Base]>, - U: Option<&'a [Option>]>, - u: Option<&'a R1CSInstance>, - T: Option<&'a Commitment>, - program_counter: Option, - last_augmented_circuit_index: E::Base, - ) -> Self { - Self { - pp_digest, - i, - z0, - zi, - U, - u, - T, - program_counter, - last_augmented_circuit_index, - } - } -} - -/// The augmented circuit F' in `SuperNova` that includes a step circuit F -/// and the circuit for the verifier in `SuperNova`'s non-interactive folding -/// scheme, `SuperNova` NIFS will fold strictly r1cs instance u with respective -/// relaxed r1cs instance `U[last_augmented_circuit_index]` -pub struct SuperNovaAugmentedCircuit<'a, E: Engine, SC: EnforcingStepCircuit> { - params: &'a SuperNovaAugmentedCircuitParams, - ro_consts: ROConstantsCircuit, - inputs: Option>, - step_circuit: &'a SC, // The function that is applied for each step - num_augmented_circuits: usize, // number of overall augmented circuits -} - -impl<'a, E: Engine, SC: EnforcingStepCircuit> SuperNovaAugmentedCircuit<'a, E, SC> { - /// Create a new verification circuit for the input relaxed r1cs instances - pub const fn new( - params: &'a SuperNovaAugmentedCircuitParams, - inputs: Option>, - step_circuit: &'a SC, - ro_consts: ROConstantsCircuit, - num_augmented_circuits: usize, - ) -> Self { - Self { - params, - inputs, - step_circuit, - ro_consts, - num_augmented_circuits, - } - } - - 
/// Allocate all witnesses from the augmented function's non-deterministic - /// inputs. Optional entries are allocated as their default values. - fn alloc_witness::Base>>( - &self, - mut cs: CS, - arity: usize, - num_augmented_circuits: usize, - ) -> Result< - ( - AllocatedNum, - AllocatedNum, - Vec>, - Vec>, - Vec>, - AllocatedR1CSInstance, - AllocatedPoint, - Option>, - Vec, - ), - SynthesisError, - > { - let last_augmented_circuit_index = - AllocatedNum::alloc(cs.namespace(|| "last_augmented_circuit_index"), || { - Ok(self.inputs.get()?.last_augmented_circuit_index) - })?; - - // Allocate the params - let params = alloc_scalar_as_base::( - cs.namespace(|| "params"), - self.inputs.as_ref().map(|inputs| inputs.pp_digest), - )?; - - // Allocate i - let i = AllocatedNum::alloc(cs.namespace(|| "i"), || Ok(self.inputs.get()?.i))?; - - // Allocate program_counter only on primary circuit - let program_counter = if self.params.is_primary_circuit { - Some(AllocatedNum::alloc( - cs.namespace(|| "program_counter"), - || { - Ok(self - .inputs - .get()? - .program_counter - .expect("program_counter missing")) - }, - )?) - } else { - None - }; - - // Allocate z0 - let z_0 = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("z0_{i}")), || { - Ok(self.inputs.get()?.z0[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate zi. 
If inputs.zi is not provided (base case) allocate default value - // 0 - let zero = vec![E::Base::ZERO; arity]; - let z_i = (0..arity) - .map(|i| { - AllocatedNum::alloc(cs.namespace(|| format!("zi_{i}")), || { - Ok(self.inputs.get()?.zi.unwrap_or(&zero)[i]) - }) - }) - .collect::>, _>>()?; - - // Allocate the running instances - let U = (0..num_augmented_circuits) - .map(|i| { - AllocatedRelaxedR1CSInstance::alloc( - cs.namespace(|| format!("Allocate U {:?}", i)), - self.inputs - .as_ref() - .and_then(|inputs| inputs.U.and_then(|U| U[i].as_ref())), - self.params.limb_width, - self.params.n_limbs, - ) - }) - .collect::>, _>>()?; - - // Allocate the r1cs instance to be folded in - let u = AllocatedR1CSInstance::alloc( - cs.namespace(|| "allocate instance u to fold"), - self.inputs.as_ref().and_then(|inputs| inputs.u), - )?; - - // Allocate T - let T = AllocatedPoint::alloc( - cs.namespace(|| "allocate T"), - self.inputs - .as_ref() - .and_then(|inputs| inputs.T.map(|T| T.to_coordinates())), - )?; - T.check_on_curve(cs.namespace(|| "check T on curve"))?; - - // Compute instance selector - let last_augmented_circuit_selector = get_selector_vec_from_index( - cs.namespace(|| "instance selector"), - &last_augmented_circuit_index, - num_augmented_circuits, - )?; - - Ok(( - params, - i, - z_0, - z_i, - U, - u, - T, - program_counter, - last_augmented_circuit_selector, - )) - } - - /// Synthesizes base case and returns the new relaxed `R1CSInstance` - fn synthesize_base_case::Base>>( - &self, - mut cs: CS, - u: AllocatedR1CSInstance, - last_augmented_circuit_selector: &[Boolean], - ) -> Result>, SynthesisError> { - let mut cs = cs.namespace(|| "alloc U_i default"); - - // Allocate a default relaxed r1cs instance - let default = AllocatedRelaxedR1CSInstance::default( - cs.namespace(|| "Allocate primary U_default".to_string()), - self.params.limb_width, - self.params.n_limbs, - )?; - - // The primary circuit just initialize single AllocatedRelaxedR1CSInstance - let U_default 
= if self.params.is_primary_circuit { - vec![default] - } else { - // The secondary circuit convert the incoming R1CS instance on index which match - // last_augmented_circuit_index - let incoming_r1cs = AllocatedRelaxedR1CSInstance::from_r1cs_instance( - cs.namespace(|| "Allocate incoming_r1cs"), - u, - self.params.limb_width, - self.params.n_limbs, - )?; - - last_augmented_circuit_selector - .iter() - .enumerate() - .map(|(i, equal_bit)| { - // If index match last_augmented_circuit_index, then return incoming_r1cs, - // otherwise return the default one - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| format!("select on index namespace {:?}", i)), - &incoming_r1cs, - &default, - equal_bit, - ) - }) - .collect::>, _>>()? - }; - Ok(U_default) - } - - /// Synthesizes non base case and returns the new relaxed `R1CSInstance` - /// And a boolean indicating if all checks pass - fn synthesize_non_base_case::Base>>( - &self, - mut cs: CS, - params: &AllocatedNum, - i: &AllocatedNum, - z_0: &[AllocatedNum], - z_i: &[AllocatedNum], - U: &[AllocatedRelaxedR1CSInstance], - u: &AllocatedR1CSInstance, - T: &AllocatedPoint, - arity: usize, - last_augmented_circuit_selector: &[Boolean], - program_counter: &Option>, - ) -> Result< - ( - Vec>, - AllocatedBit, - ), - SynthesisError, - > { - // Check that u.x[0] = Hash(params, i, program_counter, z0, zi, U[]) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - num_ro_inputs( - self.num_augmented_circuits, - self.params.get_n_limbs(), - arity, - self.params.is_primary_circuit, - ), - ); - ro.absorb(params); - ro.absorb(i); - - if self.params.is_primary_circuit { - let Some(program_counter) = program_counter.as_ref() else { - return Err(SynthesisError::AssignmentMissing); - }; - ro.absorb(program_counter) - } - - for e in z_0 { - ro.absorb(e); - } - for e in z_i { - ro.absorb(e); - } - - U.iter().enumerate().try_for_each(|(i, U)| { - U.absorb_in_ro(cs.namespace(|| format!("absorb U {:?}", i)), &mut ro) - })?; - - 
let hash_bits = ro.squeeze(cs.namespace(|| "Input hash"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "bits to hash"), &hash_bits)?; - let check_pass: AllocatedBit = alloc_num_equals( - cs.namespace(|| "check consistency of u.X[0] with H(params, U, i, z0, zi)"), - &u.X[0], - &hash, - )?; - - // Run NIFS Verifier - let U_to_fold = get_from_vec_alloc_relaxed_r1cs( - cs.namespace(|| "U to fold"), - U, - last_augmented_circuit_selector, - )?; - let U_fold = U_to_fold.fold_with_r1cs( - cs.namespace(|| "compute fold of U and u"), - params, - u, - T, - self.ro_consts.clone(), - self.params.limb_width, - self.params.n_limbs, - )?; - - // update AllocatedRelaxedR1CSInstance on index match augmented circuit index - let U_next: Vec> = zip_with!( - (U.iter(), last_augmented_circuit_selector.iter()), - |U, equal_bit| { - conditionally_select_alloc_relaxed_r1cs( - cs.namespace(|| "select on index namespace"), - &U_fold, - U, - equal_bit, - ) - } - ) - .collect::>, _>>()?; - - Ok((U_next, check_pass)) - } - - pub fn synthesize::Base>>( - self, - cs: &mut CS, - ) -> Result<(Option>, Vec>), SynthesisError> { - let arity = self.step_circuit.arity(); - let num_augmented_circuits = if self.params.is_primary_circuit { - // primary circuit only fold single running instance with secondary output - // strict r1cs instance - 1 - } else { - // secondary circuit contains the logic to choose one of multiple augments - // running instance to fold - self.num_augmented_circuits - }; - - if self.inputs.is_some() { - // Check arity of z0 - let z0_len = self.inputs.as_ref().map_or(0, |inputs| inputs.z0.len()); - if self.step_circuit.arity() != z0_len { - return Err(SynthesisError::IncompatibleLengthVector(format!( - "z0_len {:?} != arity length {:?}", - z0_len, - self.step_circuit.arity() - ))); - } - - // The primary curve should always fold the circuit with index 0 - let last_augmented_circuit_index = self - .inputs - .get() - .map_or(E::Base::ZERO, |inputs| 
inputs.last_augmented_circuit_index); - if self.params.is_primary_circuit && last_augmented_circuit_index != E::Base::ZERO { - return Err(SynthesisError::IncompatibleLengthVector( - "primary circuit running instance only valid on index 0".to_string(), - )); - } - } - - // Allocate witnesses - let (params, i, z_0, z_i, U, u, T, program_counter, last_augmented_circuit_selector) = self - .alloc_witness( - cs.namespace(|| "allocate the circuit witness"), - arity, - num_augmented_circuits, - )?; - - // Compute variable indicating if this is the base case - let zero = alloc_zero(cs.namespace(|| "zero")); - let is_base_case = - alloc_num_equals(cs.namespace(|| "Check if base case"), &i.clone(), &zero)?; - - // Synthesize the circuit for the non-base case and get the new running - // instances along with a boolean indicating if all checks have passed - // must use return `last_augmented_circuit_index_checked` since it got range - // checked - let (U_next_non_base, check_non_base_pass) = self.synthesize_non_base_case( - cs.namespace(|| "synthesize non base case"), - ¶ms, - &i, - &z_0, - &z_i, - &U, - &u, - &T, - arity, - &last_augmented_circuit_selector, - &program_counter, - )?; - - // Synthesize the circuit for the base case and get the new running instances - let U_next_base = self.synthesize_base_case( - cs.namespace(|| "base case"), - u.clone(), - &last_augmented_circuit_selector, - )?; - - // Either check_non_base_pass=true or we are in the base case - let should_be_false = AllocatedBit::nor( - cs.namespace(|| "check_non_base_pass nor base_case"), - &check_non_base_pass, - &is_base_case, - )?; - cs.enforce( - || "check_non_base_pass nor base_case = false", - |lc| lc + should_be_false.get_variable(), - |lc| lc + CS::one(), - |lc| lc, - ); - - // Compute the U_next - let U_next = conditionally_select_vec_allocated_relaxed_r1cs_instance( - cs.namespace(|| "U_next"), - &U_next_base[..], - &U_next_non_base[..], - &Boolean::from(is_base_case.clone()), - )?; - - // Compute i 
+ 1 - let i_next = AllocatedNum::alloc(cs.namespace(|| "i + 1"), || { - Ok(*i.get_value().get()? + E::Base::ONE) - })?; - cs.enforce( - || "check i + 1", - |lc| lc + i.get_variable() + CS::one(), - |lc| lc + CS::one(), - |lc| lc + i_next.get_variable(), - ); - - // Compute z_{i+1} - let z_input = conditionally_select_slice( - cs.namespace(|| "select input to F"), - &z_0, - &z_i, - &Boolean::from(is_base_case), - )?; - - let (program_counter_new, z_next) = self.step_circuit.enforcing_synthesize( - &mut cs.namespace(|| "F"), - program_counter.as_ref(), - &z_input, - )?; - - if z_next.len() != arity { - return Err(SynthesisError::IncompatibleLengthVector( - "z_next".to_string(), - )); - } - - // To check correct folding sequencing we are just going to make a hash. - // The next RunningInstance folding can take the pre-image of this hash as - // witness and check. - - // "Finally, there is a subtle sizing issue in the above description: in each - // step, because Ui+1 is produced as the public IO of F0 - // program_counter+1, it must be contained in the public IO of instance - // ui+1. In the next iteration, because ui+1 is folded - // into Ui+1[program_counter+1], this means that Ui+1[program_counter+1] is at - // least as large as Ui by the properties of the folding scheme. This - // means that the list of running instances grows in each step. To - // alleviate this issue, we have each F0j only produce a hash - // of its outputs as public output. In the subsequent step, the next augmented - // function takes as non-deterministic input a preimage to this hash." 
pg.16 - - // https://eprint.iacr.org/2022/1758.pdf - - // Compute the new hash H(params, i+1, program_counter, z0, z_{i+1}, U_next) - let mut ro = E::ROCircuit::new( - self.ro_consts.clone(), - num_ro_inputs( - self.num_augmented_circuits, - self.params.get_n_limbs(), - self.step_circuit.arity(), - self.params.is_primary_circuit, - ), - ); - ro.absorb(¶ms); - ro.absorb(&i_next); - // optionally absorb program counter if exist - if program_counter.is_some() { - ro.absorb( - program_counter_new - .as_ref() - .expect("new program counter missing"), - ) - } - for e in &z_0 { - ro.absorb(e); - } - for e in &z_next { - ro.absorb(e); - } - U_next.iter().enumerate().try_for_each(|(i, U)| { - U.absorb_in_ro(cs.namespace(|| format!("absorb U_new {:?}", i)), &mut ro) - })?; - - let hash_bits = ro.squeeze(cs.namespace(|| "output hash bits"), NUM_HASH_BITS)?; - let hash = le_bits_to_num(cs.namespace(|| "convert hash to num"), &hash_bits)?; - - // We are cycling of curve implementation, so primary/secondary will rotate hash - // in IO for the others to check bypass unmodified hash of other circuit - // as next X[0] and output the computed the computed hash as next X[1] - u.X[1].inputize(cs.namespace(|| "bypass unmodified hash of the other circuit"))?; - hash.inputize(cs.namespace(|| "output new hash of this circuit"))?; - - Ok((program_counter_new, z_next)) - } -} - -#[cfg(test)] -mod tests { - use expect_test::{expect, Expect}; - - use super::*; - use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - solver::SatisfyingAssignment, - test_shape_cs::TestShapeCS, - }, - constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, - gadgets::scalar_as_base, - provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, - supernova::circuit::TrivialCircuit, - traits::{snark::default_ck_hint, CurveCycleEquipped, Dual}, - }; - - // In the following we use 1 to refer to the primary, and 2 to refer to the - // secondary circuit - fn test_supernova_recursive_circuit_with( - 
primary_params: &SuperNovaAugmentedCircuitParams, - secondary_params: &SuperNovaAugmentedCircuitParams, - ro_consts1: ROConstantsCircuit>, - ro_consts2: ROConstantsCircuit, - num_constraints_primary: &Expect, - num_constraints_secondary: &Expect, - num_augmented_circuits: usize, - ) where - E1: CurveCycleEquipped, - { - let tc1 = TrivialCircuit::default(); - // Initialize the shape and ck for the primary - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new( - primary_params, - None, - &tc1, - ro_consts1.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = circuit1.synthesize(&mut cs); - let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); - - let tc2 = TrivialCircuit::default(); - // Initialize the shape and ck for the secondary - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - None, - &tc2, - ro_consts2.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS> = TestShapeCS::new(); - let _ = circuit2.synthesize(&mut cs); - let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - - num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); - - // Execute the base case for the primary - let zero1 = < as Engine>::Base as Field>::ZERO; - let mut cs1 = SatisfyingAssignment::::new(); - let vzero1 = vec![zero1]; - let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(zero1), // pass zero for testing - zero1, - &vzero1, - None, - None, - None, - None, - Some(zero1), - zero1, - ); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new( - primary_params, - Some(inputs1), - &tc1, - ro_consts1, - num_augmented_circuits, - ); 
- let _ = circuit1.synthesize(&mut cs1); - let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); - // Make sure that this is satisfiable - shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - - // Execute the base case for the secondary - let zero2 = <::Base as Field>::ZERO; - let mut cs2 = SatisfyingAssignment::>::new(); - let vzero2 = vec![zero2]; - let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::>(zero2), // pass zero for testing - zero2, - &vzero2, - None, - None, - Some(&inst1), - None, - Some(zero2), - zero2, - ); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - Some(inputs2), - &tc2, - ro_consts2, - num_augmented_circuits, - ); - let _ = circuit2.synthesize(&mut cs2); - let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); - // Make sure that it is satisfiable - shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); - } - - #[test] - fn test_supernova_recursive_circuit_grumpkin() { - let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - - test_supernova_recursive_circuit_with::( - ¶ms1, - ¶ms2, - ro_consts1, - ro_consts2, - &expect!["10004"], - &expect!["10573"], - 1, - ); - // TODO: extend to num_augmented_circuits >= 2 - } -} diff --git a/src/supernova/mod.rs b/src/supernova/mod.rs deleted file mode 100644 index 90b3740..0000000 --- a/src/supernova/mod.rs +++ /dev/null @@ -1,1296 +0,0 @@ -#![doc = include_str!("./Readme.md")] - -use std::{ops::Index, sync::Arc}; - -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::Field; -use itertools::Itertools as _; -use once_cell::sync::OnceCell; -use 
rayon::prelude::*; -use serde::{Deserialize, Serialize}; -use tracing::debug; - -use crate::{ - bellpepper::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, - }, - constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, - digest::{DigestComputer, SimpleDigestible}, - errors::NovaError, - nifs::NIFS, - r1cs::{ - self, commitment_key_size, CommitmentKeyHint, R1CSInstance, R1CSResult, R1CSShape, - R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness, - }, - scalar_as_base, - traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait}, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROConstants, ROConstantsCircuit, - ROTrait, - }, - Commitment, CommitmentKey, R1CSWithArity, -}; - -mod circuit; // declare the module first -pub use circuit::{StepCircuit, SuperNovaAugmentedCircuitParams, TrivialCircuit}; -use circuit::{SuperNovaAugmentedCircuit, SuperNovaAugmentedCircuitInputs}; -use error::SuperNovaError; - -/// A struct that manages all the digests of the primary circuits of a SuperNova -/// instance -#[derive(Debug, PartialEq, Eq, Serialize)] -pub struct CircuitDigests { - digests: Vec, -} - -impl SimpleDigestible for CircuitDigests {} - -impl std::ops::Deref for CircuitDigests { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.digests - } -} - -impl CircuitDigests { - /// Construct a new [`CircuitDigests`] - pub fn new(digests: Vec) -> Self { - Self { digests } - } - - /// Return the [`CircuitDigests`]' digest. 
- pub fn digest(&self) -> E::Scalar { - let dc: DigestComputer<'_, ::Scalar, Self> = DigestComputer::new(self); - dc.digest().expect("Failure in computing digest") - } -} - -/// A vector of [`R1CSWithArity`] adjoined to a set of [`PublicParams`] -#[derive(Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct PublicParams -where - E1: CurveCycleEquipped, -{ - /// The internal circuit shapes - circuit_shapes: Vec>, - - ro_consts_primary: ROConstants, - ro_consts_circuit_primary: ROConstantsCircuit>, - ck_primary: Arc>, // This is shared between all circuit params - augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, - - ro_consts_secondary: ROConstants>, - ro_consts_circuit_secondary: ROConstantsCircuit, - ck_secondary: Arc>>, - circuit_shape_secondary: R1CSWithArity>, - augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, - - /// Digest constructed from this `PublicParams`' parameters - #[serde(skip, default = "OnceCell::new")] - digest: OnceCell, -} - -/// Auxiliary [`PublicParams`] information about the commitment keys and -/// secondary circuit. This is used as a helper struct when reconstructing -/// [`PublicParams`] downstream in lurk. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct AuxParams -where - E1: CurveCycleEquipped, -{ - pub ro_consts_primary: ROConstants, - pub ro_consts_circuit_primary: ROConstantsCircuit>, - pub ck_primary: Arc>, // This is shared between all circuit params - pub augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams, - - pub ro_consts_secondary: ROConstants>, - pub ro_consts_circuit_secondary: ROConstantsCircuit, - pub ck_secondary: Arc>>, - pub circuit_shape_secondary: R1CSWithArity>, - pub augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams, - - pub digest: E1::Scalar, -} - -use std::io::Cursor; - -use crate::{ - fast_serde, - fast_serde::{FastSerde, SerdeByteError, SerdeByteTypes}, -}; - -impl FastSerde for AuxParams -where - E1: CurveCycleEquipped, - >::CommitmentKey: FastSerde, - <::CE as CommitmentEngineTrait>::CommitmentKey: - FastSerde, -{ - /// Byte format: - /// [0..4] - Magic number (4 bytes) - /// [4] - Serde type: AuxParams (u8) - /// [5] - Number of sections (u8 = 8) - /// Sections (repeated 8 times): - /// [N] - Section type (u8) - /// [N+1..5] - Section size (u32) - /// [N+5..] 
- Section data (variable length) - /// Section types: - /// 1: ro_consts_primary (bincode) - /// 2: ro_consts_circuit_primary (bincode) - /// 3: ck_primary (FastSerde) - /// 4: ro_consts_secondary (bincode) - /// 5: ro_consts_circuit_secondary (bincode) - /// 6: ck_secondary (FastSerde) - /// 7: circuit_shape_secondary (json) - /// 8: digest (bincode) - fn to_bytes(&self) -> Vec { - let mut out = Vec::new(); - - // Write header - out.extend_from_slice(&fast_serde::MAGIC_NUMBER); - out.push(fast_serde::SerdeByteTypes::AuxParams as u8); - out.push(8); // num_sections - - // Write sections - Self::write_section_bytes( - &mut out, - 1, - &bincode::serialize(&self.ro_consts_primary).unwrap(), - ); - Self::write_section_bytes( - &mut out, - 2, - &bincode::serialize(&self.ro_consts_circuit_primary).unwrap(), - ); - Self::write_section_bytes(&mut out, 3, &self.ck_primary.to_bytes()); - Self::write_section_bytes( - &mut out, - 4, - &bincode::serialize(&self.ro_consts_secondary).unwrap(), - ); - Self::write_section_bytes( - &mut out, - 5, - &bincode::serialize(&self.ro_consts_circuit_secondary).unwrap(), - ); - Self::write_section_bytes(&mut out, 6, &self.ck_secondary.to_bytes()); - Self::write_section_bytes( - &mut out, - 7, - &bincode::serialize(&self.circuit_shape_secondary).unwrap(), - ); - Self::write_section_bytes(&mut out, 8, &bincode::serialize(&self.digest).unwrap()); - - out - } - - fn from_bytes(bytes: &Vec) -> Result { - let mut cursor = Cursor::new(bytes); - - // Validate header - Self::validate_header(&mut cursor, SerdeByteTypes::AuxParams, 8)?; - - // Read all sections - let ro_consts_primary = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 1)?)?; - let ro_consts_circuit_primary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 2)?)?; - let ck_primary = Arc::new( - >::CommitmentKey::from_bytes( - &Self::read_section_bytes(&mut cursor, 3)?, - )?, - ); - let ro_consts_secondary = bincode::deserialize(&Self::read_section_bytes(&mut 
cursor, 4)?)?; - let ro_consts_circuit_secondary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 5)?)?; - let ck_secondary = Arc::new(<::CE as CommitmentEngineTrait< - E1::Secondary, - >>::CommitmentKey::from_bytes( - &Self::read_section_bytes(&mut cursor, 6)? - )?); - let circuit_shape_secondary = - bincode::deserialize(&Self::read_section_bytes(&mut cursor, 7)?)?; - let digest = bincode::deserialize(&Self::read_section_bytes(&mut cursor, 8)?)?; - - // NOTE: This does not check the digest. Maybe we should. - Ok(Self { - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary: SuperNovaAugmentedCircuitParams::new( - BN_LIMB_WIDTH, - BN_N_LIMBS, - true, - ), - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary: SuperNovaAugmentedCircuitParams::new( - BN_LIMB_WIDTH, - BN_N_LIMBS, - false, - ), - digest, - }) - } -} - -impl Index for PublicParams -where - E1: CurveCycleEquipped, -{ - type Output = R1CSWithArity; - - fn index(&self, index: usize) -> &Self::Output { - &self.circuit_shapes[index] - } -} - -impl SimpleDigestible for PublicParams where E1: CurveCycleEquipped {} - -impl PublicParams -where - E1: CurveCycleEquipped, -{ - /// Construct a new [`PublicParams`] - /// - /// # Note - /// - /// Public parameters set up a number of bases for the homomorphic - /// commitment scheme of Nova. - /// - /// Some final compressing SNARKs, like variants of Spartan, use computation - /// commitments that require larger sizes for these parameters. These - /// SNARKs provide a hint for these values by implementing - /// `RelaxedR1CSSNARKTrait::commitment_key_floor()`, which can be passed to - /// this function. - /// - /// If you're not using such a SNARK, pass `&(|_| 0)` instead. - /// - /// # Arguments - /// - /// * `non_uniform_circuit`: The non-uniform circuit of type `NC`. 
- /// * `ck_hint1`: A `CommitmentKeyHint` for `E1`, which is a function that - /// provides a hint for the number of generators required in the - /// commitment scheme for the primary circuit. - /// * `ck_hint2`: A `CommitmentKeyHint` for `E2`, similar to `ck_hint1`, but - /// for the secondary circuit. - pub fn setup>( - non_uniform_circuit: &NC, - ck_hint1: &CommitmentKeyHint, - ck_hint2: &CommitmentKeyHint>, - ) -> Self { - let num_circuits = non_uniform_circuit.num_circuits(); - - let augmented_circuit_params_primary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let ro_consts_primary: ROConstants = ROConstants::::default(); - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - - let circuit_shapes = get_circuit_shapes(non_uniform_circuit); - - let ck_primary = Self::compute_primary_ck(&circuit_shapes, ck_hint1); - let ck_primary = Arc::new(ck_primary); - - let augmented_circuit_params_secondary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts_secondary = ROConstants::>::default(); - let c_secondary = non_uniform_circuit.secondary_circuit(); - let F_arity_secondary = c_secondary.arity(); - let ro_consts_circuit_secondary: ROConstantsCircuit = - ROConstantsCircuit::::default(); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, NC::C2> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params_secondary, - None, - &c_secondary, - ro_consts_circuit_secondary.clone(), - num_circuits, - ); - let mut cs: ShapeCS> = ShapeCS::new(); - circuit_secondary - .synthesize(&mut cs) - .expect("circuit synthesis failed"); - let (r1cs_shape_secondary, ck_secondary) = cs.r1cs_shape_and_key(ck_hint2); - let ck_secondary = Arc::new(ck_secondary); - let circuit_shape_secondary = R1CSWithArity::new(r1cs_shape_secondary, F_arity_secondary); - - let pp 
= Self { - circuit_shapes, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest: OnceCell::new(), - }; - - // make sure to initialize the `OnceCell` and compute the digest - // and avoid paying for unexpected performance costs later - pp.digest(); - pp - } - - /// Breaks down an instance of [`PublicParams`] into the circuit params and - /// auxiliary params. - pub fn into_parts(self) -> (Vec>, AuxParams) { - let digest = self.digest(); - - let Self { - circuit_shapes, - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest: _digest, - } = self; - - let aux_params = AuxParams { - ro_consts_primary, - ro_consts_circuit_primary, - ck_primary, - augmented_circuit_params_primary, - ro_consts_secondary, - ro_consts_circuit_secondary, - ck_secondary, - circuit_shape_secondary, - augmented_circuit_params_secondary, - digest, - }; - - (circuit_shapes, aux_params) - } - - /// Returns just the [`AuxParams`] portion of [`PublicParams`] from a - /// reference to [`PublicParams`]. 
- pub fn aux_params(&self) -> AuxParams { - AuxParams { - ro_consts_primary: self.ro_consts_primary.clone(), - ro_consts_circuit_primary: self.ro_consts_circuit_primary.clone(), - ck_primary: self.ck_primary.clone(), - augmented_circuit_params_primary: self.augmented_circuit_params_primary.clone(), - ro_consts_secondary: self.ro_consts_secondary.clone(), - ro_consts_circuit_secondary: self.ro_consts_circuit_secondary.clone(), - ck_secondary: self.ck_secondary.clone(), - circuit_shape_secondary: self.circuit_shape_secondary.clone(), - augmented_circuit_params_secondary: self.augmented_circuit_params_secondary.clone(), - digest: self.digest(), - } - } - - /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and - /// auxiliary params. - pub fn from_parts(circuit_shapes: Vec>, aux_params: AuxParams) -> Self { - let pp = Self { - circuit_shapes, - ro_consts_primary: aux_params.ro_consts_primary, - ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, - ck_primary: aux_params.ck_primary, - augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, - ro_consts_secondary: aux_params.ro_consts_secondary, - ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, - ck_secondary: aux_params.ck_secondary, - circuit_shape_secondary: aux_params.circuit_shape_secondary, - augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, - digest: OnceCell::new(), - }; - assert_eq!( - aux_params.digest, - pp.digest(), - "param data is invalid; aux_params contained the incorrect digest" - ); - pp - } - - /// Create a [`PublicParams`] from a vector of raw [`R1CSWithArity`] and - /// auxiliary params. We don't check that the `aux_params.digest` is a - /// valid digest for the created params. 
- pub fn from_parts_unchecked( - circuit_shapes: Vec>, - aux_params: AuxParams, - ) -> Self { - Self { - circuit_shapes, - ro_consts_primary: aux_params.ro_consts_primary, - ro_consts_circuit_primary: aux_params.ro_consts_circuit_primary, - ck_primary: aux_params.ck_primary, - augmented_circuit_params_primary: aux_params.augmented_circuit_params_primary, - ro_consts_secondary: aux_params.ro_consts_secondary, - ro_consts_circuit_secondary: aux_params.ro_consts_circuit_secondary, - ck_secondary: aux_params.ck_secondary, - circuit_shape_secondary: aux_params.circuit_shape_secondary, - augmented_circuit_params_secondary: aux_params.augmented_circuit_params_secondary, - digest: aux_params.digest.into(), - } - } - - /// Compute primary and secondary commitment keys sized to handle the - /// largest of the circuits in the provided `R1CSWithArity`. - fn compute_primary_ck( - circuit_params: &[R1CSWithArity], - ck_hint1: &CommitmentKeyHint, - ) -> CommitmentKey { - let size_primary = circuit_params - .iter() - .map(|circuit| commitment_key_size(&circuit.r1cs_shape, ck_hint1)) - .max() - .unwrap(); - - E1::CE::setup(b"ck", size_primary) - } - - /// Return the [`PublicParams`]' digest. 
- pub fn digest(&self) -> E1::Scalar { - self.digest - .get_or_try_init(|| { - let dc: DigestComputer<'_, ::Scalar, Self> = - DigestComputer::new(self); - dc.digest() - }) - .cloned() - .expect("Failure in retrieving digest") - } - - /// Returns the number of constraints and variables of inner circuit based - /// on index - pub fn num_constraints_and_variables(&self, index: usize) -> (usize, usize) { - ( - self.circuit_shapes[index].r1cs_shape.num_cons, - self.circuit_shapes[index].r1cs_shape.num_vars, - ) - } - - /// Returns the number of constraints and variables of the secondary circuit - pub fn num_constraints_and_variables_secondary(&self) -> (usize, usize) { - ( - self.circuit_shape_secondary.r1cs_shape.num_cons, - self.circuit_shape_secondary.r1cs_shape.num_vars, - ) - } - - /// All of the primary circuit digests of this [`PublicParams`] - pub fn circuit_param_digests(&self) -> CircuitDigests { - let digests = self - .circuit_shapes - .iter() - .map(|cp| cp.digest()) - .collect::>(); - CircuitDigests { digests } - } - - /// Returns all the primary R1CS Shapes - fn primary_r1cs_shapes(&self) -> Vec<&R1CSShape> { - self.circuit_shapes - .iter() - .map(|cs| &cs.r1cs_shape) - .collect::>() - } -} - -pub fn get_circuit_shapes>( - non_uniform_circuit: &NC, -) -> Vec> { - let num_circuits = non_uniform_circuit.num_circuits(); - let augmented_circuit_params_primary = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit_primary are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit_primary: ROConstantsCircuit> = - ROConstantsCircuit::>::default(); - - (0..num_circuits) - .map(|i| { - let c_primary = non_uniform_circuit.primary_circuit(i); - let F_arity = c_primary.arity(); - // Initialize ck for the primary - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, NC::C1> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params_primary, - None, - &c_primary, - 
ro_consts_circuit_primary.clone(), - num_circuits, - ); - let mut cs: ShapeCS = ShapeCS::new(); - circuit_primary - .synthesize(&mut cs) - .expect("circuit synthesis failed"); - - // We use the largest commitment_key for all instances - let r1cs_shape_primary = cs.r1cs_shape(); - R1CSWithArity::new(r1cs_shape_primary, F_arity) - }) - .collect::>() -} - -/// A resource buffer for SuperNova's [`RecursiveSNARK`] for storing scratch -/// values that are computed by `prove_step`, which allows the reuse of memory -/// allocations and avoids unnecessary new allocations in the critical section. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -struct ResourceBuffer { - l_w: Option>, - l_u: Option>, - - ABC_Z_1: R1CSResult, - ABC_Z_2: R1CSResult, - - /// buffer for `commit_T` - T: Vec, -} - -/// A SNARK that proves the correct execution of an non-uniform incremental -/// computation -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - // Cached digest of the public parameters - pp_digest: E1::Scalar, - num_augmented_circuits: usize, - - // Number of iterations performed up to now - i: usize, - - // Inputs and outputs of the primary circuits - z0_primary: Vec, - zi_primary: Vec, - - // Proven circuit index, and current program counter - proven_circuit_index: usize, - program_counter: E1::Scalar, - - /// Buffer for memory needed by the primary fold-step - buffer_primary: ResourceBuffer, - /// Buffer for memory needed by the secondary fold-step - buffer_secondary: ResourceBuffer>, - - // Relaxed instances for the primary circuits - // Entries are `None` if the circuit has not been executed yet - r_W_primary: Vec>>, - r_U_primary: Vec>>, - - // Inputs and outputs of the secondary circuit - z0_secondary: Vec< as Engine>::Scalar>, - zi_secondary: Vec< as Engine>::Scalar>, - // Relaxed instance for the secondary circuit - r_W_secondary: RelaxedR1CSWitness>, - r_U_secondary: 
RelaxedR1CSInstance>, - // Proof for the secondary circuit to be accumulated into r_secondary in the next iteration - l_w_secondary: R1CSWitness>, - l_u_secondary: R1CSInstance>, -} - -impl RecursiveSNARK -where - E1: CurveCycleEquipped, -{ - /// iterate base step to get new instance of recursive SNARK - #[allow(clippy::too_many_arguments)] - pub fn new>( - pp: &PublicParams, - non_uniform_circuit: &C0, - c_primary: &C0::C1, - c_secondary: &C0::C2, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result { - let num_augmented_circuits = non_uniform_circuit.num_circuits(); - let circuit_index = non_uniform_circuit.initial_circuit_index(); - - let r1cs_secondary = &pp.circuit_shape_secondary.r1cs_shape; - - // check the length of the secondary initial input - if z0_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - - // check the arity of all the primary circuits match the initial input length - // pp.circuit_shapes.iter().try_for_each(|circuit| { - // if circuit.F_arity != z0_primary.len() { - // return Err(SuperNovaError::NovaError( - // NovaError::InvalidStepOutputLength, - // )); - // } - // Ok(()) - // })?; - - // base case for the primary - let mut cs_primary = SatisfyingAssignment::::new(); - let program_counter = E1::Scalar::from(circuit_index as u64); - let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(pp.digest()), - E1::Scalar::ZERO, - z0_primary, - None, // zi = None for basecase - None, // U = [None], since no previous proofs have been computed - None, // u = None since we are not verifying a secondary circuit - None, // T = None since there is not proof to fold - Some(program_counter), // pc = initial_program_counter for primary circuit - E1::Scalar::ZERO, // u_index is always zero for the primary circuit - ); - - let circuit_primary: 
SuperNovaAugmentedCircuit<'_, Dual, C0::C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - num_augmented_circuits, - ); - - let (zi_primary_pc_next, zi_primary) = - circuit_primary.synthesize(&mut cs_primary).map_err(|err| { - debug!("err {:?}", err); - NovaError::from(err) - })?; - if zi_primary.len() != pp[circuit_index].F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - let (u_primary, w_primary) = cs_primary - .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) - .map_err(|err| { - debug!("err {:?}", err); - err - })?; - - // base case for the secondary - let mut cs_secondary = SatisfyingAssignment::>::new(); - let u_primary_index = as Engine>::Scalar::from(circuit_index as u64); - let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = - SuperNovaAugmentedCircuitInputs::new( - pp.digest(), - as Engine>::Scalar::ZERO, - z0_secondary, - None, // zi = None for basecase - None, // U = Empty list of accumulators for the primary circuits - Some(&u_primary), // Proof for first iteration of current primary circuit - None, // T = None, since we just copy u_primary rather than fold it - None, // program_counter is always None for secondary circuit - u_primary_index, // index of the circuit proof u_primary - ); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C0::C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - num_augmented_circuits, - ); - let (_, zi_secondary) = circuit_secondary - .synthesize(&mut cs_secondary) - .map_err(NovaError::from)?; - if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(NovaError::InvalidStepOutputLength.into()); - } - let (u_secondary, w_secondary) = cs_secondary - .r1cs_instance_and_witness(r1cs_secondary, 
&pp.ck_secondary) - .map_err(|_| SuperNovaError::NovaError(NovaError::UnSat))?; - - // IVC proof for the primary circuit - let l_w_primary = w_primary; - let l_u_primary = u_primary; - let r_W_primary = - RelaxedR1CSWitness::from_r1cs_witness(&pp[circuit_index].r1cs_shape, l_w_primary); - - let r_U_primary = RelaxedR1CSInstance::from_r1cs_instance( - &*pp.ck_primary, - &pp[circuit_index].r1cs_shape, - l_u_primary, - ); - - // IVC proof of the secondary circuit - let l_w_secondary = w_secondary; - let l_u_secondary = u_secondary; - - // Initialize relaxed instance/witness pair for the secondary circuit proofs - let r_W_secondary = RelaxedR1CSWitness::>::default(r1cs_secondary); - let r_U_secondary = RelaxedR1CSInstance::default(&*pp.ck_secondary, r1cs_secondary); - - // Outputs of the two circuits and next program counter thus far. - let zi_primary = zi_primary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect::::Scalar>, SuperNovaError>>()?; - let zi_primary_pc_next = zi_primary_pc_next - .expect("zi_primary_pc_next missing") - .get_value() - .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; - let zi_secondary = zi_secondary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect:: as Engine>::Scalar>, SuperNovaError>>()?; - - // handle the base case by initialize U_next in next round - let r_W_primary_initial_list = (0..num_augmented_circuits) - .map(|i| (i == circuit_index).then(|| r_W_primary.clone())) - .collect::>>>(); - - let r_U_primary_initial_list = (0..num_augmented_circuits) - .map(|i| (i == circuit_index).then(|| r_U_primary.clone())) - .collect::>>>(); - - // find the largest length r1cs shape for the buffer size - let max_num_cons = pp - .circuit_shapes - .iter() - .map(|circuit| circuit.r1cs_shape.num_cons) - .max() - .unwrap(); - - let buffer_primary = ResourceBuffer { - l_w: None, - l_u: 
None, - ABC_Z_1: R1CSResult::default(max_num_cons), - ABC_Z_2: R1CSResult::default(max_num_cons), - T: r1cs::default_T::(max_num_cons), - }; - - let buffer_secondary = ResourceBuffer { - l_w: None, - l_u: None, - ABC_Z_1: R1CSResult::default(r1cs_secondary.num_cons), - ABC_Z_2: R1CSResult::default(r1cs_secondary.num_cons), - T: r1cs::default_T::>(r1cs_secondary.num_cons), - }; - - Ok(Self { - pp_digest: pp.digest(), - num_augmented_circuits, - i: 0_usize, // after base case, next iteration start from 1 - z0_primary: z0_primary.to_vec(), - zi_primary, - - proven_circuit_index: circuit_index, - program_counter: zi_primary_pc_next, - - buffer_primary, - buffer_secondary, - - r_W_primary: r_W_primary_initial_list, - r_U_primary: r_U_primary_initial_list, - z0_secondary: z0_secondary.to_vec(), - zi_secondary, - r_W_secondary, - r_U_secondary, - l_w_secondary, - l_u_secondary, - }) - } - - /// Inputs of the primary circuits - pub fn z0_primary(&self) -> &Vec { - &self.z0_primary - } - - /// Outputs of the primary circuits - pub fn zi_primary(&self) -> &Vec { - &self.zi_primary - } - - /// executing a step of the incremental computation - #[allow(clippy::too_many_arguments)] - #[tracing::instrument(skip_all, name = "supernova::RecursiveSNARK::prove_step")] - pub fn prove_step< - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, - >( - &mut self, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - ) -> Result<(), SuperNovaError> { - // First step was already done in the constructor - if self.i == 0 { - self.i = 1; - return Ok(()); - } - - // save the inputs before proceeding to the `i+1`th step - let r_U_primary_i = self.r_U_primary.clone(); - // Create single-entry accumulator list for the secondary circuit to hand to - // SuperNovaAugmentedCircuitInputs - let r_U_secondary_i = vec![Some(self.r_U_secondary.clone())]; - let l_u_secondary_i = self.l_u_secondary.clone(); - - let circuit_index = c_primary.circuit_index(); - assert_eq!(self.program_counter, 
E1::Scalar::from(circuit_index as u64)); - - // fold the secondary circuit's instance - let (nifs_secondary, _) = NIFS::prove_mut( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(self.pp_digest), - &pp.circuit_shape_secondary.r1cs_shape, - &mut self.r_U_secondary, - &mut self.r_W_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - &mut self.buffer_secondary.T, - &mut self.buffer_secondary.ABC_Z_1, - &mut self.buffer_secondary.ABC_Z_2, - ) - .map_err(SuperNovaError::NovaError)?; - - let mut cs_primary = SatisfyingAssignment::::with_capacity( - pp[circuit_index].r1cs_shape.num_io + 1, - pp[circuit_index].r1cs_shape.num_vars, - ); - let T = Commitment::>::decompress(&nifs_secondary.comm_T) - .map_err(SuperNovaError::NovaError)?; - let inputs_primary: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(self.pp_digest), - E1::Scalar::from(self.i as u64), - &self.z0_primary, - Some(&self.zi_primary), - Some(&r_U_secondary_i), - Some(&l_u_secondary_i), - Some(&T), - Some(self.program_counter), - E1::Scalar::ZERO, - ); - - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - Some(inputs_primary), - c_primary, - pp.ro_consts_circuit_primary.clone(), - self.num_augmented_circuits, - ); - - let (zi_primary_pc_next, zi_primary) = circuit_primary - .synthesize(&mut cs_primary) - .map_err(NovaError::from)?; - if zi_primary.len() != pp[circuit_index].F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidInitialInputLength, - )); - } - - let (l_u_primary, l_w_primary) = cs_primary - .r1cs_instance_and_witness(&pp[circuit_index].r1cs_shape, &pp.ck_primary) - .map_err(SuperNovaError::NovaError)?; - - let (r_U_primary, r_W_primary) = if let (Some(Some(r_U_primary)), Some(Some(r_W_primary))) = ( - self.r_U_primary.get_mut(circuit_index), - self.r_W_primary.get_mut(circuit_index), - ) { - (r_U_primary, 
r_W_primary) - } else { - self.r_U_primary[circuit_index] = Some(RelaxedR1CSInstance::default( - &*pp.ck_primary, - &pp[circuit_index].r1cs_shape, - )); - self.r_W_primary[circuit_index] = - Some(RelaxedR1CSWitness::default(&pp[circuit_index].r1cs_shape)); - ( - self.r_U_primary[circuit_index].as_mut().unwrap(), - self.r_W_primary[circuit_index].as_mut().unwrap(), - ) - }; - - let (nifs_primary, _) = NIFS::prove_mut( - &*pp.ck_primary, - &pp.ro_consts_primary, - &self.pp_digest, - &pp[circuit_index].r1cs_shape, - r_U_primary, - r_W_primary, - &l_u_primary, - &l_w_primary, - &mut self.buffer_primary.T, - &mut self.buffer_primary.ABC_Z_1, - &mut self.buffer_primary.ABC_Z_2, - ) - .map_err(SuperNovaError::NovaError)?; - - let mut cs_secondary = SatisfyingAssignment::>::with_capacity( - pp.circuit_shape_secondary.r1cs_shape.num_io + 1, - pp.circuit_shape_secondary.r1cs_shape.num_vars, - ); - let binding = Commitment::::decompress(&nifs_primary.comm_T) - .map_err(SuperNovaError::NovaError)?; - let inputs_secondary: SuperNovaAugmentedCircuitInputs<'_, E1> = - SuperNovaAugmentedCircuitInputs::new( - self.pp_digest, - as Engine>::Scalar::from(self.i as u64), - &self.z0_secondary, - Some(&self.zi_secondary), - Some(&r_U_primary_i), - Some(&l_u_primary), - Some(&binding), - None, // pc is always None for secondary circuit - as Engine>::Scalar::from(circuit_index as u64), - ); - - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - Some(inputs_secondary), - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - self.num_augmented_circuits, - ); - let (_, zi_secondary) = circuit_secondary - .synthesize(&mut cs_secondary) - .map_err(NovaError::from)?; - if zi_secondary.len() != pp.circuit_shape_secondary.F_arity { - return Err(SuperNovaError::NovaError( - NovaError::InvalidInitialInputLength, - )); - } - - let (l_u_secondary_next, l_w_secondary_next) = cs_secondary - 
.r1cs_instance_and_witness(&pp.circuit_shape_secondary.r1cs_shape, &pp.ck_secondary)?; - - // update the running instances and witnesses - let zi_primary = zi_primary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect::::Scalar>, SuperNovaError>>()?; - let zi_primary_pc_next = zi_primary_pc_next - .expect("zi_primary_pc_next missing") - .get_value() - .ok_or::(NovaError::from(SynthesisError::AssignmentMissing).into())?; - let zi_secondary = zi_secondary - .iter() - .map(|v| { - v.get_value() - .ok_or(NovaError::from(SynthesisError::AssignmentMissing).into()) - }) - .collect:: as Engine>::Scalar>, SuperNovaError>>()?; - - if zi_primary.len() != pp[circuit_index].F_arity - || zi_secondary.len() != pp.circuit_shape_secondary.F_arity - { - return Err(SuperNovaError::NovaError( - NovaError::InvalidStepOutputLength, - )); - } - - self.l_w_secondary = l_w_secondary_next; - self.l_u_secondary = l_u_secondary_next; - self.i += 1; - self.zi_primary = zi_primary; - self.zi_secondary = zi_secondary; - self.proven_circuit_index = circuit_index; - self.program_counter = zi_primary_pc_next; - Ok(()) - } - - /// verify recursive snark - pub fn verify( - &self, - pp: &PublicParams, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { - // number of steps cannot be zero - if self.i == 0 { - debug!("must verify on valid RecursiveSNARK where i > 0"); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // Check lengths of r_primary - if self.r_U_primary.len() != self.num_augmented_circuits - || self.r_W_primary.len() != self.num_augmented_circuits - { - debug!("r_primary length mismatch"); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // Check that there are no missing instance/witness pairs - self.r_U_primary - .iter() - .zip_eq(self.r_W_primary.iter()) - .enumerate() - 
.try_for_each(|(i, (u, w))| match (u, w) { - (Some(_), Some(_)) | (None, None) => Ok(()), - _ => { - debug!("r_primary[{:?}]: mismatched instance/witness pair", i); - Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)) - } - })?; - - let circuit_index = self.proven_circuit_index; - - // check we have an instance/witness pair for the circuit_index - if self.r_U_primary[circuit_index].is_none() { - debug!( - "r_primary[{:?}]: instance/witness pair is missing", - circuit_index - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // check the (relaxed) R1CS instances public outputs. - { - for (i, r_U_primary_i) in self.r_U_primary.iter().enumerate() { - if let Some(u) = r_U_primary_i { - if u.X.len() != 2 { - debug!( - "r_U_primary[{:?}] got instance length {:?} != 2", - i, - u.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - } - } - - if self.l_u_secondary.X.len() != 2 { - debug!( - "l_U_secondary got instance length {:?} != 2", - self.l_u_secondary.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - if self.r_U_secondary.X.len() != 2 { - debug!( - "r_U_secondary got instance length {:?} != 2", - self.r_U_secondary.X.len(), - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - } - - let hash_primary = { - let num_absorbs = num_ro_inputs( - self.num_augmented_circuits, - pp.augmented_circuit_params_primary.get_n_limbs(), - pp[circuit_index].F_arity, - true, // is_primary - ); - - let mut hasher = - as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_absorbs); - hasher.absorb(self.pp_digest); - hasher.absorb(E1::Scalar::from(self.i as u64)); - hasher.absorb(self.program_counter); - - for e in z0_primary { - hasher.absorb(*e); - } - for e in &self.zi_primary { - hasher.absorb(*e); - } - - self.r_U_secondary.absorb_in_ro(&mut hasher); - hasher.squeeze(NUM_HASH_BITS) - }; - - let hash_secondary = { - let num_absorbs = 
num_ro_inputs( - self.num_augmented_circuits, - pp.augmented_circuit_params_secondary.get_n_limbs(), - pp.circuit_shape_secondary.F_arity, - false, // is_primary - ); - let mut hasher = ::RO::new(pp.ro_consts_primary.clone(), num_absorbs); - hasher.absorb(scalar_as_base::(self.pp_digest)); - hasher.absorb( as Engine>::Scalar::from(self.i as u64)); - - for e in z0_secondary { - hasher.absorb(*e); - } - for e in &self.zi_secondary { - hasher.absorb(*e); - } - - self.r_U_primary.iter().enumerate().for_each(|(i, U)| { - U.as_ref() - .unwrap_or(&RelaxedR1CSInstance::default( - &*pp.ck_primary, - &pp[i].r1cs_shape, - )) - .absorb_in_ro(&mut hasher); - }); - hasher.squeeze(NUM_HASH_BITS) - }; - - if hash_primary != self.l_u_secondary.X[0] { - debug!( - "hash_primary {:?} not equal l_u_secondary.X[0] {:?}", - hash_primary, self.l_u_secondary.X[0] - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { - debug!( - "hash_secondary {:?} not equal l_u_secondary.X[1] {:?}", - hash_secondary, self.l_u_secondary.X[1] - ); - return Err(SuperNovaError::NovaError(NovaError::ProofVerifyError)); - } - - // check the satisfiability of all instance/witness pairs - let (res_r_primary, (res_r_secondary, res_l_secondary)) = rayon::join( - || { - self.r_U_primary - .par_iter() - .zip_eq(self.r_W_primary.par_iter()) - .enumerate() - .try_for_each(|(i, (u, w))| { - if let (Some(u), Some(w)) = (u, w) { - pp[i].r1cs_shape.is_sat_relaxed(&pp.ck_primary, u, w)? 
- } - Ok(()) - }) - }, - || { - rayon::join( - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat_relaxed( - &pp.ck_secondary, - &self.r_U_secondary, - &self.r_W_secondary, - ) - }, - || { - pp.circuit_shape_secondary.r1cs_shape.is_sat( - &pp.ck_secondary, - &self.l_u_secondary, - &self.l_w_secondary, - ) - }, - ) - }, - ); - - res_r_primary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_primary", i), - e => SuperNovaError::NovaError(e), - })?; - res_r_secondary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("r_secondary", i), - e => SuperNovaError::NovaError(e), - })?; - res_l_secondary.map_err(|err| match err { - NovaError::UnSatIndex(i) => SuperNovaError::UnSatIndex("l_secondary", i), - e => SuperNovaError::NovaError(e), - })?; - - Ok((self.zi_primary.clone(), self.zi_secondary.clone())) - } -} - -/// SuperNova helper trait, for implementors that provide sets of sub-circuits -/// to be proved via NIVC. `C1` must be a type (likely an `Enum`) for which a -/// potentially-distinct instance can be supplied for each `index` below -/// `self.num_circuits()`. -pub trait NonUniformCircuit -where - E1: CurveCycleEquipped, -{ - /// The type of the step-circuits on the primary - type C1: StepCircuit; - /// The type of the step-circuits on the secondary - type C2: StepCircuit< as Engine>::Scalar>; - - /// Initial circuit index, defaults to zero. - fn initial_circuit_index(&self) -> usize { - 0 - } - - /// How many circuits are provided? - fn num_circuits(&self) -> usize; - - /// Return a new instance of the primary circuit at `index`. - fn primary_circuit(&self, circuit_index: usize) -> Self::C1; - - /// Return a new instance of the secondary circuit. - fn secondary_circuit(&self) -> Self::C2; -} - -/// Compute the circuit digest of a supernova [`StepCircuit`]. -/// -/// Note for callers: This function should be called with its performance -/// characteristics in mind. 
It will synthesize and digest the full `circuit` -/// given. -pub fn circuit_digest>( - circuit: &C, - num_augmented_circuits: usize, -) -> E1::Scalar { - let augmented_circuit_params = - SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - - // ro_consts_circuit are parameterized by E2 because the type alias uses - // E2::Base = E1::Scalar - let ro_consts_circuit = ROConstantsCircuit::>::default(); - - // Initialize ck for the primary - let augmented_circuit: SuperNovaAugmentedCircuit<'_, Dual, C> = - SuperNovaAugmentedCircuit::new( - &augmented_circuit_params, - None, - circuit, - ro_consts_circuit, - num_augmented_circuits, - ); - let mut cs: ShapeCS = ShapeCS::new(); - let _ = augmented_circuit.synthesize(&mut cs); - - let F_arity = circuit.arity(); - let circuit_params = R1CSWithArity::new(cs.r1cs_shape(), F_arity); - circuit_params.digest() -} - -/// Compute the number of absorbs for the random-oracle computing the circuit -/// output X = H(vk, i, pc, z0, zi, U) -fn num_ro_inputs(num_circuits: usize, num_limbs: usize, arity: usize, is_primary: bool) -> usize { - let num_circuits = if is_primary { 1 } else { num_circuits }; - - // [W(x,y,∞), E(x,y,∞), u] + [X0, X1] * #num_limb - let instance_size = 3 + 3 + 1 + 2 * num_limbs; - - 2 // params, i - + usize::from(is_primary) // optional program counter - + 2 * arity // z0, zi - + num_circuits * instance_size -} - -pub mod error; -pub mod snark; -mod utils; - -#[cfg(test)] -mod test; diff --git a/src/supernova/snark.rs b/src/supernova/snark.rs deleted file mode 100644 index d01f9b5..0000000 --- a/src/supernova/snark.rs +++ /dev/null @@ -1,663 +0,0 @@ -//! 
This module defines a final compressing SNARK for supernova proofs - -use ff::PrimeField; -use serde::{Deserialize, Serialize}; - -use super::{error::SuperNovaError, PublicParams, RecursiveSNARK}; -use crate::{ - constants::NUM_HASH_BITS, - errors::NovaError, - r1cs::{R1CSInstance, RelaxedR1CSWitness}, - scalar_as_base, - traits::{ - snark::{BatchedRelaxedR1CSSNARKTrait, RelaxedR1CSSNARKTrait}, - AbsorbInROTrait, CurveCycleEquipped, Dual, Engine, ROTrait, - }, - RelaxedR1CSInstance, NIFS, -}; - -/// A type that holds the prover key for `CompressedSNARK` -#[derive(Debug, Serialize, Deserialize)] -pub struct ProverKey -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub pk_primary: S1::ProverKey, - pub pk_secondary: S2::ProverKey, -} - -/// A type that holds the verifier key for `CompressedSNARK` -#[derive(Debug, Serialize, Deserialize)] -pub struct VerifierKey -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub vk_primary: S1::VerifierKey, - pub vk_secondary: S2::VerifierKey, -} - -/// A SNARK that proves the knowledge of a valid `RecursiveSNARK` -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CompressedSNARK -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - r_U_primary: Vec>, - r_W_snark_primary: S1, - - r_U_secondary: RelaxedR1CSInstance>, - l_u_secondary: R1CSInstance>, - nifs_secondary: NIFS>, - f_W_snark_secondary: S2, - - num_steps: usize, - program_counter: E1::Scalar, - - zn_primary: Vec, - zn_secondary: Vec< as Engine>::Scalar>, -} - -impl CompressedSNARK -where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, -{ - pub fn initialize_pk( - pp: &PublicParams, - primary_vk_digest: E1::Scalar, - secondary_vk_digest: as Engine>::Scalar, - ) -> Result, SuperNovaError> { - // TODO: Should we actually clone here? 
- let pk_primary = S1::initialize_pk(pp.ck_primary.clone(), primary_vk_digest)?; - let pk_secondary = S2::initialize_pk(pp.ck_secondary.clone(), secondary_vk_digest)?; - - return Ok(ProverKey { - pk_primary, - pk_secondary, - }); - } - - /// Creates prover and verifier keys for `CompressedSNARK` - pub fn setup( - pp: &PublicParams, - ) -> Result<(ProverKey, VerifierKey), SuperNovaError> { - let (pk_primary, vk_primary) = S1::setup(pp.ck_primary.clone(), pp.primary_r1cs_shapes())?; - - let (pk_secondary, vk_secondary) = S2::setup( - pp.ck_secondary.clone(), - &pp.circuit_shape_secondary.r1cs_shape, - )?; - - let prover_key = ProverKey { - pk_primary, - pk_secondary, - }; - let verifier_key = VerifierKey { - vk_primary, - vk_secondary, - }; - - Ok((prover_key, verifier_key)) - } - - /// Create a new `CompressedSNARK` - pub fn prove( - pp: &PublicParams, - pk: &ProverKey, - recursive_snark: &RecursiveSNARK, - ) -> Result { - // fold the secondary circuit's instance - let res_secondary = NIFS::prove( - &*pp.ck_secondary, - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &pp.circuit_shape_secondary.r1cs_shape, - &recursive_snark.r_U_secondary, - &recursive_snark.r_W_secondary, - &recursive_snark.l_u_secondary, - &recursive_snark.l_w_secondary, - ); - - let (nifs_secondary, (f_U_secondary, f_W_secondary), _) = res_secondary?; - - // Prepare the list of primary Relaxed R1CS instances (a default instance is - // provided for uninitialized circuits) - let r_U_primary = recursive_snark - .r_U_primary - .iter() - .enumerate() - .map(|(idx, r_U)| { - r_U.clone().unwrap_or_else(|| { - RelaxedR1CSInstance::default(&*pp.ck_primary, &pp[idx].r1cs_shape) - }) - }) - .collect::>(); - - // Prepare the list of primary relaxed R1CS witnesses (a default witness is - // provided for uninitialized circuits) - let r_W_primary: Vec> = recursive_snark - .r_W_primary - .iter() - .enumerate() - .map(|(idx, r_W)| { - r_W.clone() - .unwrap_or_else(|| 
RelaxedR1CSWitness::default(&pp[idx].r1cs_shape)) - }) - .collect::>(); - - // Generate a primary SNARK proof for the list of primary circuits - let r_W_snark_primary = S1::prove( - &pp.ck_primary, - &pk.pk_primary, - pp.primary_r1cs_shapes(), - &r_U_primary, - &r_W_primary, - )?; - - // Generate a secondary SNARK proof for the secondary circuit - let f_W_snark_secondary = S2::prove( - &pp.ck_secondary, - &pk.pk_secondary, - &pp.circuit_shape_secondary.r1cs_shape, - &f_U_secondary, - &f_W_secondary, - )?; - - let compressed_snark = Self { - r_U_primary, - r_W_snark_primary, - - r_U_secondary: recursive_snark.r_U_secondary.clone(), - l_u_secondary: recursive_snark.l_u_secondary.clone(), - nifs_secondary, - f_W_snark_secondary, - - num_steps: recursive_snark.i, - program_counter: recursive_snark.program_counter, - - zn_primary: recursive_snark.zi_primary.clone(), - zn_secondary: recursive_snark.zi_secondary.clone(), - }; - - Ok(compressed_snark) - } - - /// Verify the correctness of the `CompressedSNARK` - pub fn verify( - &self, - pp: &PublicParams, - vk: &VerifierKey, - z0_primary: &[E1::Scalar], - z0_secondary: &[ as Engine>::Scalar], - ) -> Result<(Vec, Vec< as Engine>::Scalar>), SuperNovaError> { - let last_circuit_idx = field_as_usize(self.program_counter); - - let num_field_primary_ro = 3 // params_next, i_new, program_counter_new - + 2 * pp[last_circuit_idx].F_arity // zo, z1 - + (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // # 1 * (7 + [X0, X1]*#num_limb) - - // secondary circuit - // NOTE: This count ensure the number of witnesses sent by the prover must equal - // the number of NIVC circuits - let num_field_secondary_ro = 2 // params_next, i_new - + 2 * pp.circuit_shape_secondary.F_arity // zo, z1 - + pp.circuit_shapes.len() * (7 + 2 * pp.augmented_circuit_params_primary.get_n_limbs()); // #num_augment - - // Compute the primary and secondary hashes given the digest, program counter, - // instances, and witnesses provided by the prover - 
let (hash_primary, hash_secondary) = { - let mut hasher = - as Engine>::RO::new(pp.ro_consts_secondary.clone(), num_field_primary_ro); - - hasher.absorb(pp.digest()); - hasher.absorb(E1::Scalar::from(self.num_steps as u64)); - hasher.absorb(self.program_counter); - - for e in z0_primary { - hasher.absorb(*e); - } - - for e in &self.zn_primary { - hasher.absorb(*e); - } - - self.r_U_secondary.absorb_in_ro(&mut hasher); - - let mut hasher2 = - ::RO::new(pp.ro_consts_primary.clone(), num_field_secondary_ro); - - hasher2.absorb(scalar_as_base::(pp.digest())); - hasher2.absorb( as Engine>::Scalar::from(self.num_steps as u64)); - - for e in z0_secondary { - hasher2.absorb(*e); - } - - for e in &self.zn_secondary { - hasher2.absorb(*e); - } - - self.r_U_primary.iter().for_each(|U| { - U.absorb_in_ro(&mut hasher2); - }); - - ( - hasher.squeeze(NUM_HASH_BITS), - hasher2.squeeze(NUM_HASH_BITS), - ) - }; - - // Compare the computed hashes with the public IO of the last invocation of - // `prove_step` - if hash_primary != self.l_u_secondary.X[0] { - return Err(NovaError::ProofVerifyError.into()); - } - - if hash_secondary != scalar_as_base::>(self.l_u_secondary.X[1]) { - return Err(NovaError::ProofVerifyError.into()); - } - - // Verify the primary SNARK - let res_primary = self - .r_W_snark_primary - .verify(&vk.vk_primary, &self.r_U_primary); - - // Fold the secondary circuit's instance - let f_U_secondary = self.nifs_secondary.verify( - &pp.ro_consts_secondary, - &scalar_as_base::(pp.digest()), - &self.r_U_secondary, - &self.l_u_secondary, - )?; - - // Verify the secondary SNARK - let res_secondary = self - .f_W_snark_secondary - .verify(&vk.vk_secondary, &f_U_secondary); - - res_primary?; - - res_secondary?; - - Ok((self.zn_primary.clone(), self.zn_secondary.clone())) - } -} - -fn field_as_usize(x: F) -> usize { - u32::from_le_bytes(x.to_repr().as_ref()[0..4].try_into().unwrap()) as usize -} - -#[cfg(test)] -mod test { - use std::marker::PhantomData; - - use 
bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; - use ff::Field; - - use super::*; - use crate::{ - provider::{ipa_pc, Bn256EngineIPA}, - spartan::{batched, batched_ppsnark, snark::RelaxedR1CSSNARK}, - supernova::{circuit::TrivialCircuit, NonUniformCircuit, StepCircuit}, - }; - - type EE = ipa_pc::EvaluationEngine; - type S1 = batched::BatchedRelaxedR1CSSNARK>; - type S1PP = batched_ppsnark::BatchedRelaxedR1CSSNARK>; - type S2 = RelaxedR1CSSNARK>; - - #[derive(Clone)] - struct SquareCircuit { - _p: PhantomData, - } - - impl StepCircuit for SquareCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let z_i = &z[0]; - - let z_next = z_i.square(cs.namespace(|| "z_i^2"))?; - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(1u64)))?; - - cs.enforce( - || "next_pc = 1", - |lc| lc + CS::one(), - |lc| lc + next_pc.get_variable(), - |lc| lc + CS::one(), - ); - - Ok((Some(next_pc), vec![z_next])) - } - } - - #[derive(Clone)] - struct CubeCircuit { - _p: PhantomData, - } - - impl StepCircuit for CubeCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let z_i = &z[0]; - - let z_sq = z_i.square(cs.namespace(|| "z_i^2"))?; - let z_cu = z_sq.mul(cs.namespace(|| "z_i^3"), z_i)?; - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; - - cs.enforce( - || "next_pc = 0", - |lc| lc + CS::one(), - |lc| lc + next_pc.get_variable(), - |lc| lc, - ); - - Ok((Some(next_pc), vec![z_cu])) - } - } - - #[derive(Clone)] - enum TestCircuit { - Square(SquareCircuit), - 
Cube(CubeCircuit), - } - - impl TestCircuit { - fn new(num_steps: usize) -> Vec { - let mut circuits = Vec::new(); - - for idx in 0..num_steps { - if idx % 2 == 0 { - circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) - } else { - circuits.push(Self::Cube(CubeCircuit { _p: PhantomData })) - } - } - - circuits - } - } - - impl StepCircuit for TestCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Square(c) => c.circuit_index(), - Self::Cube(c) => c.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - match self { - Self::Square(c) => c.synthesize(cs, pc, z), - Self::Cube(c) => c.synthesize(cs, pc, z), - } - } - } - - impl NonUniformCircuit for TestCircuit { - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Square(SquareCircuit { _p: PhantomData }), - 1 => Self::Cube(CubeCircuit { _p: PhantomData }), - _ => panic!("Invalid circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - } - - #[derive(Clone)] - struct BigPowerCircuit { - _p: PhantomData, - } - - impl StepCircuit for BigPowerCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - let mut x = z[0].clone(); - let mut y = x.clone(); - for i in 0..10_000 { - y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; - x = y.clone(); - } - - let next_pc = - AllocatedNum::alloc(cs.namespace(|| "next_pc"), || Ok(E::Scalar::from(0u64)))?; - - cs.enforce( - || "next_pc = 0", - |lc| lc + CS::one(), - |lc| lc + 
next_pc.get_variable(), - |lc| lc, - ); - - Ok((Some(next_pc), vec![y])) - } - } - - #[derive(Clone)] - enum BigTestCircuit { - Square(SquareCircuit), - BigPower(BigPowerCircuit), - } - - impl BigTestCircuit { - fn new(num_steps: usize) -> Vec { - let mut circuits = Vec::new(); - - for idx in 0..num_steps { - if idx % 2 == 0 { - circuits.push(Self::Square(SquareCircuit { _p: PhantomData })) - } else { - circuits.push(Self::BigPower(BigPowerCircuit { _p: PhantomData })) - } - } - - circuits - } - } - - impl StepCircuit for BigTestCircuit { - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Square(c) => c.circuit_index(), - Self::BigPower(c) => c.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result< - ( - Option>, - Vec>, - ), - SynthesisError, - > { - match self { - Self::Square(c) => c.synthesize(cs, pc, z), - Self::BigPower(c) => c.synthesize(cs, pc, z), - } - } - } - - impl NonUniformCircuit for BigTestCircuit { - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Square(SquareCircuit { _p: PhantomData }), - 1 => Self::BigPower(BigPowerCircuit { _p: PhantomData }), - _ => panic!("Invalid circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - } - - fn test_compression_with(num_steps: usize, circuits_factory: F) - where - E1: CurveCycleEquipped, - S1: BatchedRelaxedR1CSSNARKTrait, - S2: RelaxedR1CSSNARKTrait>, - C: NonUniformCircuit as Engine>::Scalar>> - + StepCircuit, - F: Fn(usize) -> Vec, - { - let secondary_circuit = TrivialCircuit::default(); - let test_circuits = circuits_factory(num_steps); - - let pp = PublicParams::setup(&test_circuits[0], &*S1::ck_floor(), &*S2::ck_floor()); - - let z0_primary = vec![E1::Scalar::from(17u64)]; - 
let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - - let mut recursive_snark = RecursiveSNARK::new( - &pp, - &test_circuits[0], - &test_circuits[0], - &secondary_circuit, - &z0_primary, - &z0_secondary, - ) - .unwrap(); - - for circuit in test_circuits.iter().take(num_steps) { - recursive_snark - .prove_step(&pp, circuit, &secondary_circuit) - .unwrap(); - - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .unwrap(); - } - - let (prover_key, verifier_key) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); - - let compressed_snark = CompressedSNARK::prove(&pp, &prover_key, &recursive_snark).unwrap(); - - compressed_snark - .verify(&pp, &verifier_key, &z0_primary, &z0_secondary) - .unwrap(); - } - - #[test] - fn test_nivc_trivial_with_compression() { - const NUM_STEPS: usize = 6; - test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); - test_compression_with::, S2<_>, _, _>(NUM_STEPS, TestCircuit::new); - } - - #[test] - fn test_compression_with_circuit_size_difference() { - const NUM_STEPS: usize = 4; - test_compression_with::, S2<_>, _, _>( - NUM_STEPS, - BigTestCircuit::new, - ); - test_compression_with::, S2<_>, _, _>(NUM_STEPS, BigTestCircuit::new); - } -} diff --git a/src/supernova/test.rs b/src/supernova/test.rs deleted file mode 100644 index 722fda0..0000000 --- a/src/supernova/test.rs +++ /dev/null @@ -1,876 +0,0 @@ -use core::marker::PhantomData; -use std::fmt::Write; - -use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use expect_test::{expect, Expect}; -use ff::{Field, PrimeField}; -use tap::TapOptional; - -use super::{utils::get_selector_vec_from_index, *}; -use crate::{ - bellpepper::test_shape_cs::TestShapeCS, - gadgets::{alloc_one, alloc_zero}, - provider::{poseidon::PoseidonConstantsCircuit, Bn256EngineIPA, GrumpkinEngine}, - supernova::circuit::{StepCircuit, TrivialCircuit}, - traits::snark::default_ck_hint, -}; - -#[derive(Clone, Debug, Default)] -struct CubicCircuit { - _p: PhantomData, - 
circuit_index: usize, - rom_size: usize, -} - -impl CubicCircuit { - fn new(circuit_index: usize, rom_size: usize) -> Self { - Self { - circuit_index, - rom_size, - _p: PhantomData, - } - } -} - -fn next_rom_index_and_pc>( - cs: &mut CS, - rom_index: &AllocatedNum, - allocated_rom: &[AllocatedNum], - pc: &AllocatedNum, -) -> Result<(AllocatedNum, AllocatedNum), SynthesisError> { - // Compute a selector for the current rom_index in allocated_rom - let current_rom_selector = get_selector_vec_from_index( - cs.namespace(|| "rom selector"), - rom_index, - allocated_rom.len(), - )?; - - // Enforce that allocated_rom[rom_index] = pc - for (rom, bit) in allocated_rom.iter().zip_eq(current_rom_selector.iter()) { - // if bit = 1, then rom = pc - // bit * (rom - pc) = 0 - cs.enforce( - || "enforce bit = 1 => rom = pc", - |lc| lc + &bit.lc(CS::one(), F::ONE), - |lc| lc + rom.get_variable() - pc.get_variable(), - |lc| lc, - ); - } - - // Get the index of the current rom, or the index of the invalid rom if no match - let current_rom_index = current_rom_selector - .iter() - .position(|bit| bit.get_value().is_some_and(|v| v)) - .unwrap_or_default(); - let next_rom_index = current_rom_index + 1; - - let rom_index_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next rom index"), || { - F::from(next_rom_index as u64) - }); - cs.enforce( - || " rom_index + 1 - next_rom_index_num = 0", - |lc| lc, - |lc| lc, - |lc| lc + rom_index.get_variable() + CS::one() - rom_index_next.get_variable(), - ); - - // Allocate the next pc without checking. - // The next iteration will check whether the next pc is valid. 
- let pc_next = AllocatedNum::alloc_infallible(cs.namespace(|| "next pc"), || { - allocated_rom - .get(next_rom_index) - .and_then(|v| v.get_value()) - .unwrap_or(-F::ONE) - }); - - Ok((rom_index_next, pc_next)) -} - -impl StepCircuit for CubicCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 2 + self.rom_size // value + rom_pc + rom[].len() - } - - fn circuit_index(&self) -> usize { - self.circuit_index - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let rom_index = &z[1]; - let allocated_rom = &z[2..]; - - let (rom_index_next, pc_next) = next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - - // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are - // respectively the input and output. - let x = &z[0]; - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_cu.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) - })?; - - cs.enforce( - || "y = x^3 + x + 5", - |lc| { - lc + x_cu.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - let mut z_next = vec![y]; - z_next.push(rom_index_next); - z_next.extend(z[2..].iter().cloned()); - Ok((Some(pc_next), z_next)) - } -} - -#[derive(Clone, Debug, Default)] -struct SquareCircuit { - _p: PhantomData, - circuit_index: usize, - rom_size: usize, -} - -impl SquareCircuit { - fn new(circuit_index: usize, rom_size: usize) -> Self { - Self { - circuit_index, - rom_size, - _p: PhantomData, - } - } -} - -impl StepCircuit for SquareCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 2 + self.rom_size // value + rom_pc + rom[].len() - } - - fn 
circuit_index(&self) -> usize { - self.circuit_index - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let rom_index = &z[1]; - let allocated_rom = &z[2..]; - - let (rom_index_next, pc_next) = next_rom_index_and_pc( - &mut cs.namespace(|| "next and rom_index and pc"), - rom_index, - allocated_rom, - pc.ok_or(SynthesisError::AssignmentMissing)?, - )?; - - // Consider an equation: `x^2 + x + 5 = y`, where `x` and `y` are respectively - // the input and output. - let x = &z[0]; - let x_sq = x.square(cs.namespace(|| "x_sq"))?; - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - Ok(x_sq.get_value().unwrap() + x.get_value().unwrap() + F::from(5u64)) - })?; - - cs.enforce( - || "y = x^2 + x + 5", - |lc| { - lc + x_sq.get_variable() - + x.get_variable() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - + CS::one() - }, - |lc| lc + CS::one(), - |lc| lc + y.get_variable(), - ); - - let mut z_next = vec![y]; - z_next.push(rom_index_next); - z_next.extend(z[2..].iter().cloned()); - Ok((Some(pc_next), z_next)) - } -} - -fn print_constraints_name_on_error_index< - E1, - C1: StepCircuit, - C2: StepCircuit< as Engine>::Scalar>, ->( - err: &SuperNovaError, - pp: &PublicParams, - c_primary: &C1, - c_secondary: &C2, - num_augmented_circuits: usize, -) where - E1: CurveCycleEquipped, -{ - match err { - SuperNovaError::UnSatIndex(msg, index) if *msg == "r_primary" => { - let circuit_primary: SuperNovaAugmentedCircuit<'_, Dual, C1> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_primary, - None, - c_primary, - pp.ro_consts_circuit_primary.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS = TestShapeCS::new(); - let _ = circuit_primary.synthesize(&mut cs); - cs.constraints - .get(*index) - .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); - } - SuperNovaError::UnSatIndex(msg, index) - if *msg == "r_secondary" || 
*msg == "l_secondary" => - { - let circuit_secondary: SuperNovaAugmentedCircuit<'_, E1, C2> = - SuperNovaAugmentedCircuit::new( - &pp.augmented_circuit_params_secondary, - None, - c_secondary, - pp.ro_consts_circuit_secondary.clone(), - num_augmented_circuits, - ); - let mut cs: TestShapeCS> = TestShapeCS::new(); - let _ = circuit_secondary.synthesize(&mut cs); - cs.constraints - .get(*index) - .tap_some(|constraint| debug!("{msg} failed at constraint {}", constraint.3)); - } - _ => (), - } -} - -const OPCODE_0: usize = 0; -const OPCODE_1: usize = 1; - -struct TestROM { - rom: Vec, - _p: PhantomData, -} - -#[derive(Debug, Clone)] -enum TestROMCircuit { - Cubic(CubicCircuit), - Square(SquareCircuit), -} - -impl StepCircuit for TestROMCircuit { - fn arity(&self) -> usize { - match self { - Self::Cubic(x) => x.arity(), - Self::Square(x) => x.arity(), - } - } - - fn circuit_index(&self) -> usize { - match self { - Self::Cubic(x) => x.circuit_index(), - Self::Square(x) => x.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - match self { - Self::Cubic(x) => x.synthesize(cs, pc, z), - Self::Square(x) => x.synthesize(cs, pc, z), - } - } -} - -impl NonUniformCircuit for TestROM -where - E1: CurveCycleEquipped, -{ - type C1 = TestROMCircuit; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self::C1 { - match circuit_index { - 0 => TestROMCircuit::Cubic(CubicCircuit::new(circuit_index, self.rom.len())), - 1 => TestROMCircuit::Square(SquareCircuit::new(circuit_index, self.rom.len())), - _ => panic!("unsupported primary circuit index"), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - Default::default() - } - - fn initial_circuit_index(&self) -> usize { - self.rom[0] - } -} - -impl TestROM { - fn new(rom: Vec) -> Self { - Self { - rom, - _p: 
Default::default(), - } - } -} - -fn test_trivial_nivc_with() -where - E1: CurveCycleEquipped, -{ - // Here demo a simple RAM machine - // - with 2 argumented circuit - // - each argumented circuit contains primary and secondary circuit - // - a memory commitment via a public IO `rom` (like a program) to constraint - // the sequence execution - - // This test also ready to add more argumented circuit and ROM can be arbitrary - // length - - // ROM is for constraints the sequence of execution order for opcode - - // TODO: replace with memory commitment along with suggestion from Supernova 4.4 - // optimisations - - // This is mostly done with the existing Nova code. With additions of U_i[] and - // program_counter checks in the augmented circuit. - - let rom = vec![ - OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, - OPCODE_1, - ]; // Rom can be arbitrary length. - - let test_rom = TestROM::::new(rom); - - let pp = PublicParams::setup(&test_rom, &*default_ck_hint(), &*default_ck_hint()); - - // extend z0_primary/secondary with rom content - let mut z0_primary = vec![::Scalar::ONE]; - z0_primary.push(::Scalar::ZERO); // rom_index = 0 - z0_primary.extend( - test_rom - .rom - .iter() - .map(|opcode| ::Scalar::from(*opcode as u64)), - ); - let z0_secondary = vec![ as Engine>::Scalar::ONE]; - - let mut recursive_snark_option: Option> = None; - - for &op_code in test_rom.rom.iter() { - let circuit_primary = test_rom.primary_circuit(op_code); - let circuit_secondary = test_rom.secondary_circuit(); - - let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| { - RecursiveSNARK::new( - &pp, - &test_rom, - &circuit_primary, - &circuit_secondary, - &z0_primary, - &z0_secondary, - ) - .unwrap() - }); - - recursive_snark - .prove_step(&pp, &circuit_primary, &circuit_secondary) - .unwrap(); - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - 
&circuit_primary, - &circuit_secondary, - test_rom.num_circuits(), - ) - }) - .unwrap(); - - recursive_snark_option = Some(recursive_snark) - } - - assert!(recursive_snark_option.is_some()); - - // Now you can handle the Result using if let - let RecursiveSNARK { - zi_primary, - zi_secondary, - program_counter, - .. - } = &recursive_snark_option.unwrap(); - - println!("zi_primary: {:?}", zi_primary); - println!("zi_secondary: {:?}", zi_secondary); - println!("final program_counter: {:?}", program_counter); - - // The final program counter should be -1 - assert_eq!(*program_counter, -::Scalar::ONE); -} - -#[test] -#[tracing_test::traced_test] -fn test_trivial_nivc() { - // Experimenting with selecting the running claims for nifs - test_trivial_nivc_with::(); -} - -// In the following we use 1 to refer to the primary, and 2 to refer to the -// secondary circuit -fn test_recursive_circuit_with( - primary_params: &SuperNovaAugmentedCircuitParams, - secondary_params: &SuperNovaAugmentedCircuitParams, - ro_consts1: ROConstantsCircuit>, - ro_consts2: ROConstantsCircuit, - num_constraints_primary: &Expect, - num_constraints_secondary: &Expect, -) where - E1: CurveCycleEquipped, -{ - // Initialize the shape and ck for the primary - let step_circuit1 = TrivialCircuit::default(); - let arity1 = step_circuit1.arity(); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new(primary_params, None, &step_circuit1, ro_consts1.clone(), 2); - let mut cs: ShapeCS = ShapeCS::new(); - if let Err(e) = circuit1.synthesize(&mut cs) { - panic!("{}", e) - } - let (shape1, ck1) = cs.r1cs_shape_and_key(&*default_ck_hint()); - num_constraints_primary.assert_eq(&cs.num_constraints().to_string()); - - // Initialize the shape and ck for the secondary - let step_circuit2 = TrivialCircuit::default(); - let arity2 = step_circuit2.arity(); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - 
SuperNovaAugmentedCircuit::new( - secondary_params, - None, - &step_circuit2, - ro_consts2.clone(), - 2, - ); - let mut cs: ShapeCS> = ShapeCS::new(); - if let Err(e) = circuit2.synthesize(&mut cs) { - panic!("{}", e) - } - let (shape2, ck2) = cs.r1cs_shape_and_key(&*default_ck_hint()); - num_constraints_secondary.assert_eq(&cs.num_constraints().to_string()); - - // Execute the base case for the primary - let zero1 = < as Engine>::Base as Field>::ZERO; - let z0 = vec![zero1; arity1]; - let mut cs1 = SatisfyingAssignment::::new(); - let inputs1: SuperNovaAugmentedCircuitInputs<'_, Dual> = - SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::(zero1), // pass zero for testing - zero1, - &z0, - None, - None, - None, - None, - Some(zero1), - zero1, - ); - let step_circuit = TrivialCircuit::default(); - let circuit1: SuperNovaAugmentedCircuit< - '_, - Dual, - TrivialCircuit< as Engine>::Base>, - > = SuperNovaAugmentedCircuit::new(primary_params, Some(inputs1), &step_circuit, ro_consts1, 2); - if let Err(e) = circuit1.synthesize(&mut cs1) { - panic!("{}", e) - } - let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); - // Make sure that this is satisfiable - shape1.is_sat(&ck1, &inst1, &witness1).unwrap(); - - // Execute the base case for the secondary - let zero2 = <::Base as Field>::ZERO; - let z0 = vec![zero2; arity2]; - let mut cs2 = SatisfyingAssignment::>::new(); - let inputs2: SuperNovaAugmentedCircuitInputs<'_, E1> = SuperNovaAugmentedCircuitInputs::new( - scalar_as_base::>(zero2), // pass zero for testing - zero2, - &z0, - None, - None, - Some(&inst1), - None, - None, - zero2, - ); - let step_circuit = TrivialCircuit::default(); - let circuit2: SuperNovaAugmentedCircuit<'_, E1, TrivialCircuit<::Base>> = - SuperNovaAugmentedCircuit::new( - secondary_params, - Some(inputs2), - &step_circuit, - ro_consts2, - 2, - ); - if let Err(e) = circuit2.synthesize(&mut cs2) { - panic!("{}", e) - } - let (inst2, witness2) = 
cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); - // Make sure that it is satisfiable - shape2.is_sat(&ck2, &inst2, &witness2).unwrap(); -} - -#[test] -fn test_recursive_circuit() { - let params1 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); - let params2 = SuperNovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, false); - let ro_consts1: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - let ro_consts2: ROConstantsCircuit = PoseidonConstantsCircuit::default(); - - test_recursive_circuit_with::( - ¶ms1, - ¶ms2, - ro_consts1, - ro_consts2, - &expect!["9836"], - &expect!["12017"], - ); -} - -fn test_pp_digest_with(non_uniform_circuit: &NC, expected: &Expect) -where - E1: CurveCycleEquipped, - NC: NonUniformCircuit, -{ - // TODO: add back in https://github.com/argumentcomputer/arecibo/issues/53 - // // this tests public parameters with a size specifically intended for a - // spark-compressed SNARK let pp_hint1 = - // Some(SPrime::::commitment_key_floor()); let pp_hint2 = - // Some(SPrime::::commitment_key_floor()); - let pp = PublicParams::::setup( - non_uniform_circuit, - &*default_ck_hint(), - &*default_ck_hint(), - ); - - let digest_str = pp - .digest() - .to_repr() - .as_ref() - .iter() - .fold(String::new(), |mut output, b| { - let _ = write!(output, "{b:02x}"); - output - }); - expected.assert_eq(&digest_str); -} - -#[test] -fn test_supernova_pp_digest() { - let rom = vec![ - OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, OPCODE_1, OPCODE_0, OPCODE_0, OPCODE_1, - OPCODE_1, - ]; // Rom can be arbitrary length. - let test_rom_grumpkin = TestROM::::new(rom); - - test_pp_digest_with::( - &test_rom_grumpkin, - &expect!["30418e576c11dd698054a6cc69d1b1e43ddf0f562abfb50b777147afad741a01"], - ); -} - -// y is a non-deterministic hint representing the cube root of the input at a -// step. 
-#[derive(Clone, Debug)] -struct CubeRootCheckingCircuit { - y: Option, -} - -impl StepCircuit for CubeRootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 0 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let x = &z[0]; - - // we allocate a variable and set it to the provided non-deterministic hint. - let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - self.y.ok_or(SynthesisError::AssignmentMissing) - })?; - - // We now check if y = x^{1/3} by checking if y^3 = x - let y_sq = y.square(cs.namespace(|| "y_sq"))?; - let y_cube = y_sq.mul(cs.namespace(|| "y_cube"), &y)?; - - cs.enforce( - || "y^3 = x", - |lc| lc + y_cube.get_variable(), - |lc| lc + CS::one(), - |lc| lc + x.get_variable(), - ); - - let next_pc = alloc_one(&mut cs.namespace(|| "next_pc")); - - Ok((Some(next_pc), vec![y])) - } -} - -// y is a non-deterministic hint representing the fifth root of the input at a -// step. -#[derive(Clone, Debug)] -struct FifthRootCheckingCircuit { - y: Option, -} - -impl StepCircuit for FifthRootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - 1 - } - - fn synthesize>( - &self, - cs: &mut CS, - _pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - let x = &z[0]; - - // we allocate a variable and set it to the provided non-deterministic hint. 
- let y = AllocatedNum::alloc(cs.namespace(|| "y"), || { - self.y.ok_or(SynthesisError::AssignmentMissing) - })?; - - // We now check if y = x^{1/5} by checking if y^5 = x - let y_sq = y.square(cs.namespace(|| "y_sq"))?; - let y_quad = y_sq.square(cs.namespace(|| "y_quad"))?; - let y_pow_5 = y_quad.mul(cs.namespace(|| "y_fifth"), &y)?; - - cs.enforce( - || "y^5 = x", - |lc| lc + y_pow_5.get_variable(), - |lc| lc + CS::one(), - |lc| lc + x.get_variable(), - ); - - let next_pc = alloc_zero(&mut cs.namespace(|| "next_pc")); - - Ok((Some(next_pc), vec![y])) - } -} - -#[derive(Clone, Debug)] -enum RootCheckingCircuit { - Cube(CubeRootCheckingCircuit), - Fifth(FifthRootCheckingCircuit), -} - -impl RootCheckingCircuit { - fn new(num_steps: usize) -> (Vec, Vec) { - let mut powers = Vec::new(); - let rng = &mut rand::rngs::OsRng; - let mut seed = F::random(rng); - - for i in 0..num_steps + 1 { - let seed_sq = seed.clone().square(); - // Cube-root and fifth-root circuits alternate. We compute the hints backward, - // so the calculations appear to be associated with the 'wrong' - // circuit. The final circuit is discarded, and only the final seed is used (as - // z_0). 
- powers.push(if i % 2 == num_steps % 2 { - seed *= seed_sq; - Self::Fifth(FifthRootCheckingCircuit { y: Some(seed) }) - } else { - seed *= seed_sq.clone().square(); - Self::Cube(CubeRootCheckingCircuit { y: Some(seed) }) - }) - } - - // reverse the powers to get roots - let roots = powers.into_iter().rev().collect::>(); - (vec![roots[0].get_y().unwrap()], roots[1..].to_vec()) - } - - fn get_y(&self) -> Option { - match self { - Self::Fifth(x) => x.y, - Self::Cube(x) => x.y, - } - } -} - -impl StepCircuit for RootCheckingCircuit -where - F: PrimeField, -{ - fn arity(&self) -> usize { - 1 - } - - fn circuit_index(&self) -> usize { - match self { - Self::Cube(x) => x.circuit_index(), - Self::Fifth(x) => x.circuit_index(), - } - } - - fn synthesize>( - &self, - cs: &mut CS, - pc: Option<&AllocatedNum>, - z: &[AllocatedNum], - ) -> Result<(Option>, Vec>), SynthesisError> { - match self { - Self::Cube(c) => c.synthesize(cs, pc, z), - Self::Fifth(c) => c.synthesize(cs, pc, z), - } - } -} - -impl NonUniformCircuit for RootCheckingCircuit -where - E1: CurveCycleEquipped, -{ - type C1 = Self; - type C2 = TrivialCircuit< as Engine>::Scalar>; - - fn num_circuits(&self) -> usize { - 2 - } - - fn primary_circuit(&self, circuit_index: usize) -> Self { - match circuit_index { - 0 => Self::Cube(CubeRootCheckingCircuit { y: None }), - 1 => Self::Fifth(FifthRootCheckingCircuit { y: None }), - _ => unreachable!(), - } - } - - fn secondary_circuit(&self) -> Self::C2 { - TrivialCircuit::::default() - } -} - -fn test_nivc_nondet_with() -where - E1: CurveCycleEquipped, -{ - let circuit_secondary = TrivialCircuit::default(); - - let num_steps = 3; - - // produce non-deterministic hint - let (z0_primary, roots) = RootCheckingCircuit::new(num_steps); - assert_eq!(num_steps, roots.len()); - let z0_secondary = vec![ as Engine>::Scalar::ZERO]; - - // produce public parameters - let pp = PublicParams::::setup(&roots[0], &*default_ck_hint(), &*default_ck_hint()); - // produce a recursive SNARK - 
- let circuit_primary = &roots[0]; - - let mut recursive_snark = RecursiveSNARK::::new( - &pp, - circuit_primary, - circuit_primary, - &circuit_secondary, - &z0_primary, - &z0_secondary, - ) - .map_err(|err| { - print_constraints_name_on_error_index(&err, &pp, circuit_primary, &circuit_secondary, 2) - }) - .unwrap(); - - for circuit_primary in roots.iter().take(num_steps) { - let res = recursive_snark.prove_step(&pp, circuit_primary, &circuit_secondary); - assert!(res - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - circuit_primary, - &circuit_secondary, - 2, - ) - }) - .is_ok()); - - // verify the recursive SNARK - recursive_snark - .verify(&pp, &z0_primary, &z0_secondary) - .map_err(|err| { - print_constraints_name_on_error_index( - &err, - &pp, - circuit_primary, - &circuit_secondary, - 2, - ) - }) - .unwrap(); - } -} - -#[test] -fn test_nivc_nondet() { - test_nivc_nondet_with::(); -} diff --git a/src/traits/commitment.rs b/src/traits/commitment.rs deleted file mode 100644 index 87487fb..0000000 --- a/src/traits/commitment.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! commitment engine We require the commitment engine to provide a commitment -//! to vectors with a single group element -use core::{ - fmt::Debug, - ops::{Add, Mul, MulAssign}, -}; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - traits::{AbsorbInROTrait, Engine, TranscriptReprTrait}, -}; - -/// A helper trait for types implementing scalar multiplication. 
-pub trait ScalarMul: Mul + MulAssign {} - -impl ScalarMul for T where T: Mul + MulAssign -{} - -/// This trait defines the behavior of the commitment -pub trait CommitmentTrait: - Clone - + Copy - + Debug - + Default - + PartialEq - + Eq - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de> - + AbsorbInROTrait - + Add - + ScalarMul -{ - /// Holds the type of the compressed commitment - type CompressedCommitment: Clone - + Debug - + PartialEq - + Eq - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// Compresses self into a compressed commitment - fn compress(&self) -> Self::CompressedCommitment; - - /// Returns the coordinate representation of the commitment - fn to_coordinates(&self) -> (E::Base, E::Base, bool); - - /// Decompresses a compressed commitment into a commitment - fn decompress(c: &Self::CompressedCommitment) -> Result; -} - -/// A trait that helps determine the length of a structure. -/// Note this does not impose any memory representation constraints on the -/// structure. -pub trait Len { - /// Returns the length of the structure. - fn length(&self) -> usize; -} - -/// A trait that ties different pieces of the commitment generation together -pub trait CommitmentEngineTrait: Clone + Send + Sync { - /// Holds the type of the commitment key - /// The key should quantify its length in terms of group generators. 
- type CommitmentKey: Len - + Clone - + PartialEq - + Debug - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Holds the type of the commitment - type Commitment: CommitmentTrait; - - /// Samples a new commitment key of a specified size - fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey; - - /// Commits to the provided vector using the provided generators - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment; -} diff --git a/src/traits/evaluation.rs b/src/traits/evaluation.rs deleted file mode 100644 index e7d7537..0000000 --- a/src/traits/evaluation.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! polynomial evaluation engine A vector of size N is treated as a multilinear -//! polynomial in \log{N} variables, and a commitment provided by the commitment -//! engine is treated as a multilinear polynomial commitment -use std::sync::Arc; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - traits::{commitment::CommitmentEngineTrait, Engine}, -}; - -/// A trait that ties different pieces of the commitment evaluation together -pub trait EvaluationEngineTrait: Clone + Send + Sync { - /// A type that holds the prover key - type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A type that holds the verifier key - type VerifierKey: Send - + Sync - // required for easy Digest computation purposes, could be relaxed to - // [`crate::digest::Digestible`] - + Serialize - + for<'de> Deserialize<'de>; - - /// A type that holds the evaluation argument - type EvaluationArgument: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A method to perform any additional setup needed to produce proofs of - /// evaluations - /// - /// **Note:** This method should be cheap and should not copy most of the - /// commitment key. Look at `CommitmentEngineTrait::setup` for generating - /// SRS data. 
- fn setup( - ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, - ) -> (Self::ProverKey, Self::VerifierKey); - - /// A method to prove the evaluation of a multilinear polynomial - fn prove( - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, - pk: &Self::ProverKey, - transcript: &mut E::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - poly: &[E::Scalar], - point: &[E::Scalar], - eval: &E::Scalar, - ) -> Result; - - /// A method to verify the purported evaluation of a multilinear polynomials - fn verify( - vk: &Self::VerifierKey, - transcript: &mut E::TE, - comm: &<::CE as CommitmentEngineTrait>::Commitment, - point: &[E::Scalar], - eval: &E::Scalar, - arg: &Self::EvaluationArgument, - ) -> Result<(), NovaError>; -} diff --git a/src/traits/mod.rs b/src/traits/mod.rs deleted file mode 100644 index 1ad5602..0000000 --- a/src/traits/mod.rs +++ /dev/null @@ -1,192 +0,0 @@ -//! This module defines various traits required by the users of the library to -//! implement. -use core::fmt::Debug; - -use bellpepper_core::{boolean::AllocatedBit, num::AllocatedNum, ConstraintSystem, SynthesisError}; -use ff::{PrimeField, PrimeFieldBits}; -use num_bigint::BigInt; -use serde::{Deserialize, Serialize}; - -use crate::errors::NovaError; - -pub mod commitment; - -use commitment::CommitmentEngineTrait; - -/// Represents an element of a group -/// This is currently tailored for an elliptic curve group -pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { - /// A type representing an element of the base field of the group - type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>; - - /// A type representing an element of the scalar field of the group - type Scalar: PrimeFieldBits - + PrimeFieldExt - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Returns A, B, the order of the group, the size of the base field as big - /// integers - fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt); -} - -/// A 
collection of engines that are required by the library -pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq { - /// A type representing an element of the base field of the group - type Base: PrimeFieldBits - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// A type representing an element of the scalar field of the group - type Scalar: PrimeFieldBits - + PrimeFieldExt - + Send - + Sync - + TranscriptReprTrait - + Serialize - + for<'de> Deserialize<'de>; - - /// A type that represents an element of the group - type GE: Group + Serialize + for<'de> Deserialize<'de>; - - /// A type that represents a circuit-friendly sponge that consumes elements - /// from the base field and squeezes out elements of the scalar field - type RO: ROTrait; - - /// An alternate implementation of `Self::RO` in the circuit model - type ROCircuit: ROCircuitTrait; - - /// A type that provides a generic Fiat-Shamir transcript to be used when - /// externalizing proofs - type TE: TranscriptEngineTrait; - - /// A type that defines a commitment engine over scalars in the group - type CE: CommitmentEngineTrait; -} - -/// This is a convenience trait to pair engines which fields are in a curve -/// cycle relationship -pub trait CurveCycleEquipped: Engine { - /// The secondary `Engine` of `Self` - type Secondary: Engine::Scalar, Scalar = ::Base>; -} - -/// Convenience projection to the secondary `Engine` of a `CurveCycleEquipped` -pub type Dual = ::Secondary; - -/// A helper trait to absorb different objects in RO -pub trait AbsorbInROTrait { - /// Absorbs the value in the provided RO - fn absorb_in_ro(&self, ro: &mut E::RO); -} - -/// A helper trait that defines the behavior of a hash function that we use as -/// an RO -pub trait ROTrait { - /// The circuit alter ego of this trait impl - this constrains it to use the - /// same constants - type CircuitRO: ROCircuitTrait; - - /// A type representing constants/parameters associated with the hash - /// 
function - type Constants: Debug - + Default - + Clone - + PartialEq - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Initializes the hash function - fn new(constants: Self::Constants, num_absorbs: usize) -> Self; - - /// Adds a scalar to the internal state - fn absorb(&mut self, e: Base); - - /// Returns a challenge of `num_bits` by hashing the internal state - fn squeeze(&mut self, num_bits: usize) -> Scalar; -} - -/// A helper trait that defines the behavior of a hash function that we use as -/// an RO in the circuit model -pub trait ROCircuitTrait { - /// the vanilla alter ego of this trait - this constrains it to use the same - /// constants - type NativeRO: ROTrait; - - /// A type representing constants/parameters associated with the hash - /// function on this Base field - type Constants: Debug - + Default - + Clone - + PartialEq - + Send - + Sync - + Serialize - + for<'de> Deserialize<'de>; - - /// Initializes the hash function - fn new(constants: Self::Constants, num_absorbs: usize) -> Self; - - /// Adds a scalar to the internal state - fn absorb(&mut self, e: &AllocatedNum); - - /// Returns a challenge of `num_bits` by hashing the internal state - fn squeeze>( - &mut self, - cs: CS, - num_bits: usize, - ) -> Result, SynthesisError>; -} - -/// An alias for constants associated with `E::RO` -pub type ROConstants = - <::RO as ROTrait<::Base, ::Scalar>>::Constants; - -/// An alias for constants associated with `E::ROCircuit` -pub type ROConstantsCircuit = - <::ROCircuit as ROCircuitTrait<::Base>>::Constants; - -/// This trait allows types to implement how they want to be added to -/// `TranscriptEngine` -pub trait TranscriptReprTrait: Send + Sync { - /// returns a byte representation of self to be added to the transcript - fn to_transcript_bytes(&self) -> Vec; -} - -/// This trait defines the behavior of a transcript engine compatible with -/// Spartan -pub trait TranscriptEngineTrait: Send + Sync { - /// initializes the transcript - fn 
new(label: &'static [u8]) -> Self; - - /// returns a scalar element of the group as a challenge - fn squeeze(&mut self, label: &'static [u8]) -> Result; - - /// absorbs any type that implements `TranscriptReprTrait` under a label - fn absorb>(&mut self, label: &'static [u8], o: &T); - - /// adds a domain separator - fn dom_sep(&mut self, bytes: &'static [u8]); -} - -/// Defines additional methods on `PrimeField` objects -pub trait PrimeFieldExt: PrimeField { - /// Returns a scalar representing the bytes - fn from_uniform(bytes: &[u8]) -> Self; -} - -impl> TranscriptReprTrait for &[T] { - fn to_transcript_bytes(&self) -> Vec { - self.iter() - .flat_map(|t| t.to_transcript_bytes()) - .collect::>() - } -} - -pub mod evaluation; -pub mod snark; diff --git a/src/traits/snark.rs b/src/traits/snark.rs deleted file mode 100644 index 8e6f93a..0000000 --- a/src/traits/snark.rs +++ /dev/null @@ -1,137 +0,0 @@ -//! This module defines a collection of traits that define the behavior of a -//! `zkSNARK` for `RelaxedR1CS` -use std::sync::Arc; - -use serde::{Deserialize, Serialize}; - -use crate::{ - errors::NovaError, - r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness}, - traits::Engine, - CommitmentKey, -}; - -// NOTES: This function seems heavily reliant on dynamic allocation all to -// return 0 in the end... - -/// Public parameter creation takes a size hint. This size hint carries the -/// particular requirements of the final compressing SNARK the user expected to -/// use with these public parameters, and the below is a sensible default, which -/// is to not require any more bases then the usual (maximum of the number of -/// variables and constraints of the involved R1CS circuit). 
-pub fn default_ck_hint() -> Box Fn(&'a R1CSShape) -> usize> { - // The default is to not put an additional floor on the size of the commitment - // key - Box::new(|_shape: &R1CSShape| 0) -} - -// NOTES: I'm not sure having a trait here is really necessary unless you're -// wanting to have a much larger abstraction. I'd consider just gutting this and -// forming one SNARK that we use. - -/// A trait that defines the behavior of a `zkSNARK` -pub trait RelaxedR1CSSNARKTrait: - Send + Sync + Serialize + for<'de> Deserialize<'de> -{ - /// A type that represents the prover's key - type ProverKey: Send + Sync; - - /// A type that represents the verifier's key - type VerifierKey: Send + Sync + Serialize; - - /// This associated function (not a method) provides a hint that offers - /// a minimum sizing cue for the commitment key used by this SNARK - /// implementation. The commitment key passed in setup should then - /// be at least as large as this hint. - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - // The default is to not put an additional floor on the size of the commitment - // key - default_ck_hint() - } - - /// Initialize a ProvingKey directly from a CommitmentKey and a - /// already known vk_digest. - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result; - - /// Produces the keys for the prover and the verifier - fn setup( - ck: Arc>, - S: &R1CSShape, - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; - - /// Produces a new SNARK for a relaxed R1CS - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: &R1CSShape, - U: &RelaxedR1CSInstance, - W: &RelaxedR1CSWitness, - ) -> Result; - - /// Verifies a SNARK for a relaxed R1CS - fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance) -> Result<(), NovaError>; -} - -/// A trait that defines the behavior of a `zkSNARK` to prove knowledge of -/// satisfying witness to batches of relaxed R1CS instances. 
-pub trait BatchedRelaxedR1CSSNARKTrait: - Send + Sync + Serialize + for<'de> Deserialize<'de> -{ - /// A type that represents the prover's key - type ProverKey: Send + Sync + Serialize + for<'de> Deserialize<'de>; - - /// A type that represents the verifier's key - type VerifierKey: Send + Sync + DigestHelperTrait + Serialize + for<'de> Deserialize<'de>; - - // NOTES: If we don't need something more general here, this is just an odd - // thing to have defined generically since it just calls the weird function - // above. - - /// This associated function (not a method) provides a hint that offers - /// a minimum sizing cue for the commitment key used by this SNARK - /// implementation. The commitment key passed in setup should then - /// be at least as large as this hint. - fn ck_floor() -> Box Fn(&'a R1CSShape) -> usize> { - default_ck_hint() - } - - /// Initialize a ProvingKey directly from a CommitmentKey and a - /// already known vk_digest. - fn initialize_pk( - ck: Arc>, - vk_digest: E::Scalar, - ) -> Result; - - /// Produces the keys for the prover and the verifier - /// - /// **Note:** This method should be cheap and should not copy most of the - /// commitment key. Look at `CommitmentEngineTrait::setup` for generating - /// SRS data. - fn setup( - ck: Arc>, // NOTES: Why `Arc` this? 
- S: Vec<&R1CSShape>, /* NOTES: Why not a &[R1CSShape] here?, would get the same - * thing across as an iter i think */ - ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>; - - /// Produces a new SNARK for a batch of relaxed R1CS - fn prove( - ck: &CommitmentKey, - pk: &Self::ProverKey, - S: Vec<&R1CSShape>, - U: &[RelaxedR1CSInstance], - W: &[RelaxedR1CSWitness], - ) -> Result; - - /// Verifies a SNARK for a batch of relaxed R1CS - fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance]) - -> Result<(), NovaError>; -} - -/// A helper trait that defines the behavior of a verifier key of `zkSNARK` -pub trait DigestHelperTrait { - /// Returns the digest of the verifier's key - fn digest(&self) -> E::Scalar; -} diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 0000000..f58bb47 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,20 @@ +# .toml file formatting settings for `taplo` +# https://taplo.tamasfe.dev/configuration/formatter-options.html + +[formatting] +# align entries vertically +align_entries=true +# allow up to 1 consecutive empty line (default: 2) +allowed_blank_lines=1 +# collapse arrays into one line if they fit +array_auto_collapse=true +# default: 80 +column_width=100 +# remove whitespace around '=' +compact_entries=true +# alphabetically sort entries not separated by line breaks +reorder_keys=false +# align entries vertically (default: true) +# align_comments =false +# expand arrays into multiple lines (default: true) +# array_auto_expand =false