diff --git a/Cargo.lock b/Cargo.lock index c887678be54..0f95bd0eef5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -327,7 +327,7 @@ dependencies = [ "arrow-data", "arrow-schema", "arrow-select", - "flatbuffers", + "flatbuffers 25.12.19", "lz4_flex 0.12.0", "zstd", ] @@ -580,6 +580,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +dependencies = [ + "bytemuck", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -636,9 +645,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.15.3" +version = "1.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e84ce723ab67259cfeb9877c6a639ee9eb7a27b28123abd71db7f0d5d0cc9d86" +checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" dependencies = [ "aws-lc-sys", "zeroize", @@ -646,9 +655,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.36.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a442ece363113bd4bd4c8b18977a7798dd4d3c3383f34fb61936960e8f4ad8" +checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" dependencies = [ "cc", "cmake", @@ -658,9 +667,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.18" +version = "1.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "959dab27ce613e6c9658eb3621064d0e2027e5f2acb65bc526a43577facea557" +checksum = "d81b5b2898f6798ad58f484856768bca817e3cd9de0974c24ae0f1113fe88f1b" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -683,16 +692,15 @@ dependencies = [ [[package]] name = "aws-sdk-dynamodb" -version = "1.102.0" +version = "1.101.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f7e6a53cf5ee8b7041c73106d9a93480b47f8b955466262b043aab0b5bf489" +checksum = "b6f98cd9e5f2fc790aff1f393bc3c8680deea31c05d3c6f23b625cdc50b1b6b4" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", "aws-smithy-json", - "aws-smithy-observability", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", @@ -706,9 +714,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "1.120.0" +version = "1.119.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06673901e961f20fa8d7da907da48f7ad6c1b383e3726c22bd418900f015abe1" +checksum = "1d65fddc3844f902dfe1864acb8494db5f9342015ee3ab7890270d36fbd2e01c" dependencies = [ "aws-credential-types", "aws-runtime", @@ -718,7 +726,6 @@ dependencies = [ "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-json", - "aws-smithy-observability", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", @@ -731,7 +738,7 @@ dependencies = [ "http 0.2.12", "http 1.4.0", "http-body 0.4.6", - "lru 0.16.3", + "lru", "percent-encoding", "regex-lite", "sha2", @@ -741,16 +748,15 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.92.0" +version = "1.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7d63bd2bdeeb49aa3f9b00c15e18583503b778b2e792fc06284d54e7d5b6566" +checksum = "8ee6402a36f27b52fe67661c6732d684b2635152b676aa2babbfb5204f99115d" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", "aws-smithy-json", - "aws-smithy-observability", "aws-smithy-runtime", "aws-smithy-runtime-api", 
"aws-smithy-types", @@ -764,16 +770,15 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.94.0" +version = "1.93.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532d93574bf731f311bafb761366f9ece345a0416dbcc273d81d6d1a1205239b" +checksum = "a45a7f750bbd170ee3677671ad782d90b894548f4e4ae168302c57ec9de5cb3e" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", "aws-smithy-json", - "aws-smithy-observability", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", @@ -787,16 +792,15 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.96.0" +version = "1.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357e9a029c7524db6a0099cd77fbd5da165540339e7296cca603531bc783b56c" +checksum = "55542378e419558e6b1f398ca70adb0b2088077e79ad9f14eb09441f2f7b2164" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", "aws-smithy-json", - "aws-smithy-observability", "aws-smithy-query", "aws-smithy-runtime", "aws-smithy-runtime-api", @@ -850,9 +854,9 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.63.13" +version = "0.63.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23374b9170cbbcc6f5df8dc5ebb9b6c5c28a3c8f599f0e8b8b10eb6f4a5c6e74" +checksum = "87294a084b43d649d967efe58aa1f9e0adc260e13a6938eb904c0ae9b45824ae" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -942,9 +946,9 @@ dependencies = [ [[package]] name = "aws-smithy-observability" -version = "0.2.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef1fcbefc7ece1d70dcce29e490f269695dfca2d2bacdeaf9e5c3f799e4e6a42" +checksum = "17f616c3f2260612fe44cede278bafa18e73e6479c4e393e2c4518cf2a9a228a" dependencies = [ "aws-smithy-runtime-api", ] @@ -961,9 +965,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.9.8" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb5b6167fcdf47399024e81ac08e795180c576a20e4d4ce67949f9a88ae37dc1" +checksum = "a392db6c583ea4a912538afb86b7be7c5d8887d91604f50eb55c262ee1b4a5f5" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -985,9 +989,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.10.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efce7aaaf59ad53c5412f14fc19b2d5c6ab2c3ec688d272fd31f76ec12f44fb0" +checksum = "ab0d43d899f9e508300e587bf582ba54c27a452dd0a9ea294690669138ae14a2" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1002,9 +1006,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.6" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f172bcb02424eb94425db8aed1b6d583b5104d4d5ddddf22402c661a320048" +checksum = "905cb13a9895626d49cf2ced759b062d913834c7482c38e49557eac4e6193f01" dependencies = [ "base64-simd", "bytes", @@ -1475,6 +1479,9 @@ name = "bytes" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +dependencies = [ + "serde", +] [[package]] name = "bytes-utils" @@ -1503,9 +1510,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.53" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -1542,9 +1549,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "js-sys", @@ -1637,9 +1644,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.7" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" @@ -1658,9 +1665,9 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "comfy-table" -version = "7.2.2" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" +checksum = "b03b7db8e0b4b2fdad6c551e634134e99ec000e5c8c3b6856c65e8bbaded7a3b" dependencies = [ "unicode-segmentation", "unicode-width", @@ -1798,9 +1805,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d" dependencies = [ "crc-catalog", ] @@ -1813,14 +1820,15 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc-fast" -version = "1.9.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d" +checksum = "6ddc2d09feefeee8bd78101665bd8645637828fa9317f9f292496dbbd8c65ff3" dependencies = [ "crc", "digest", + "rand 0.9.2", + "regex", "rustversion", - "spin 0.10.0", ] [[package]] @@ -1916,6 +1924,16 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-skiplist" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -2144,7 +2162,7 @@ dependencies = [ "futures", "itertools 0.14.0", "log", - "object_store", + "object_store 0.12.4", "parking_lot", "parquet", "rand 0.9.2", @@ -2177,7 +2195,7 @@ dependencies = [ "futures", "itertools 0.14.0", "log", - "object_store", + "object_store 0.12.4", "parking_lot", "tokio", ] @@ -2202,7 +2220,7 @@ dependencies = [ "futures", "itertools 0.14.0", "log", - "object_store", + "object_store 0.12.4", "tokio", ] @@ -2221,7 +2239,7 @@ dependencies = [ "indexmap", "libc", "log", - "object_store", + "object_store 0.12.4", "parquet", "paste", "sqlparser", @@ -2263,7 +2281,7 @@ dependencies = [ "glob", "itertools 0.14.0", "log", - "object_store", + "object_store 0.12.4", "rand 0.9.2", "tokio", "url", @@ -2289,7 +2307,7 @@ dependencies = [ "datafusion-session", "futures", "itertools 0.14.0", - "object_store", + "object_store 0.12.4", 
"tokio", ] @@ -2311,7 +2329,7 @@ dependencies = [ "datafusion-physical-plan", "datafusion-session", "futures", - "object_store", + "object_store 0.12.4", "regex", "tokio", ] @@ -2334,7 +2352,7 @@ dependencies = [ "datafusion-physical-plan", "datafusion-session", "futures", - "object_store", + "object_store 0.12.4", "tokio", ] @@ -2362,7 +2380,7 @@ dependencies = [ "futures", "itertools 0.14.0", "log", - "object_store", + "object_store 0.12.4", "parking_lot", "parquet", "tokio", @@ -2387,7 +2405,7 @@ dependencies = [ "datafusion-expr", "futures", "log", - "object_store", + "object_store 0.12.4", "parking_lot", "rand 0.9.2", "tempfile", @@ -2750,7 +2768,7 @@ dependencies = [ "datafusion", "half", "itertools 0.14.0", - "object_store", + "object_store 0.12.4", "pbjson-types", "prost", "substrait", @@ -2979,6 +2997,12 @@ dependencies = [ "const-random", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -2997,6 +3021,19 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" +[[package]] +name = "duration-str" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88959de2d447fd3eddcf1909d1f19fe084e27a056a6904203dc5d8b9e771c1e" +dependencies = [ + "rust_decimal", + "serde", + "thiserror 2.0.17", + "time", + "winnow 0.6.26", +] + [[package]] name = "dyn-clone" version = "1.0.20" @@ -3139,6 +3176,12 @@ dependencies = [ "encoding_rs", ] +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + [[package]] name = "env_filter" version = "0.1.4" @@ -3240,6 +3283,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fail-parallel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5666e8ca4ec174d896fb742789c29b1bea9319dcfd623c41bececc0a60c4939d" +dependencies = [ + "log", + "once_cell", + "rand 0.8.5", +] + [[package]] name = "fast-float2" version = "0.2.3" @@ -3277,22 +3331,39 @@ dependencies = [ "subtle", ] +[[package]] +name = "figment" +version = "0.10.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" +dependencies = [ + "atomic", + "pear", + "serde", + "serde_json", + "serde_yaml", + "toml", + "uncased", + "version_check", +] + [[package]] name = "filetime" -version = "0.2.27" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" dependencies = [ "cfg-if", "libc", "libredox", + "windows-sys 0.60.2", ] [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "findshlibs" @@ -3312,6 +3383,16 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" +[[package]] +name = "flatbuffers" +version = "24.12.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1baf0dbf96932ec9a3038d57900329c015b0bfb7b63d904f3bc27e2b02a096" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + [[package]] name = "flatbuffers" version = "25.12.19" @@ -3777,7 +3858,7 @@ dependencies = [ "rustls-pemfile", "serde", "serde_json", - "thiserror 2.0.18", + "thiserror 2.0.17", "time", "tokio", ] @@ -3930,7 +4011,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "ureq", "windows-sys 0.60.2", @@ -4443,6 +4524,12 @@ dependencies = [ "str_stack", ] +[[package]] +name = "inlinable_string" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" + [[package]] name = "inout" version = "0.1.4" @@ -4628,9 +4715,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -4707,6 +4794,7 @@ dependencies = [ "chrono", "clap", "criterion", + "crossbeam-skiplist", "dashmap", "datafusion", "datafusion-expr", @@ -4743,7 +4831,7 @@ dependencies = [ "lzma-sys", "mock_instant", "moka", - "object_store", + "object_store 0.12.4", "paste", "permutation", "pin-project", @@ -4757,12 +4845,14 @@ dependencies = [ "semver", "serde", "serde_json", + "slatedb", "snafu", "tantivy", "tempfile", "test-log", "tokio", "tokio-stream", + "tokio-util", "tracing", "tracing-chrome", "tracing-subscriber", @@ -4822,7 +4912,7 @@ dependencies = [ "mock_instant", "moka", "num_cpus", - "object_store", + "object_store 0.12.4", "pin-project", "proptest", "prost", @@ -4955,7 +5045,7 @@ dependencies = [ "lance-datagen", "lance-index", "lance-linalg", - "object_store", + "object_store 0.12.4", "parquet", "rand 0.9.2", "tempfile", @@ -4990,7 +5080,7 @@ dependencies = [ "libc", "log", "num-traits", - "object_store", + "object_store 0.12.4", "pprof", "pretty_assertions", "proptest", @@ -5075,7 +5165,7 @@ dependencies = [ "log", "ndarray", "num-traits", - "object_store", + "object_store 0.12.4", "pprof", "prost", "prost-build", @@ -5128,7 +5218,7 @@ dependencies = [ "log", "mock_instant", "mockall", - "object_store", + "object_store 0.12.4", "object_store_opendal", "opendal", "path_abs", @@ -5208,7 +5298,7 @@ dependencies = [ "lance-io", "lance-namespace", "log", - "object_store", + "object_store 0.12.4", "rand 0.9.2", "reqwest", "rstest 0.23.0", @@ -5262,7 +5352,7 @@ dependencies = [ "lance-file", "lance-io", "log", - "object_store", + "object_store 0.12.4", "pprof", "pretty_assertions", "proptest", @@ -5312,7 +5402,7 @@ dependencies = [ "lance-core", "lance-file", "lance-io", - "object_store", + "object_store 0.12.4", "snafu", "tokio", "url", @@ -5524,7 +5614,7 @@ dependencies = [ "reqwest", "serde", "tar", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "yada", ] @@ -5647,15 +5737,6 @@ dependencies = [ "hashbrown 0.15.5", ] -[[package]] -name = "lru" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" -dependencies = [ - "hashbrown 
0.16.1", -] - [[package]] name = "lru-slab" version = "0.1.2" @@ -5952,6 +6033,15 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" +dependencies = [ + "smallvec", +] + [[package]] name = "nix" version = "0.26.4" @@ -6149,9 +6239,39 @@ dependencies = [ [[package]] name = "object_store" -version = "0.12.5" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cfccb68961a56facde1163f9319e0d15743352344e7808a11795fb99698dcaf" +dependencies = [ + "async-trait", + "base64 0.22.1", + "bytes", + "chrono", + "futures", + "humantime", + "hyper 1.8.1", + "itertools 0.13.0", + "md-5", + "parking_lot", + "percent-encoding", + "quick-xml 0.37.5", + "rand 0.8.5", + "reqwest", + "ring", + "serde", + "serde_json", + "snafu", + "tokio", + "tracing", + "url", + "walkdir", +] + +[[package]] +name = "object_store" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbfbfff40aeccab00ec8a910b57ca8ecf4319b335c542f2edcd19dd25a1e2a00" +checksum = "4c1be0c6c22ec0817cdc77d3842f721a17fd30ab6965001415b5402a74e6b740" dependencies = [ "async-trait", "base64 0.22.1", @@ -6176,7 +6296,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tracing", "url", @@ -6195,7 +6315,7 @@ dependencies = [ "bytes", "chrono", "futures", - "object_store", + "object_store 0.12.4", "opendal", "pin-project", "tokio", @@ -6442,7 +6562,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", - "object_store", + "object_store 0.12.4", "paste", "seq-macro", "simdutf8", @@ -6518,6 +6638,29 @@ dependencies = [ "hmac", ] +[[package]] +name = "pear" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn 2.0.114", +] + [[package]] name = "pem" version = "3.0.6" @@ -6871,7 +7014,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -6907,6 +7050,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", + "version_check", + "yansi", +] + [[package]] name = "proptest" version = "1.9.0" @@ -6919,7 +7075,7 @@ dependencies = [ "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", - "rand_xorshift", + "rand_xorshift 0.4.0", "regex-syntax", "rusty-fork", "tempfile", @@ -7045,7 +7201,7 @@ dependencies = [ "rustc-hash", "rustls 0.23.36", "socket2 0.6.1", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tracing", "web-time", @@ -7066,7 +7222,7 @@ dependencies = [ "rustls 0.23.36", "rustls-pki-types", "slab", - 
"thiserror 2.0.18", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7107,6 +7263,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + [[package]] name = "rand" version = "0.7.3" @@ -7138,7 +7304,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.5", + "rand_core 0.9.4", ] [[package]] @@ -7168,7 +7334,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.5", + "rand_core 0.9.4", ] [[package]] @@ -7191,9 +7357,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +checksum = "4f1b3bc831f92381018fd9c6350b917c7b21f1eed35a65a51900e0e55a3d7afa" dependencies = [ "getrandom 0.3.4", ] @@ -7227,13 +7393,22 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xorshift" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.5", + "rand_core 0.9.4", ] [[package]] @@ -7242,7 +7417,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" dependencies = [ - "rand_core 0.9.5", + "rand_core 0.9.4", ] [[package]] @@ -7338,7 +7513,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.17", "libredox", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -7640,11 +7815,21 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rust_decimal" +version = "1.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +dependencies = [ + "arrayvec", + "num-traits", +] + [[package]] name = "rustc-demangle" -version = "0.1.27" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -7710,7 +7895,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.9", + "rustls-webpki 0.103.8", "subtle", "zeroize", ] @@ -7738,9 +7923,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.14.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +checksum = 
"21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "web-time", "zeroize", @@ -7758,9 +7943,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "aws-lc-rs", "ring", @@ -8029,6 +8214,15 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + [[package]] name = "serde_tokenstream" version = "0.2.2" @@ -8162,7 +8356,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.18", + "thiserror 2.0.17", "time", ] @@ -8187,6 +8381,48 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "slatedb" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d19a612f4cf53c6d2e28bab3ce9e184722cec134b03ccfcfe0714f773ae59cc" +dependencies = [ + "async-channel 2.5.0", + "async-trait", + "atomic", + "bitflags 2.10.0", + "bytemuck", + "bytes", + "chrono", + "crc32fast", + "crossbeam-channel", + "crossbeam-skiplist", + "dotenvy", + "duration-str", + "fail-parallel", + "figment", + "flatbuffers 24.12.23", + "futures", + "log", + "moka", + "object_store 0.11.2", + "once_cell", + "parking_lot", + "radix_trie", + "rand 0.8.5", + "rand_xorshift 0.3.0", + "serde", + "serde_json", + "siphasher", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber", + "ulid", + "uuid", + "walkdir", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -8443,9 +8679,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.17.1" +version = "12.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520cf51c674f8b93d533f80832babe413214bb766b6d7cb74ee99ad2971f8467" +checksum = "b3d8046c5674ab857104bc4559d505f4809b8060d57806e45d49737c97afeb60" dependencies = [ "debugid", "memmap2", @@ -8455,9 +8691,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.17.1" +version = "12.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0de2ee0ffa2641e17ba715ad51d48b9259778176517979cb38b6aa86fa7425" +checksum = "1accb6e5c4b0f682de907623912e616b44be1c9e725775155546669dbff720ec" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -8557,7 +8793,7 @@ dependencies = [ "itertools 0.14.0", "levenshtein_automata", "log", - "lru 0.12.5", + "lru", "lz4_flex 0.11.5", "measure_time", "memmap2", @@ -8579,7 +8815,7 @@ dependencies = [ "tantivy-stacker", "tantivy-tokenizer-api", "tempfile", - "thiserror 2.0.18", + "thiserror 2.0.17", "time", "uuid", "winapi", @@ -8748,11 +8984,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.18" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - 
"thiserror-impl 2.0.18", + "thiserror-impl 2.0.17", ] [[package]] @@ -8768,9 +9004,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.18" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", @@ -8808,9 +9044,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.45" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -8819,22 +9055,22 @@ dependencies = [ "num-conv", "num_threads", "powerfmt", - "serde_core", + "serde", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -8999,6 +9235,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + [[package]] name = "toml_datetime" version = "0.7.5+spec-1.1.0" @@ -9008,6 +9265,20 @@ dependencies = [ "serde_core", ] +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_write", + "winnow 0.7.14", +] + [[package]] name = "toml_edit" version = "0.23.10+spec-1.0.0" @@ -9015,9 +9286,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ "indexmap", - "toml_datetime", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", - "winnow", + "winnow 0.7.14", ] [[package]] @@ -9026,14 +9297,20 @@ version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ - "winnow", + "winnow 0.7.14", ] +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + [[package]] name = "tower" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", @@ -9227,7 +9504,7 @@ dependencies = [ "serde", "serde_json", "syn 2.0.114", - "thiserror 2.0.18", + "thiserror 2.0.17", "unicode-ident", ] @@ -9257,12 +9534,32 @@ dependencies = [ "const_fn", ] +[[package]] +name = "ulid" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe" +dependencies = [ + "rand 0.9.2", + "serde", + "web-time", +] + [[package]] name = "unarray" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "uncased" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] + [[package]] name = "unicase" version = "2.9.0" @@ -9482,18 +9779,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.2+wasi-0.2.9" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -9504,12 +9801,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", - "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -9518,9 +9814,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9528,9 +9824,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", @@ -9541,9 +9837,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -9563,9 +9859,9 @@ 
dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -9931,6 +10227,15 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" +[[package]] +name = "winnow" +version = "0.6.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e90edd2ac1aa278a5c4599b1d89cf03074b610800f866d4026dc199d7929a28" +dependencies = [ + "memchr", +] + [[package]] name = "winnow" version = "0.7.14" @@ -9965,9 +10270,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.51.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "wkb" @@ -10154,9 +10459,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3" [[package]] name = "zmij" -version = "1.0.16" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" +checksum = "ac93432f5b761b22864c774aac244fa5c0fd877678a4c37ebf6cf42208f9c9ec" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index 7a4801a3f24..6e5ea4ee59a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,7 @@ criterion = { version = "0.5", features = [ "html_reports", ] } crossbeam-queue = "0.3" +crossbeam-skiplist = "0.1" datafusion = { version = "51.0.0", default-features = false, features = [ "nested_expressions", "regex_expressions", @@ -173,6 +174,7 @@ serde = { version = "^1" } serde_json = { version = "1" } semver = "1.0" shellexpand = "3.0" +slatedb = "0.3" snafu = "0.8" strum = "0.26" tantivy = { version = "0.24.1", features = ["stopwords"] } diff --git a/docs/src/format/table/mem_wal.md b/docs/src/format/table/mem_wal.md index 74cf3ab72cb..7a096194107 100644 --- a/docs/src/format/table/mem_wal.md +++ b/docs/src/format/table/mem_wal.md @@ -3,27 +3,27 @@ Lance MemTable & WAL (MemWAL) specification describes a Log-Structured-Merge (LSM) tree architecture for Lance tables, enabling high-performance streaming write workloads while maintaining indexed read performance for key workloads including scan, point lookup, vector search and full-text search. -!!!note - MemWAL requires the table to have an [unenforced primary key](index.md#unenforced-primary-key) defined. - ## Overall Architecture ![MemWAL Overview](../../images/mem_wal_overview.png) -### Base Table +A Lance table is called a **base table** under the context of the MemWAL spec. +It must have an [unenforced primary key](index.md#unenforced-primary-key) defined in the table schema. -Under the MemWAL setup, the Lance table is called the **base table**. +On top of the base table, the MemWAL spec defines a set of regions. +Writers write to regions, and data in each region is merged into the base table asynchronously. +An index is kept in the base table for readers to quickly discover the state of all regions at a point of time. -### Region +### MemWAL Region -A **Region** is the main unit to horizontally scale out writes. 
+A **MemWAL Region** is the main unit to horizontally scale out writes. -Each region has exactly one active writer at any time, using **epoch-based fencing** to guarantee single-writer semantics without distributed coordination. -Writers claim a region by incrementing the writer epoch, then write data to that region. -Data in each region is merged into the base table gradually in the background. +Each region has exactly one active writer at any time. +Writers claim a region and then write data to that region. +Data in each region is expected to be merged into the base table asynchronously. -Regions must contain rows that are **mutually exclusive**. -Two regions contain rows with the same primary key, the following scenario can cause data corruption: +Rows of the same primary key must be written to one and only one region. +If two regions contain rows with the same primary key, the following scenario can cause data corruption: 1. Region A receives a write with primary key `pk=1` at time T1 2. Region B receives a write with primary key `pk=1` at time T2 (T2 > T1) @@ -32,100 +32,153 @@ Two regions contain rows with the same primary key, the following scenario can c 5. The row from Region A (older) now overwrites the row from Region B (newer) This violates the expected "last write wins" semantics. -By ensuring each primary key is assigned to exactly one region via the region spec, merge order between regions becomes irrelevant for correctness. +By ensuring each primary key is assigned to exactly one region via the region spec, +merge order between regions becomes irrelevant for correctness. + +See [MemWAL Region Architecture](#memwal-region-architecture) for the complete region architecture. ### MemWAL Index -A **MemWAL Index** is the centralized structure for all MemWAL metadata for a base table. -A table has at most one MemWAL index. -It stores: +A **MemWAL Index** is the centralized structure for all MemWAL metadata on top of a base table. +A table has at most one MemWAL index. It stores: - **Configuration**: Region specs defining how rows map to regions, and which indexes to maintain - **Merge progress**: Last generation merged to base table for each region - **Index catchup progress**: Which merged generation each base table index has been rebuilt to cover -- **Region snapshots**: Point-in-time snapshot of all region states for read optimization +- **Region snapshots**: Snapshot of all region states for read optimization -The index is the source of truth for **configuration** and **merge progress**, but region state snapshots are for read-only optimization (each region's manifest is authoritative for its own state). +The index is the source of truth for **configuration**, **merge progress**, and **index catchup progress**. +Writers and mergers read the MemWAL index to get these configurations before writing. -Writers read the MemWAL index to get configuration (region specs, maintained indexes) before writing. -Readers use the index to get a [snapshot of all region states](#reader-consistency), then query each region's data alongside the base table and merge results at runtime. +Each [region's manifest](#region-manifest) is authoritative for its own state. +Readers use **region snapshots** as a read-only optimization to see a point-in-time view of all regions without the need to open each region manifest. -A [background process](#memwal-index-builder) periodically updates region snapshots by listing regions and loading their manifests. 
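+As an illustration only, one hypothetical region spec (the **Configuration** above) that satisfies the region-exclusivity requirement is hash partitioning of the primary key; the function below is a sketch and not part of the spec.
+
+```rust
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+/// Hypothetical hash-based region spec: a primary key always maps to exactly one
+/// of `num_regions` regions, so no two regions can ever hold the same key.
+fn region_for_primary_key(pk: &str, num_regions: u64) -> u64 {
+    let mut hasher = DefaultHasher::new();
+    pk.hash(&mut hasher);
+    hasher.finish() % num_regions
+}
+```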
See [MemWAL Index Details](#memwal-index-details) for the complete structure. ## Region Architecture ![Region Architecture](../../images/mem_wal_regional.png) -Within a region, writes enter its MemTable and are flushed to the regional WAL for durability. -The MemTable is flushed to storage as a Flushed MemTable based on memory pressure and other conditions. -Flushed MemTables are then asynchronously merged into the base table. +Within a region, writes are stored in an **in-memory table (MemTable)**. +It is also written to the region's **Write-Ahead Log (WAL)** for durability. +The MemTable is periodically **flushed** to storage based on memory pressure and other conditions. +**Flushed MemTables** in storage are then asynchronously **merged** into the base table. ### MemTable -An in-memory Lance table that buffers incoming writes. -Each write inserts a fragment in the MemTable, making data immediately queryable without waiting for persistence. +A MemTable holds rows inserted into the region before flushing to storage. +It serves two purposes: + +1. build up data and related indexes to be flushed to storage as a flushed MemTable +2. allow a reader to potentially access data that is not flushed to storage yet + +#### MemTable Format -In addition to the data fragments, a MemTable maintains: +The complete in-memory format of a MemTable is implementation-specific and out of the scope of this spec. +The Lance core Rust SDK maintains one default implementation and is available through all its language binding SDKs, +but integrations are free to build their own MemTable format depending on the specific use cases, +as long as it follows the MemWAL storage layout and the reader and writer requirements when flushing the MemTable. -- **Primary key bloom filter**: For efficient existence checks during staleness detection -- **In-memory index builders**: Incremental index structures that mirror base table indexes, enabling indexed queries on unflushed data -- **WAL fragment mapping**: Tracks correspondence between MemTable fragment IDs and WAL entry IDs for index remapping during flush +Conceptually, because Lance uses [Arrow for its in-memory data representation](https://arrow.apache.org/docs/format/index.html), +for the ease of explanation in this spec, we will treat MemTable as a list of Arrow record batches, +and each write into the MemTable is a new Arrow record batch. + +#### MemTable Generation + +Based on conditions like memory limit and durability requirements, +a MemTable needs to be **flushed** to storage and discarded. +When that happens, new writes go to a new MemTable and the cycle repeats. +Each MemTable is assigned a monotonically increasing generation number starting from 1. +When the MemTable of generation `N` is discarded, the next MemTable gets assigned generation `N+1`. ### WAL -Write-Ahead Log (WAL) serves as the durable storage of MemTable. -A write to MemTable must be persisted also to the WAL to become fully durable. +WAL serves as the durable storage of all MemTables in a region. +It consists of data in MemTables ordered by generation. Every time we write to the WAL, we call it a **WAL Flush**. +#### WAL Durability + +When a write is flushed to the WAL, that write becomes durable. +Otherwise, if the MemTable is lost, data is also lost. + +Multiple writes can be batched together in a single WAL flush to reduce WAL flush frequency and improve throughput. +The more writes a single WAL flush batches, the longer it takes for a write to be durable. 
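+The sketch below illustrates this trade-off in Rust; the `WalBuffer` type, its thresholds, and the use of the `arrow-array` crate's `RecordBatch` are assumptions for illustration, not part of the spec. Writes accumulate in memory and are drained into a single WAL flush once a size or age threshold is reached, so the durability latency of an individual write grows with the batch size.
+
+```rust
+use std::mem;
+use std::time::{Duration, Instant};
+
+use arrow_array::RecordBatch;
+
+/// Hypothetical write buffer: everything held here is pending and not yet durable.
+struct WalBuffer {
+    pending: Vec<RecordBatch>,
+    pending_bytes: usize,
+    last_flush: Instant,
+    max_bytes: usize,
+    max_age: Duration,
+}
+
+impl WalBuffer {
+    /// Queue one write; returns true when the caller should trigger a WAL flush.
+    fn push(&mut self, batch: RecordBatch) -> bool {
+        self.pending_bytes += batch.get_array_memory_size();
+        self.pending.push(batch);
+        self.pending_bytes >= self.max_bytes || self.last_flush.elapsed() >= self.max_age
+    }
+
+    /// Drain all buffered writes; together they become one WAL entry in a single WAL flush.
+    fn take_for_flush(&mut self) -> Vec<RecordBatch> {
+        self.pending_bytes = 0;
+        self.last_flush = Instant::now();
+        mem::take(&mut self.pending)
+    }
+}
+```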
+ The whole LSM tree's durability is determined by the durability of the WAL. -For example, if WAL is stored in Amazon S3, it has the 99.999999999% durability. +For example, if WAL is stored in Amazon S3, it has 99.999999999% durability. If it is stored in local disk, the data will be lost if the local disk is damaged. -A WAL consists of an ordered sequence of WAL entries starting from 1. Each entry is a Lance format file. -The writer epoch is stored in the Lance file's schema metadata with key `writer_epoch` for fencing validation during replay. +#### WAL Entry + +Each time a WAL flush happens, it adds a new **WAL Entry** to the WAL. +In other words, a WAL consists of an ordered list of WAL entries starting from position 0. +Writers must flush WAL entries in sequential order from lower to higher position. +If WAL entry `N` is not flushed fully, WAL entry `N+1` must not exist in storage. + +### WAL Replay + +**Replaying** a WAL means reading data in the WAL from a lower to a higher position. +This is commonly used to recover the latest MemTable after it is lost, +by reading from the start position of the latest MemTable generation to the highest position in the WAL, +assuming proper fencing to guard against multiple writers to the same region. + +See [Writer Fencing](#writer-fencing) for the full fencing mechanism. + +#### WAL Entry Format + +Each WAL entry is a file in storage following the [Apache Arrow IPC stream format](https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format) to store the batch of writes in the MemTable. +The writer epoch is stored in the stream's Arrow schema metadata with key `writer_epoch` for fencing validation during replay. + +#### WAL Storage Layout Each WAL entry is stored within the WAL directory of the region located at `_mem_wal/{region_id}/wal`. WAL files use bit-reversed 64-bit binary naming to distribute files evenly across the directory keyspace. This optimizes S3 throughput by spreading sequential writes across S3's internal partitions, minimizing throttling. -The filename is the bit-reversed binary representation of the entry ID with suffix `.lance`. +The filename is the bit-reversed binary representation of the entry ID with suffix `.arrow`. -For example, entry ID 5 (binary `000...101`) becomes `1010000000000000000000000000000000000000000000000000000000000000.lance`. +For example, entry ID 5 (binary `000...101`) becomes `1010000000000000000000000000000000000000000000000000000000000000.arrow`. ### Flushed MemTable -A flushed MemTable is a complete Lance table created by flushing the MemTable to storage. +A flushed MemTable is created by flushing the MemTable to storage. +In the Lance MemWAL spec, a flushed MemTable must be a Lance table following the Lance table format spec. !!!note This is called Sorted String Table (SSTable) or Sorted Run in many LSM-tree literatures and implementations. However, since our MemTable is not sorted, we just use the term flushed MemTable to avoid confusion. -Each flushed MemTable has a **generation** number starting from 1 that identifies its relative position among all flushed MemTables in the region. -When MemTable with generation `i` is flushed, the next MemTable gets generation number `i+1`. +#### Flushed MemTable Storage Layout -The MemTable of generation `i` is flushed to `_mem_wal/{region_uuid}/{random_hash}_gen_{i}/` directory, where `{random_hash}` is an 8-character hex value generated at flush time. -The directory content follows [Lance table layout](layout.md). 
+The MemTable of generation `i` is flushed to `_mem_wal/{region_uuid}/{random_hex}_gen_{i}/` directory, +where `{random_hex}` is a random 8-character hex value generated at flush time. +The random hex value is necessary to ensure that if one MemTable flush attempt fails, +the retry can use another directory. +The content within the generation directory follows the [Lance table storage layout](layout.md). -The actual directory path for each generation is recorded in the region manifest's `flushed_generations` list (see [Region Manifest](#region-manifest)). +#### Merging MemTable to Base Table -Generation numbers determine merge order: lower numbers represent older data and must be merged to the base table first to preserve correct upsert semantics. +Generation numbers determine the merge order of flushed MemTables into the base table: +lower numbers represent older data and must be merged to the base table first to preserve correct upsert semantics. ### Region Manifest -Each region has a manifest file containing epoch-based fencing tokens, WAL pointers, and flushed MemTable generation trackers. This is the source of truth for region state. +Each region has a manifest file. This is the source of truth for the state of a region. -The manifest is serialized as a protobuf binary file using the `RegionManifest` message. +#### Region Manifest Contents The manifest contains: -- **Fencing state**: `writer_epoch` (writer fencing token) +- **Fencing state**: `writer_epoch` as the latest writer fencing token; see [Writer Fencing](#writer-fencing) for more details. - **WAL pointers**: `replay_after_wal_id` (last entry flushed to MemTable), `wal_id_last_seen` (last entry seen at manifest update) -- **Generation trackers**: `current_generation` (next generation to flush) -- **Flushed generations**: `flushed_generations` list of generation number and directory path pairs (e.g., generation 1 at `a1b2c3d4_gen_1`) +- **Generation trackers**: `current_generation` (next generation to flush), `flushed_generations` list of generation number and directory path pairs (e.g., generation 1 at `a1b2c3d4_gen_1`) Note: `wal_id_last_seen` is a hint that may be stale since it's not updated on WAL write. +It is updated opportunistically by any reader that can update the region manifest. The manifest itself is atomically written, but recovery must try to get newer WAL files to find the actual state beyond this hint. +The manifest is serialized as a protobuf binary file using the `RegionManifest` message. + 
RegionManifest protobuf message @@ -135,7 +188,10 @@ The manifest itself is atomically written, but recovery must try to get newer WA
-Manifests are versioned starting from 1 and immutable. Each update creates a new manifest file at the next version number. +#### Region Manifest Versioning + +Manifests are versioned starting from 1 and immutable. +Each update creates a new manifest file at the next version number. Updates use put-if-not-exists or file rename to ensure atomicity depending on the storage system. If two processes compete, one wins and the other retries. @@ -152,24 +208,26 @@ To read the latest manifest version: 3. Continue until a version is not found 4. The latest version is the last found version -This approach uses HEAD requests instead of LIST operations in cloud storage, which is generally faster and is friendly to systems like S3 Express that do not support lexicographically sorted listing. - !!!note This works because the write rate to region manifests is significantly lower than read rates. Region manifests are only updated when region metadata changes (MemTable flush), not on every write. This ensures HEAD requests will eventually terminate and find the latest version. +#### Region Manifest Storage Layout + All region manifest versions are stored in `_mem_wal/{region_id}/manifest` directory. Each region manifest version file uses bit-reversed 64-bit binary naming, the same scheme as WAL files. For example, version 5 becomes `1010000000000000000000000000000000000000000000000000000000000000.binpb`. -The region manifest is updated atomically in the following cases: +#### Region Manifest Transaction -| Trigger | Fields Updated | Details | -|---------|----------------|---------| -| [Initialization & Recovery](#initialization--recovery) | `writer_epoch` | Incremented when writer claims the region | -| [MemTable Flush](#memtable-flush) | `replay_after_wal_id`, `wal_id_last_seen`, `current_generation`, `flushed_generations` | After flushing MemTable to storage | -| [MemWAL Index Builder](#memwal-index-builder) | `wal_id_last_seen` | Periodically scans WAL entries and updates hint | -| [Garbage Collector](#garbage-collector) | `flushed_generations` | Removes entries for deleted flushed MemTables | +The following transactions are available against a region manifest: + +| Transaction | Fields Updated | Details | +|-------------|----------------|---------| +| ClaimRegion | `writer_epoch` | Incremented when writer claims the region | +| FlushMemTable | `replay_after_wal_entry_position`, `wal_entry_position_last_seen`, `current_generation`, `flushed_generations` | Update all the fields atomically to mark the successful flushing of a MemTable to storage | +| ReportLastSeenWalEntryPosition | `wal_entry_position_last_seen` | Update the last seen WAL entry position | +| CleanupFlushedGeneration | `flushed_generations` | Removes entries for flushed MemTables that are no longer in use | !!!note WAL flush does **not** update the manifest to keep the hot write path fast. @@ -198,11 +256,11 @@ This field is updated atomically with merge-insert data commits, enabling confli Each entry contains the region UUID and generation number. **Index catchup progress** (`index_catchup`) tracks which merged generation each base table index has been rebuilt to cover. -When data is merged from a flushed MemTable to the base table, the base table's indexes are rebuilt asynchronously. +When data is merged from a flushed MemTable to the base table, the base table's indexes may be rebuilt asynchronously. During this window, queries should use the flushed MemTable's pre-built indexes instead of scanning unindexed data in the base table. 
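+As an illustration only (the struct and function names below are hypothetical, not defined by this spec), a reader could compare the two progress markers per region to find the merged generations whose data must still be served by the flushed MemTable indexes:
+
+```rust
+use std::ops::RangeInclusive;
+
+/// Hypothetical per-region progress as read from the MemWAL index.
+struct RegionProgress {
+    /// Merge progress: last generation already merged into the base table.
+    last_merged_generation: u64,
+    /// Index catchup: last merged generation covered by the rebuilt base table index.
+    index_covered_generation: u64,
+}
+
+/// Generations that are merged into the base table but not yet covered by the
+/// base table index; queries should still consult the flushed MemTable indexes
+/// for these generations instead of scanning unindexed base table data.
+fn generations_needing_memtable_index(p: &RegionProgress) -> RangeInclusive<u64> {
+    (p.index_covered_generation + 1)..=p.last_merged_generation
+}
+```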
-See [Index Catchup and Read Path](#index-catchup-and-read-path) for details. +See [Indexed Read Plan](#indexed-read-plan) for details. -**Region snapshot fields** (`snapshot_ts_millis`, `num_regions`, `inline_snapshots`) provide a point-in-time snapshot of region states. +**Region snapshot fields** (`snapshot_ts_millis`, `num_regions`, `inline_snapshots`) provide a snapshot of region states. The actual region manifests remain authoritative for region state. When `num_regions` is 0, the `inline_snapshots` field may be `None` or an empty Lance file with 0 rows but proper schema. @@ -363,257 +421,73 @@ Here is a recap of the storage layout with all the files and concepts defined so └── bloom_filter.bin # Primary key bloom filter ``` -## Writer Expectations +## Implementation Expectation -A writer operates on a single region within a single process and may spawn asynchronous tasks for background operations like WAL flush and MemTable flush. +This specification describes the storage layout for the LSM tree architecture. Implementations are free to use any approach to fulfill the storage layout requirements. Once data is written to the expected storage layout, the reader and writer expectations apply. -### Writer Configuration +The specification defines: -Writers can be configured with the following options that affect write behavior: +- **Storage layout**: The directory structure, file formats, and naming conventions for WAL entries, flushed MemTables, region manifests, and the MemWAL index +- **Durability guarantees**: How data is persisted through WAL entries and flushed MemTables +- **Consistency model**: How readers and writers coordinate through manifests and epoch-based fencing -| Option | Description | -|--------|-------------| -| **Durable write** | Each write is persisted to WAL before reporting success. Ensures no data loss on crash, but adds latency for object storage writes. | -| **Indexed write** | Each write refreshes MemTable indexes before reporting success. Ensures new data is immediately searchable via indexes, but adds indexing latency. | +Implementations may choose different approaches for: -Both options can be enabled independently. When disabled: +- In-memory data structures and indexing +- Buffering strategies before WAL flush +- Background task scheduling and concurrency +- Query execution strategies -- **Non-durable writes** buffer data in memory until a flush threshold is reached, accepting potential data loss on crash -- **Non-indexed writes** defer index updates, meaning newly written data may not appear in index-accelerated queries until the next index refresh +As long as the storage layout is correct and the documented invariants are maintained, implementations can optimize for their specific use cases. + +## Writer Expectations -### Synchronous vs Asynchronous Operations +A writer operates on a single region and is responsible for: -Writer operations can be categorized by their synchronous or asynchronous nature: +1. Claiming the region using epoch-based fencing +2. Writing data to WAL entries and flushed MemTables following the [storage layout](#storage-layout) +3. 
Maintaining the region manifest to track WAL and generation progress -| Operation | Mode | Description | -|-----------|------|-------------| -| [Initialization & Recovery](#initialization--recovery) | Synchronous | Claims region and replays WAL entries | -| [Write to MemTable](#write-operations) | Synchronous | Data inserted into in-memory fragments | -| [WAL Flush](#wal-flush) | Configurable | Synchronous with durable writes, asynchronous otherwise | -| [Index Update](#memtable-indexing) | Configurable | Synchronous with indexed writes, asynchronous otherwise | -| [MemTable Flush](#memtable-flush) | Asynchronous | Triggered by thresholds, runs in background | +### Writer Fencing -### Initialization & Recovery +Writers use epoch-based fencing to ensure single-writer semantics per region. -A writer must claim a region before performing any write operations: +To claim a region: 1. Load the latest region manifest 2. Increment `writer_epoch` by one -3. Atomically write a new manifest -4. If the write fails (another writer claimed the epoch), reload the manifest and retry with a higher epoch -5. After initialization, read WAL entries sequentially from `replay_after_wal_id + 1` until not found -6. Replay valid WAL entries (those with `writer_epoch` <= current epoch) to reconstruct the MemTable with 1:1 [WAL fragment mapping](#wal-fragment-mapping-construction) (each WAL entry becomes one MemTable fragment) - -After initialization, the writer updates the [WAL fragment mapping](#wal-fragment-mapping-construction) as new [WAL flushes](#wal-flush) occur. - -### Write Operations - -Each write operation follows this sequence: - -1. Validate incoming records -2. Insert records into the MemTable, creating an in-memory fragment (immediately queryable via full scan) -3. Track the Lance data file in the new fragment for pending WAL flush -4. Optionally trigger WAL flush based on size, count, or time thresholds -5. For [durable writes](#writer-configuration), wait for WAL flush to complete before returning -6. For [indexed writes](#writer-configuration), update MemTable indexes before returning: - - Insert primary keys into the bloom filter - - For each vector column with a base table index: encode and insert using the same index type as base table - - For each index in `maintained_indexes`: update the corresponding index structure +3. Atomically write a new manifest version +4. If the write fails (another writer claimed the epoch), reload and retry with a higher epoch -### WAL Flush - -WAL flush batches pending MemTable fragments into a single Lance data file: - -1. Identify pending (unflushed) fragments in the MemTable -2. Start writing the WAL entry to object storage -3. Stream binary pages from each pending fragment's Lance data file directly to the WAL entry -4. Write the footer containing batched data file metadata and `writer_epoch` in schema metadata -5. Complete the WAL entry write atomically -6. Mark fragments as flushed in the MemTable -7. Update the [WAL fragment mapping](#wal-fragment-mapping-construction) (MemTable fragment IDs in this batch -> WAL entry ID and positions) for index remapping during [MemTable Flush](#memtable-flush) - -!!!note - The region manifest is **not** updated on every WAL flush. The `wal_id_last_seen` field is a hint that can be updated: - - 1. **During MemTable flush** - when the region manifest is updated anyway - 2. **By a background index builder** - which scans WAL entries and updates each region's `wal_id_last_seen` - - This keeps the hot write path fast. 
On recovery, the writer reads WAL entries sequentially starting from `wal_id_last_seen + 1` to discover any WAL entries beyond what the manifest indicates. - -The WAL flush behavior depends on the [durable write](#writer-configuration) option: - -| Mode | Behavior | Result | -|------|----------|--------| -| Durable write | Flush immediately, wait for completion | One or more Lance files per write | -| Non-durable write | Buffer until threshold, return immediately | Batched Lance files (fewer S3 operations) | - -### MemTable Indexing - -MemTable indexing differs from base table indexing to balance write performance with query capability. -Rather than maintaining all base table indexes, MemTables maintain a subset specified in the [MemWAL Index](#memwal-index). - -MemTables maintain a **primary key bloom filter** for efficiently checking whether a primary key exists in a generation. -This enables staleness detection during search queries without requiring expensive point lookups. - -For vector indexes, MemTables use the same index type as the base table (e.g., IVF-PQ with the same centroids and PQ codebook). -This ensures distances are precise and directly comparable across generations. -The centroid assignment also impacts recall, so using the same centroids ensures consistent search quality. - -For full-text search indexes, MemTables inherit tokenizer configuration from base table indexes to ensure consistent tokenization across generations. -Each generation maintains its own corpus statistics (document count, term frequencies) which are aggregated at query time for globally-comparable BM25 scores. - -When a MemTable is flushed to storage: - -1. Indexes are serialized to disk in the flushed MemTable's `_indices/` directory following the Lance table index format -2. The primary key bloom filter is serialized to `bloom_filter.bin` in the generation directory -3. The in-memory index structures may be retained as a cache for readers in the same process - -### WAL Fragment Mapping Construction - -The WAL fragment mapping tracks the correspondence between MemTable fragment IDs and WAL entry IDs. -This mapping is essential for remapping indexes during [MemTable flush](#memtable-flush), since indexes reference MemTable fragment IDs but the flushed MemTable references WAL entry IDs. - -The mapping is structured as: `MemTable fragment ID -> (WAL entry ID, position within entry)` - -Where: - -- **MemTable fragment ID**: The fragment's position in the MemTable (0-indexed within the current generation) -- **WAL entry ID**: The WAL entry containing this fragment's data (relative to `replay_after_wal_id`) -- **Position within entry**: The fragment's position within the WAL entry (since multiple fragments may be batched) - -The mapping is updated in two scenarios: - -1. **[Initialization & Recovery](#initialization--recovery)**: During WAL replay, each replayed WAL entry creates MemTable fragments with 1:1 mapping (one fragment per WAL entry, position 0) -2. **[WAL Flush](#wal-flush)**: After flushing pending fragments to a new WAL entry, the mapping records which MemTable fragments were written to which WAL entry and their positions - -During [MemTable flush](#memtable-flush), indexes are remapped by translating MemTable fragment IDs to the corresponding WAL entry references using this mapping. - -### MemTable Flush - -Flushing the MemTable creates a new flushed MemTable (generation) with data and indexes: - -1. Generate a random 8-character hex prefix (e.g., `a1b2c3d4`) -2. 
Create directory `_mem_wal/{region_uuid}/{random_hash}_gen_{current_generation}/` -3. Identify WAL entries to include (from `replay_after_wal_id + 1` to the last flushed entry) -4. Create table manifest with `base_paths` pointing to the WAL directory -5. Add fragment entries referencing WAL files via `base_id` -6. Remap indexes using the [WAL fragment mapping](#wal-fragment-mapping-construction): - - Read index entries referencing MemTable fragment IDs - - Translate to flushed MemTable fragment IDs using the mapping - - Write remapped indexes to `_mem_wal/{region_uuid}/{random_hash}_gen_{current_generation}/_indices/` -7. Write the manifest to `_mem_wal/{region_uuid}/{random_hash}_gen_{current_generation}/_versions/{version}.manifest` (using [V2 naming scheme](transaction.md#manifest-naming-schemes)) -8. Update the region manifest: - - Advance `replay_after_wal_id` to the last flushed WAL entry - - Update `wal_id_last_seen` - - Increment `current_generation` - - Append `(current_generation, {random_hash}_gen_{current_generation})` to `flushed_generations` - -The random prefix ensures that flush retries write to a new directory, avoiding conflicts with partially written files from failed attempts. Only the directory recorded in `flushed_generations` is considered valid. - -If the writer crashes before completing MemTable flush, the new writer replays WAL entries into memory with 1:1 [WAL fragment mapping](#wal-fragment-mapping-construction), rebuilds the in-memory indexes, and can then perform a fresh MemTable flush with a new random prefix. - -### Writer Fencing - -Before any manifest update (MemTable flush), a writer must verify its `writer_epoch` remains valid: +Before any manifest update, a writer must verify its `writer_epoch` remains valid: - If `local_writer_epoch == stored_writer_epoch`: The writer is still active and may proceed - If `local_writer_epoch < stored_writer_epoch`: The writer has been fenced and must abort -Fenced writers must stop all operations immediately and notify pending writes of the failure. - -For a concrete example of fencing between two writers, see [Appendix 1: Writer Fencing Example](#appendix-1-writer-fencing-example). +For a concrete example, see [Appendix 1: Writer Fencing Example](#appendix-1-writer-fencing-example). ## Background Job Expectations -Background jobs run independently from writers and handle asynchronous maintenance tasks. +Background jobs handle merging flushed MemTables to the base table and garbage collection. ### MemTable Merger -Flushed MemTables are merged to the base table in generation order using Lance's merge-insert operation. - -#### Merge Workflow - -1. Read `merged_generations[region_id]` -2. Load the region manifest and identify unmerged flushed MemTables from `flushed_generations`: those with generation numbers > `merged_generations[region_id]` -3. For each flushed MemTable in ascending generation order: - - Look up the directory path from `flushed_generations` - - Open it as a Lance table - - Execute merge-insert into the base table, atomically updating the MemWAL Index: - - Set `merged_generations[region_id]` to this generation - - On commit conflict, apply [conflict resolution rules](#conflict-resolution-and-concurrency) -4. After merge, the flushed MemTable and its referenced WAL files may be garbage collected (see [Garbage Collector](#garbage-collector)) - -Ordered merge ensures correct upsert semantics: flushed MemTables with higher generation numbers overwrite those with lower numbers. 
+Flushed MemTables must be merged to the base table in **ascending generation order** within each region. This ordering is essential for correct upsert semantics: newer generations must overwrite older ones. -#### Conflict Resolution and Concurrency +The merge uses Lance's merge-insert operation with atomic transaction semantics: -Multiple mergers may operate on the same region concurrently. This is safe due to: - -1. **Atomic update**: `merged_generations` is updated atomically with the data commit -2. **Conflict resolution**: When a merge-insert commit encounters a version conflict, the merger reads the conflicting commit's `merged_generations`. If `merged_generations[region_id] >= my_generation`, abort without retry (data already merged or superseded). Otherwise, retry the commit as normal. -3. **Merge-insert idempotency**: If two mergers merge the same generation before either commits, both write identical data (primary key upsert semantics) - -After aborting due to a conflict, reload the MemWAL Index and region manifest, then continue to the next unmerged generation. - -`merged_generations` is the single source of truth for merge progress. -If a merger crashes after committing, the next merger reads the MemWAL Index to determine which generations are already merged. +- `merged_generations[region_id]` is updated atomically with the data commit +- On commit conflict, check the conflicting commit's `merged_generations` to determine if the generation was already merged For a concrete example, see [Appendix 2: Concurrent Merger Example](#appendix-2-concurrent-merger-example). -### MemWAL Index Builder - -A background process periodically builds a new region snapshot: - -1. Load the existing MemWAL Index to preserve configuration (`region_specs`, `maintained_indexes`) and merge progress (`merged_generations`) -2. List all region directories under `_mem_wal/` -3. For each region: - - Load the region manifest - - Scan WAL entries sequentially to find the actual last entry ID - - If the observed WAL ID is greater than `wal_id_last_seen`, update the region manifest (ignore errors since this is best-effort) - - Copy manifest fields (including `flushed_generations`) into a region snapshot row -4. Determine storage strategy based on region count: - - If `num_regions <= threshold`: Serialize as Lance file bytes to `inline_snapshots` - - If `num_regions > threshold`: Write as Lance file to `_indices/{UUID}/index.lance` -5. Create new `MemWalIndexDetails` with preserved configuration, merge progress, and new region snapshots -6. Update the table manifest with the new index metadata - -This process serves two purposes: - -- Keeps `wal_id_last_seen` up-to-date in region manifests (since writers don't update it on every WAL flush) -- Provides readers with an efficient snapshot of all region states - -The build frequency is implementation-defined. More frequent builds reduce staleness but increase I/O overhead. - -### Base Table Index Builder - -A background process rebuilds base table indexes to cover newly merged data and updates `index_catchup` progress in the MemWAL Index. -Typically there is a dedicated builder for each index. - -The index builder workflow is expected to be: -1. Rebuild the base table index to the latest state, this automatically covers all merged generations -2. Read the current `merged_generations` -3. Update the MemWAL Index atomically: - - Set `index_catchup[index_name].caught_up_generations` to match `merged_generations` -4. 
On commit conflict, reload the MemWAL Index and retry - ### Garbage Collector -The garbage collector removes obsolete data from the region directory and updates the region manifest to remove entries from `flushed_generations` for deleted flushed MemTables. - -Eligible for deletion: +The garbage collector removes obsolete data from region directories. Flushed MemTables and their referenced WAL files may be deleted after: -1. **Flushed MemTable directories**: Generation directories where `generation <= merged_generations[region_id]` AND `generation <= min(index_catchup[I].caught_up_generation)` for all maintained indexes -2. **WAL data files**: Files referenced only by deleted generations -3. **Old region manifest versions**: Versions older than the current version minus a retention threshold -4. **Orphaned directories**: Directories matching `*_gen_*` pattern but not in `flushed_generations` (from failed flush attempts) - -**Index catchup consideration**: Flushed MemTables must be retained until all base table indexes have caught up. -Since flushed MemTables contain pre-built indexes, they are used for indexed queries when the base table index has not yet been rebuilt to cover the merged data. -Only after all indexes in `maintained_indexes` have `caught_up_generation >= generation` can a flushed MemTable be safely deleted. - -**Time travel consideration**: Garbage collection must not remove generations that are reachable by any retained base table version. When a reader opens an older table version, the MemWAL Index snapshot from that version references specific `merged_generations` values. Generations that satisfy `generation > merged_generations[region_id]` for any retained table version must be preserved. - -Garbage collection must verify that no flushed MemTable still references a WAL file before deletion. +1. The generation has been merged to the base table (`generation <= merged_generations[region_id]`) +2. All maintained indexes have caught up (`generation <= min(index_catchup[I].caught_up_generation)`) +3. No retained base table version references the generation for time travel ## Reader Expectations @@ -704,119 +578,6 @@ the generations in the gap `(index_gen, merged_gen]` have data already merged in Since flushed MemTables contain pre-built indexes (created during [MemTable flush](#memtable-flush)), queries can use these indexes instead of scanning unindexed data in the base table. This ensures all reads remain indexed regardless of how far behind the async index builder is. -See [Appendix 4: Index Catchup Example](#appendix-4-index-catchup-example) for a detailed timeline showing how this works in practice. - -### Query Execution - -Query execution unions datasets within each region and deduplicates by primary key according to [LSM tree merge read](#lsm-tree-merge-read). - -The next few subsections go through the query plan expectations using custom execution nodes optimized for MemWAL's data model. - -All query plans assume the following MemWAL setup: - -``` -base_table: shared across all regions (gen -1) - -region_A: - gen 1: flushed_gen_1 - gen 2: in_memory_memtable - -region_B: - gen 1: flushed_gen_1 - gen 2: flushed_gen_2 - gen 3: in_memory_memtable -``` - -Existing Lance index optimizations (scalar indexes, fragment pruning, etc.) continue to apply within each scan and is omitted. -See [Appendix 3: Execution Nodes](#appendix-3-execution-nodes) for uncommon execution nodes we use here for optimized performance. 
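+A minimal sketch of this source-selection rule, using hypothetical names (`ReadSource`, `plan_read_sources`) that are not part of the Lance API:
+
+```rust
+/// One indexed source per generation in a region; illustrative only.
+enum ReadSource {
+    /// Data merged into the base table and covered by the rebuilt base table index.
+    BaseTableIndex,
+    /// A flushed MemTable generation, read through its pre-built indexes.
+    FlushedMemTableIndex(u64),
+    /// The active in-memory MemTable generation.
+    InMemoryMemTable(u64),
+}
+
+fn plan_read_sources(
+    caught_up_generation: u64,   // lowest catch-up point across maintained indexes
+    flushed_generations: &[u64], // flushed MemTables still retained for this region
+    current_generation: u64,     // active in-memory MemTable
+) -> Vec<ReadSource> {
+    let mut sources = vec![ReadSource::BaseTableIndex];
+    for &gen in flushed_generations {
+        // Any flushed generation newer than what the base table index covers is
+        // served from its own pre-built index, whether or not it is merged yet.
+        if gen > caught_up_generation {
+            sources.push(ReadSource::FlushedMemTableIndex(gen));
+        }
+    }
+    sources.push(ReadSource::InMemoryMemTable(current_generation));
+    sources
+}
+```
+
+Every source in such a plan is index-backed, which is the guarantee stated above.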
- -#### Scan Queries - -For scan queries, the base table is scanned once and each region's MemTables are scanned separately. -Deduplication happens per primary key across all generations. - -``` -DeduplicateExec: partition_by=[pk], order_by=[_gen DESC, _rowaddr DESC] - UnionExec - # Base table (shared) - ScanExec: base_table[gen=-1], filter=[pushed_down] - # Region A MemTables - ScanExec: region_A[gen=2], filter=[pushed_down] - ScanExec: region_A[gen=1], filter=[pushed_down] - # Region B MemTables - ScanExec: region_B[gen=3], filter=[pushed_down] - ScanExec: region_B[gen=2], filter=[pushed_down] - ScanExec: region_B[gen=1], filter=[pushed_down] -``` - -#### Point Lookups - -Primary key-based point lookups first determine the target region using the region spec, then short-circuit by checking newest generations first within that region, falling back to the base table. - -Bloom filters optimize point lookups by skipping generations that definitely don't contain the key: - -1. Check the bloom filter for each MemTable generation (newest first) -2. If the bloom filter returns negative, skip that generation (key definitely not present) -3. If the bloom filter returns positive, try to take last matching row of that generation -4. If the key is found, return immediately without checking older generations - -``` -# After region pruning: only region_A needs to be checked -# Bloom filters checked before each scan to skip unnecessary I/O -CoalesceFirstExec: return_first_non_null - BloomFilterGuardExec: bf[region_A][gen=2] - TakeLastExec: region_A[gen=2], filter=[pk = target] - BloomFilterGuardExec: bf[region_A][gen=1] - TakeLastExec: region_A[gen=1], filter=[pk = target] - TakeLastExec: base_table[gen=-1], filter=[pk = target] -``` - -#### Vector Search Queries - -Vector search uses bloom filters to detect stale results across all generations. - -``` -GlobalLimitExec: limit=k - SortExec: order_by=[_dist ASC] - FilterStaleExec: bloom_filters=[bf[region_A][gen=2], bf[region_A][gen=1], bf[region_B][gen=3], bf[region_B][gen=2], bf[region_B][gen=1]] - UnionExec - # Base table (shared) - KNNExec: base_table[gen=-1], k=k - # Region A MemTables - KNNExec: region_A[gen=2], k=k - KNNExec: region_A[gen=1], k=k - # Region B MemTables - KNNExec: region_B[gen=3], k=k - KNNExec: region_B[gen=2], k=k - KNNExec: region_B[gen=1], k=k -``` - -For each candidate from generation G, `FilterStaleExec` checks if the primary key exists in bloom filters of generations > G. -If found, the candidate is filtered out because a newer version exists that was not as relevant to the query. - -#### Full-Text Search Queries - -Full-text search aggregates corpus statistics across all generations for globally-comparable BM25 scores. 
- -``` -GlobalLimitExec: limit=k - SortExec: order_by=[_bm25 DESC] - FilterStaleExec: bloom_filters=[bf[region_A][gen=2], bf[region_A][gen=1], bf[region_B][gen=3], bf[region_B][gen=2], bf[region_B][gen=1]] - GlobalBM25Exec # Aggregates stats across all generations - UnionExec - # Base table (shared) - FTSExec: base_table[gen=-1], query="search terms" - # Region A MemTables - FTSExec: region_A[gen=2], query="search terms" - FTSExec: region_A[gen=1], query="search terms" - # Region B MemTables - FTSExec: region_B[gen=3], query="search terms" - FTSExec: region_B[gen=2], query="search terms" - FTSExec: region_B[gen=1], query="search terms" -``` - -`GlobalBM25Exec` collects document counts and term frequencies from all FTS indexes, computes global BM25 parameters, and passes them to each `FTSExec` for comparable scoring. - ## Appendices ### Appendix 1: Writer Fencing Example @@ -929,78 +690,3 @@ The MemWAL Index is the single source of truth. Merger B correctly used it to de 2. **Conflict resolution uses MemWAL Index**: When a commit conflicts, the merger checks the conflicting commit's MemWAL Index. 3. **No progress regression**: Because MemWAL Index is updated atomically with data, concurrent mergers cannot regress the merge progress. - -### Appendix 3: Execution Nodes - -This appendix describes custom execution nodes for MemWAL query execution. - -#### DeduplicateExec - -Deduplicates rows by primary key, keeping the row with highest `(_gen, _rowaddr)`. -Since each dataset has a fixed `_gen` and rows are naturally ordered by `_rowaddr`, this can be implemented as a streaming operator without full materialization. - -#### TakeLastExec - -Efficiently finds the last matching row for a filter predicate without full scan. -If the primary key has a btree index, directly queries the btree to get the result. -Otherwise, scans fragments in reverse order and within each fragment takes the last matching row. -Returns immediately upon finding a match, avoiding unnecessary I/O on earlier fragments. - -#### CoalesceFirstExec - -Returns the first non-empty result from multiple inputs with short-circuit evaluation. -Inputs are evaluated lazily in order; on first non-empty result, remaining inputs are not evaluated. - -#### FilterStaleExec - -Filters out rows that have a newer version in a higher generation. -For each candidate with primary key `pk` from generation G, checks bloom filters of generations > G. -If the bloom filter indicates the key may exist in a newer generation, the candidate is filtered out. -False positives from bloom filters may cause some valid results to be filtered, but this is acceptable for search workloads where approximate results are expected. - -#### BloomFilterGuardExec - -Guards a child execution node with a bloom filter check. -Given a primary key, checks the bloom filter before executing the child node. -If the bloom filter returns negative (key definitely not present), returns empty without executing the child. -If the bloom filter returns positive (key may be present), executes the child node normally. -Used in point lookups to skip unnecessary scans of generations that don't contain the target key. - -### Appendix 4: Index Catchup Example - -This example demonstrates how `index_catchup` enables indexed reads during async index rebuilding. 
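+The conflict check described in Appendix 2 can be sketched as follows (hypothetical types and signature; the actual conflict-resolution code may differ):
+
+```rust
+use std::collections::HashMap;
+
+/// Outcome of a merge-insert commit conflict for one flushed generation.
+enum ConflictOutcome {
+    /// The conflicting commit already recorded this (or a newer) generation as
+    /// merged for the region: abort without retrying.
+    AlreadyMerged,
+    /// The conflict is unrelated to this region's merge progress: retry.
+    Retry,
+}
+
+/// `their_merged_generations` is read from the conflicting commit's MemWAL Index.
+fn resolve_merge_conflict(
+    their_merged_generations: &HashMap<String, u64>, // region UUID -> merged generation
+    region_uuid: &str,
+    my_generation: u64,
+) -> ConflictOutcome {
+    match their_merged_generations.get(region_uuid) {
+        Some(&merged) if merged >= my_generation => ConflictOutcome::AlreadyMerged,
+        _ => ConflictOutcome::Retry,
+    }
+}
+```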
- -#### Scenario Setup - -``` -Generation: 1 2 3 4 5 6 - | | | | | | -State: merged merged merged merged flushed active - | | | | | -Base IVF index: [-- covers 1-3 --] | | - ↑ ↑ ↑ - index_gen=3 merged_gen=4 | - current_gen=6 -``` - -In this example: - -- **Generations 1-4** have been merged to the base table (`merged_gen=4`) -- **Base IVF index** has only been rebuilt to cover generations 1-3 (`index_gen=3`) -- **Generation 4** is in the base table but NOT covered by the base IVF index -- **Generation 5** is flushed to disk (not yet merged to base table) -- **Generation 6** is the active in-memory MemTable - -#### Example Read Strategy for Vector Search - -Without `index_catchup` tracking, the query planner would need to perform an expensive full scan on the base table for generation 4. -With `index_catchup`, the planner knows exactly which data is indexed and can use flushed MemTable indexes for the gap: - -| Data Source | Generations | Strategy | -|-------------|-------------|----------| -| Base table with IVF index | 1-3 | Use base table's IVF index | -| Flushed MemTable gen 4 | 4 | Use flushed MemTable's IVF index | -| Flushed MemTable gen 5 | 5 | Use flushed MemTable's IVF index | -| Active MemTable | 6 | Use in-memory IVF index | - -All data sources provide indexed access, maintaining query performance during async index rebuild. diff --git a/docs/src/images/mem_wal_regional.png b/docs/src/images/mem_wal_regional.png index 451ee95414f..5681fa27b8b 100644 Binary files a/docs/src/images/mem_wal_regional.png and b/docs/src/images/mem_wal_regional.png differ diff --git a/result.log b/result.log new file mode 100644 index 00000000000..4df6b442121 --- /dev/null +++ b/result.log @@ -0,0 +1,7447 @@ +[ec2-user@ip-172-31-76-18 lance]$ RUST_LOG=debug DATASET_PREFIX=s3://gh-dev-us-east-1/t39 cargo test -p lance region_writer_tests::test_region_writer_s3 -- --nocapture --ignored +warning: lance-linalg@2.1.0-beta.0: fp16kernels feature is not enabled, skipping build of fp16 kernels + Compiling lance v2.1.0-beta.0 (/data/lance/rust/lance) + Finished `test` profile [unoptimized + debuginfo] target(s) in 1m 56s + Running unittests src/lib.rs (target/debug/deps/lance-0b12383e8102b815) + +running 1 test +[TEST] Creating dataset at s3://gh-dev-us-east-1/t39/test_s3_a741fe1c-cd8d-4fe7-9fea-35a0a97fcc23 +[TEST] Creating IVF-PQ index on dataset... 
+[TEST] IVF-PQ index creation took 25.32551982s
+[TEST] Config: flush_interval=Some(100ms), wal_buffer_size=10485760
+[07:43:45.917][WRITER] Creating wal_flusher with flush_interval=Some(100ms)
+[07:43:45.917][EXECUTOR] Adding handler task 'wal_flusher'
+[07:43:45.917][WRITER] Adding memtable_flusher handler
+[07:43:45.917][EXECUTOR] Adding handler task 'memtable_flusher'
+[07:43:45.917][WRITER] memtable_flusher handler added
+[07:43:45.917][EXECUTOR] Adding handler task 'writer'
+[TEST] Writing 10000 batches x 20 rows = 200000 total rows
+[07:43:52.235][PUT] put #0
+[07:43:52.235][EXECUTOR] Task 'wal_flusher' spawned and starting
+[07:43:52.235][DISPATCHER] Task 'wal_flusher' started, has 0 tickers
+[07:43:52.235][EXECUTOR] Task 'memtable_flusher' spawned and starting
+[07:43:52.235][DISPATCHER] Task 'memtable_flusher' started, has 0 tickers
+[07:43:52.235][EXECUTOR] Task 'writer' spawned and starting
+[07:43:52.235][DISPATCHER] Task 'writer' started, has 0 tickers
+[07:43:52.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
+[TEST] Wrote 100 batches in 12.985678ms
+[07:43:52.248][PUT] put #100
+[07:43:52.255][WAL_SIZE_TRIGGER] total_bytes=10519112, threshold #1 crossed (10MB), triggering flush to batch #157
+[07:43:52.255][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 157, done: None }
+[07:43:52.255][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #157, starting flush
+[07:43:52.255] [WAL_FLUSH] Flushing range 0..157 (157 batches, 3140 rows) to WAL #1
+[TEST] Wrote 200 batches in 40.473111ms
+[07:43:52.275][PUT] put #200
+[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage {
batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.286][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.287][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.288][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.288][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.288][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.288][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.288][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.289][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.290][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.290][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 300 batches in 55.778367ms +[07:43:52.290][PUT] put #300 +[07:43:52.290][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.290][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 
20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.291][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][WAL_SIZE_TRIGGER] total_bytes=21005456, threshold #2 crossed (20MB), triggering flush to batch #314 +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.292][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.293][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.294][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.295][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.296][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.297][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.298][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.299][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.300][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 400 batches in 67.385989ms +[07:43:52.302][PUT] put #400 +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 
20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.309][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][WAL_SIZE_TRIGGER] total_bytes=31491800, threshold #3 crossed (30MB), triggering flush to batch #471 +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 500 batches in 80.473942ms +[07:43:52.315][PUT] put #500 +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.318][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.319][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.320][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.321][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.322][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.323][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.324][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.325][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.326][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.327][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 600 batches in 93.086627ms +[07:43:52.328][PUT] put #600 +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.328][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.329][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.330][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][WAL_SIZE_TRIGGER] total_bytes=41978144, threshold #4 crossed (40MB), triggering flush to batch #628 +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.331][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.332][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.333][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.334][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.335][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.336][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 700 batches in 105.59352ms +[07:43:52.340][PUT] put #700 +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 
20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.345][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
+[07:43:52.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
+[...] (the same DISPATCHER line repeats once per put; repeated entries between the milestones below are omitted)
+[07:43:52.351][WAL_SIZE_TRIGGER] total_bytes=52464488, threshold #5 crossed (50MB), triggering flush to batch #785
+[TEST] Wrote 800 batches in 118.309071ms
+[07:43:52.353][PUT] put #800
+[TEST] Wrote 900 batches in 130.53951ms
+[07:43:52.365][PUT] put #900
+[07:43:52.370][WAL_SIZE_TRIGGER] total_bytes=62950832, threshold #6 crossed (60MB), triggering flush to batch #942
+[TEST] Wrote 1000 batches in 142.929799ms
+[07:43:52.377][PUT] put #1000
+[07:43:52.390][WAL_SIZE_TRIGGER] total_bytes=73437176, threshold #7 crossed (70MB), triggering flush to batch #1099
+[TEST] Wrote 1100 batches in 155.651028ms
+[07:43:52.390][PUT] put #1100
+[TEST] Wrote 1200 batches in 168.353179ms
+[07:43:52.403][PUT] put #1200
+[07:43:52.410][WAL_SIZE_TRIGGER] total_bytes=83923520, threshold #8 crossed (80MB), triggering flush to batch #1256
+[TEST] Wrote 1300 batches in 180.745743ms
+[07:43:52.415][PUT] put #1300
+[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3
} +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 1400 batches in 193.378319ms +[07:43:52.428][PUT] put #1400 +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.428][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.429][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][WAL_SIZE_TRIGGER] total_bytes=94409864, threshold #9 crossed (90MB), triggering flush to batch #1413 +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.430][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.431][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.432][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.433][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.434][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.435][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.436][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.437][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.438][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.439][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.440][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 1500 batches in 206.730747ms +[07:43:52.441][PUT] put #1500 +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.441][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.442][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.443][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.444][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.445][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.446][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.447][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.448][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.449][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][WAL_SIZE_TRIGGER] total_bytes=104896208, threshold #10 crossed (100MB), triggering flush to batch #1570 +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.450][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.451][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, 
batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.452][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.453][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 1600 batches in 219.182588ms +[07:43:52.454][PUT] put #1600 +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.454][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.455][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, 
batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.456][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.457][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.458][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.459][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.460][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.461][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.462][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.463][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.464][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.465][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 1700 batches in 231.787219ms +[07:43:52.466][PUT] put #1700 +[07:43:52.466][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.467][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 
'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.468][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.469][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][WAL_SIZE_TRIGGER] total_bytes=115382552, threshold #11 crossed (110MB), triggering flush to batch #1727 +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.470][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.471][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 
3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.472][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.473][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.474][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.475][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage 
{ batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.476][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.477][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.478][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 1800 batches in 244.68894ms +[07:43:52.479][PUT] put #1800 +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.479][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.480][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.480][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 }
+[07:43:52.480][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
[... repeated "[DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }" entries (one per batch) condensed; only milestone log lines retained below ...]
+[07:43:52.490][WAL_SIZE_TRIGGER] total_bytes=125868896, threshold #12 crossed (120MB), triggering flush to batch #1884
+[TEST] Wrote 1900 batches in 257.163294ms
+[07:43:52.492][PUT] put #1900
+[TEST] Wrote 2000 batches in 269.683469ms
+[07:43:52.504][PUT] put #2000
+[07:43:52.509][WAL_SIZE_TRIGGER] total_bytes=136355240, threshold #13 crossed (130MB), triggering flush to batch #2041
+[TEST] Wrote 2100 batches in 282.141554ms
+[07:43:52.517][PUT] put #2100
+[07:43:52.529][WAL_SIZE_TRIGGER] total_bytes=146841584, threshold #14 crossed (140MB), triggering flush to batch #2198
+[TEST] Wrote 2200 batches in 294.770391ms
+[07:43:52.529][PUT] put #2200
+[TEST] Wrote 2300 batches in 306.983532ms
+[07:43:52.542][PUT] put #2300
+[07:43:52.548][WAL_SIZE_TRIGGER] total_bytes=157327928, threshold #15 crossed (150MB), triggering flush to batch #2355
+[TEST] Wrote 2400 batches in 319.263645ms
+[07:43:52.554][PUT] put #2400
+[07:43:52.561][DISPATCHER] Task 'writer' received message: WriteBatchMessage {
batch_rows: 20, batch_cols: 3 } +[07:43:52.561][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.561][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.562][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.563][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.564][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.565][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 2500 batches in 331.68051ms +[07:43:52.566][PUT] put #2500 +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.566][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.567][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][WAL_SIZE_TRIGGER] total_bytes=167814272, threshold #16 crossed (160MB), triggering flush to batch #2512 +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.568][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.569][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.570][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.571][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.572][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.573][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.574][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.575][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.576][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.577][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.578][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 2600 batches in 344.330312ms +[07:43:52.579][PUT] put #2600 +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.579][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.580][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.585][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.587][WAL_SIZE_TRIGGER] total_bytes=178300616, threshold #17 crossed (170MB), triggering flush to batch #2669 +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 2700 batches in 356.964818ms +[07:43:52.592][PUT] put #2700 +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.597][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.598][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.599][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.600][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.601][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.602][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.603][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 2800 batches in 369.32116ms +[07:43:52.604][PUT] put #2800 +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.604][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.605][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.606][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][WAL_SIZE_TRIGGER] total_bytes=188786960, threshold #18 crossed (180MB), triggering flush to batch #2826 +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.607][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.608][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.609][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.610][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.611][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.612][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.613][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.614][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.615][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.616][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.616][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.616][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.616][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.616][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
+[... repeated "[DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }" log lines elided between the entries below ...]
+[TEST] Wrote 2900 batches in 381.749759ms
+[07:43:52.616][PUT] put #2900
+[07:43:52.627][WAL_SIZE_TRIGGER] total_bytes=199273304, threshold #19 crossed (190MB), triggering flush to batch #2983
+[TEST] Wrote 3000 batches in 394.302224ms
+[07:43:52.629][PUT] put #3000
+[TEST] Wrote 3100 batches in 406.594949ms
+[07:43:52.641][PUT] put #3100
+[07:43:52.646][WAL_SIZE_TRIGGER] total_bytes=209759648, threshold #20 crossed (200MB), triggering flush to batch #3140
+[TEST] Wrote 3200 batches in 419.163564ms
+[07:43:52.654][PUT] put #3200
+[07:43:52.665][WAL_SIZE_TRIGGER] total_bytes=220245992, threshold #21 crossed (210MB), triggering flush to batch #3297
+[TEST] Wrote 3300 batches in 431.390538ms
+[07:43:52.666][PUT] put #3300
+[TEST] Wrote 3400 batches in 443.672459ms
+[07:43:52.678][PUT] put #3400
+[07:43:52.685][WAL_SIZE_TRIGGER] total_bytes=230732336, threshold #22 crossed (220MB), triggering flush to batch #3454
+[TEST] Wrote 3500 batches in 456.207369ms
+[07:43:52.691][PUT] put #3500
+[07:43:52.694][DISPATCHER] Task
'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.694][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.695][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.696][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.697][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.698][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.699][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.700][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.701][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.702][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 3600 batches in 468.615621ms +[07:43:52.703][PUT] put #3600 +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.703][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.704][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][WAL_SIZE_TRIGGER] total_bytes=241218680, threshold #23 crossed (230MB), triggering flush to batch #3611 +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.705][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.706][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.707][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.708][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.709][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.710][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.711][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.712][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.713][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.714][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.715][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 3700 batches in 481.101645ms +[07:43:52.716][PUT] put #3700 +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.716][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.717][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.718][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.719][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.720][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.721][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.722][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.723][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][WAL_SIZE_TRIGGER] total_bytes=251705024, threshold #24 crossed (240MB), triggering flush to batch #3768 +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.724][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' 
received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.725][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.726][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.727][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 3800 batches in 493.494913ms +[07:43:52.728][PUT] put #3800 +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.728][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.729][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.730][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.731][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.732][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.733][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.734][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.735][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.736][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.737][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.738][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.739][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 3900 batches in 505.653308ms +[07:43:52.740][PUT] put #3900 +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.740][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.741][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.742][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][WAL_SIZE_TRIGGER] total_bytes=262191368, threshold #25 crossed (250MB), triggering flush to batch #3925 +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.743][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.744][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.745][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.746][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.747][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.748][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.749][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.750][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.751][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:52.752][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
+[TEST] Wrote 4000 batches in 518.043689ms
+[07:43:52.753][PUT] put #4000
[...]
+[07:43:52.755][DISPATCHER] Task 'memtable_flusher' received message: FlushMemTable { done: None }
+[07:43:52.755][FLUSH_HANDLER] Received FlushMemTable: batches=4019, remaining=3981, pending_batches=4019
+[07:43:52.755][MEMTABLE_FLUSH] Starting do_flush: 4019 batches
+[07:43:52.755][FREEZE] Starting freeze_memtable
+[07:43:52.755][FREEZE] WAL flush: pending_batches=4019, remaining_capacity=3981
[...]
+[07:43:53.038][WAL_TIME_TRIGGER] total_bytes=268536608, interval elapsed, triggering flush to batch #4020
[...]
+[07:43:53.048][WAL_SIZE_TRIGGER] total_bytes=272677712, threshold #26 crossed (260MB), triggering flush to batch #4082
[...]
+[TEST] Wrote 4100 batches in 816.800049ms
+[07:43:53.051][PUT] put #4100
[...]
+[TEST] Wrote 4200 batches in 832.472053ms
+[07:43:53.067][PUT] put #4200
[...]
+[07:43:53.073][WAL_SIZE_TRIGGER] total_bytes=283164056, threshold #27 crossed (270MB), triggering flush to batch #4239
[...]
+[TEST] Wrote 4300 batches in 849.655944ms
+[07:43:53.084][PUT] put #4300
[...]
+[07:43:53.097][WAL_SIZE_TRIGGER] total_bytes=293650400, threshold #28 crossed (280MB), triggering flush to batch #4396
[...]
+[TEST] Wrote 4400 batches in 862.832419ms
+[07:43:53.097][PUT] put #4400
[...]
+[TEST] Wrote 4500 batches in 870.4199ms
+[07:43:53.105][PUT] put #4500
[...]
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.130][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.130][WAL_SIZE_TRIGGER] total_bytes=304136744, threshold #29 crossed (290MB), triggering flush to batch #4553 +[07:43:53.130][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.131][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.132][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.133][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, 
batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.134][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.135][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 4600 batches in 901.314097ms +[07:43:53.136][PUT] put #4600 +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.136][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, 
batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.137][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.138][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.138][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.138][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.138][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.140][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.140][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.140][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.141][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.142][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.143][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.144][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.144][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.145][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.145][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.145][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.145][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.145][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.146][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.147][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.148][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.150][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.150][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.150][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.150][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.151][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 4700 batches in 917.692484ms +[07:43:53.152][PUT] put #4700 +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.152][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.153][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.153][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.154][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.154][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.154][DISPATCHER] Task 
'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.154][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][WAL_SIZE_TRIGGER] total_bytes=314623088, threshold #30 crossed (300MB), triggering flush to batch #4710 +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.155][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.156][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.157][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.177][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 
3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.178][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.179][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.180][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.181][DISPATCHER] Task 'writer' received message: WriteBatchMessage 
{ batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.182][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.183][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.184][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 4800 batches in 950.475943ms +[07:43:53.185][PUT] put #4800 +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.185][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.186][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.187][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.188][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.189][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.190][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.191][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.192][WAL_SIZE_TRIGGER] total_bytes=325109432, threshold #31 crossed (310MB), triggering flush to batch #4867 +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.193][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.194][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.195][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 4900 batches in 961.681714ms +[07:43:53.196][PUT] put #4900 +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.196][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.217][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.218][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.218][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.218][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.218][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.218][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.219][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.220][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.221][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.222][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.223][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.224][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.225][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.226][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 
} +[07:43:53.227][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.228][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.230][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5000 batches in 995.843667ms +[07:43:53.230][PUT] put #5000 +[07:43:53.230][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.231][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.232][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.233][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.234][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.234][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.234][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][WAL_SIZE_TRIGGER] total_bytes=335608064, threshold #32 crossed (320MB), triggering flush to batch #5024 +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.235][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.236][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.259][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: 
WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.260][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.261][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.262][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.263][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.264][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5100 batches in 1.030714187s +[07:43:53.265][PUT] put #5100 +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.265][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.266][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 
'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.267][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.268][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.269][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.270][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.271][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.272][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.273][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.274][WAL_SIZE_TRIGGER] total_bytes=346040184, threshold #33 crossed (330MB), triggering flush to batch #5179 +[07:43:53.274][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.275][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.276][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5200 batches in 1.066454847s +[07:43:53.301][PUT] put #5200 +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.301][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.302][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.303][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.304][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.305][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.306][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.307][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.308][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.309][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.310][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.311][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.312][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.313][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5300 batches in 1.07959778s +[07:43:53.314][PUT] put #5300 +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.314][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.315][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.316][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.317][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.337][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][WAL_SIZE_TRIGGER] total_bytes=356539608, threshold #34 crossed (340MB), triggering flush to batch #5335 +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.338][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.339][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.340][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.341][DISPATCHER] Task 'writer' 
received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.342][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.343][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.344][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.345][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.346][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5400 batches in 1.111123599s +[07:43:53.346][PUT] put #5400 +[07:43:53.346][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.346][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.348][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.348][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.348][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.348][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.348][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.349][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.350][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.351][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.352][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.353][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.354][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.355][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.356][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.357][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.381][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.382][WAL_SIZE_TRIGGER] total_bytes=367039032, threshold #35 crossed (350MB), triggering flush to batch #5491 +[07:43:53.382][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.383][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 5500 batches in 1.149037023s +[07:43:53.384][PUT] put #5500 +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.384][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.385][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.386][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.387][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.388][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.389][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.390][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.391][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.392][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.393][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.393][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.393][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.393][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.393][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
+[TEST] Wrote 5600 batches in 1.160439477s
+[07:43:53.395][PUT] put #5600
+[07:43:53.420][WAL_SIZE_TRIGGER] total_bytes=377538456, threshold #36 crossed (360MB), triggering flush to batch #5647
+[TEST] Wrote 5700 batches in 1.194819082s
+[07:43:53.429][PUT] put #5700
+[TEST] Wrote 5800 batches in 1.229803926s
+[07:43:53.464][PUT] put #5800
+[07:43:53.465][WAL_SIZE_TRIGGER] total_bytes=388037880, threshold #37 crossed (370MB), triggering flush to batch #5803
+[TEST] Wrote 5900 batches in 1.241486176s
+[07:43:53.476][PUT] put #5900
+[07:43:53.504][WAL_SIZE_TRIGGER] total_bytes=398470000, threshold #38 crossed (380MB), triggering flush to batch #5958
+[TEST] Wrote 6000 batches in 1.277587758s
+[07:43:53.512][PUT] put #6000
+[TEST] Wrote 6100 batches in 1.310587895s
+[07:43:53.545][PUT] put #6100
+[07:43:53.547][WAL_SIZE_TRIGGER] total_bytes=408969424, threshold #39 crossed (390MB), triggering flush to batch #6114
+[07:43:53.554][DISPATCHER] 
Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.554][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.554][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.554][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.555][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.556][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.557][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 6200 batches in 1.346613189s +[07:43:53.581][PUT] put #6200 +[07:43:53.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.581][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 
'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.582][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.583][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.584][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.586][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.587][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.588][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.589][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.590][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.591][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][WAL_SIZE_TRIGGER] total_bytes=419468848, threshold #40 crossed 
(400MB), triggering flush to batch #6270 +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.592][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.593][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.594][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 6300 batches in 1.3609152s +[07:43:53.595][PUT] put #6300 +[07:43:53.595][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.596][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.617][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.618][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.619][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.620][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.621][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.622][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.623][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.624][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.625][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.626][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 6400 batches in 1.392617789s +[07:43:53.627][PUT] put #6400 +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.627][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.628][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.629][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][WAL_SIZE_TRIGGER] total_bytes=429968272, threshold #41 crossed (410MB), 
triggering flush to batch #6426 +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.630][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.631][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.632][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.633][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.634][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.634][DISPATCHER] Task 'writer' received 
message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.634][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.636][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.636][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.636][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.636][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.636][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.637][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.661][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.662][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.663][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.664][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[TEST] Wrote 6500 batches in 1.430696847s +[07:43:53.665][PUT] put #6500 +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.665][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.666][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.667][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } 
+[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.668][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.669][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.670][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { 
batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.671][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.672][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.673][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.674][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.675][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.675][DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 } +[07:43:53.675][WAL_SIZE_TRIGGER] total_bytes=440467696, threshold #42 crossed (420MB), triggering flush to batch #6582 +[07:43:53.675][DISPATCHER] Task 'writer' 
received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }
[... repeated "[DISPATCHER] Task 'writer' received message: WriteBatchMessage { batch_rows: 20, batch_cols: 3 }" lines omitted; only the distinct log events are kept below ...]
+[TEST] Wrote 6600 batches in 1.462413973s
+[07:43:53.697][PUT] put #6600
+[TEST] Wrote 6700 batches in 1.475809656s
+[07:43:53.710][PUT] put #6700
+[07:43:53.715][WAL_SIZE_TRIGGER] total_bytes=450899816, threshold #43 crossed (430MB), triggering flush to batch #6737
+[TEST] Wrote 6800 batches in 1.511967749s
+[07:43:53.747][PUT] put #6800
+[07:43:53.777][WAL_SIZE_TRIGGER] total_bytes=461399240, threshold #44 crossed (440MB), triggering flush to batch #6893
+[TEST] Wrote 6900 batches in 1.543669117s
+[07:43:53.778][PUT] put #6900
+[TEST] Wrote 7000 batches in 1.557446357s
+[07:43:53.792][PUT] put #7000
+[07:43:53.799][WAL_SIZE_TRIGGER] total_bytes=471898664, threshold #45 crossed (450MB), triggering flush to batch #7049
+[TEST] Wrote 7100 batches in 1.573315134s
+[07:43:53.808][PUT] put #7100
+[TEST] Wrote 7200 batches in 1.584943997s
+[07:43:53.819][PUT] put #7200
+[07:43:53.820][BP_TRIGGER] iter=0 batch_store_nearly_full: remaining=799/8000 (<10%), triggering flush and waiting
+[07:43:53.820][TRIGGER] Sending FlushMemTable message and waiting for completion
+test dataset::mem_wal::write::region_writer_tests::test_region_writer_s3 has been running for over 60 seconds
+[07:44:37.936] [WAL_FLUSH] WAL #1 complete: S3=171ms, index=45668ms, 157 batches
+[07:44:37.936][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 314, done: None }
+[07:44:37.936][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #314, starting flush
+[07:44:37.937] [WAL_FLUSH] Flushing range 157..314 (157 batches, 3140 rows) to WAL #3
+[07:45:23.627] [WAL_FLUSH] WAL #3 complete: S3=199ms, index=45680ms, 157 batches
+[07:45:23.627][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 471, done: None } +[07:45:23.627][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #471, starting flush +[07:45:23.628] [WAL_FLUSH] Flushing range 314..471 (157 batches, 3140 rows) to WAL #4 +[07:46:09.317] [WAL_FLUSH] WAL #4 complete: S3=215ms, index=45680ms, 157 batches +[07:46:09.318][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 628, done: None } +[07:46:09.318][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #628, starting flush +[07:46:09.318] [WAL_FLUSH] Flushing range 471..628 (157 batches, 3140 rows) to WAL #5 +[07:46:54.992] [WAL_FLUSH] WAL #5 complete: S3=153ms, index=45664ms, 157 batches +[07:46:54.992][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 785, done: None } +[07:46:54.992][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #785, starting flush +[07:46:54.993] [WAL_FLUSH] Flushing range 628..785 (157 batches, 3140 rows) to WAL #6 +[07:47:40.679] [WAL_FLUSH] WAL #6 complete: S3=172ms, index=45677ms, 157 batches +[07:47:40.680][DISPATCHER] Task 'wal_flusher' received message: TriggerWalFlush { end_batch_id: 942, done: None } +[07:47:40.680][WAL_FLUSH_HANDLER] Received TriggerWalFlush for batch #942, starting flush +[07:47:40.680] [WAL_FLUSH] Flushing range 785..942 (157 batches, 3140 rows) to WAL #7 diff --git a/rust/lance/Cargo.toml b/rust/lance/Cargo.toml index 25c30230b35..702fe7e9702 100644 --- a/rust/lance/Cargo.toml +++ b/rust/lance/Cargo.toml @@ -41,6 +41,7 @@ byteorder.workspace = true bytes.workspace = true chrono.workspace = true clap = { version = "4.1.1", features = ["derive"], optional = true } +crossbeam-skiplist.workspace = true # This is already used by datafusion dashmap = "6" deepsize.workspace = true @@ -81,6 +82,7 @@ humantime = { workspace = true } async_cell = "0.2.2" semver.workspace = true tokio-stream = { workspace = true } +tokio-util = { workspace = true } [target.'cfg(target_os = "linux")'.dev-dependencies] pprof.workspace = true @@ -113,6 +115,7 @@ aws-sdk-s3 = { workspace = true } geoarrow-array = { workspace = true } geoarrow-schema = { workspace = true } geo-types = { workspace = true } +slatedb = { workspace = true } [features] @@ -165,5 +168,13 @@ harness = false name = "random_access" harness = false +[[bench]] +name = "mem_wal_write" +harness = false + +[[bench]] +name = "memtable_read" +harness = false + [lints] workspace = true diff --git a/rust/lance/benches/mem_wal_write.rs b/rust/lance/benches/mem_wal_write.rs new file mode 100644 index 00000000000..ce7979d4077 --- /dev/null +++ b/rust/lance/benches/mem_wal_write.rs @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Benchmark for MemWAL write throughput. +//! +//! ## Running against S3 +//! +//! ```bash +//! export AWS_DEFAULT_REGION=us-east-1 +//! export DATASET_PREFIX=s3://your-bucket/bench/mem_wal +//! cargo bench --bench mem_wal_write +//! ``` +//! +//! ## Running against local filesystem +//! +//! ```bash +//! export DATASET_PREFIX=/tmp/bench/mem_wal +//! cargo bench --bench mem_wal_write +//! ``` +//! +//! ## Configuration +//! +//! - `DATASET_PREFIX`: Base URI for datasets (required, e.g. s3://bucket/prefix or /tmp/bench) +//! - `BATCH_SIZE`: Number of rows per write batch (default: 20) +//! - `NUM_BATCHES`: Total number of batches to write (default: 1000) +//! 
- `DURABLE_WRITE`: yes/no/both (default: no) - whether writes wait for WAL flush +//! - `INDEXED_WRITE`: yes/no/both (default: no) - whether writes update indexes synchronously +//! - `MAX_WAL_BUFFER_SIZE`: WAL buffer size in bytes (default: 1MB from RegionWriterConfig) +//! - `MAX_FLUSH_INTERVAL_MS`: WAL flush interval in milliseconds, 0 to disable (default: 1000ms) +//! - `MAX_MEMTABLE_SIZE`: MemTable size threshold in bytes (default: 64MB from RegionWriterConfig) +//! - `VECTOR_DIM`: Vector dimension for the vector column (default: 512) +//! - `MEMWAL_MAINTAINED_INDEXES`: Comma-separated list of index names to maintain in MemWAL (default: id_btree) +//! - Available indexes: id_btree, text_fts, vector_ivfpq (all created on base table) +//! - Examples: `id_btree`, `id_btree,text_fts`, `vector_ivfpq` +//! - Use `none` to disable MemWAL index maintenance entirely +//! - `SAMPLE_SIZE`: Number of benchmark iterations (default: 10, minimum: 10) + +#![allow(clippy::print_stdout, clippy::print_stderr)] + +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use arrow_array::{ + FixedSizeListArray, Float32Array, Int64Array, RecordBatch, RecordBatchIterator, StringArray, +}; +use arrow_schema::{DataType, Field, Schema as ArrowSchema}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use lance::dataset::mem_wal::{DatasetMemWalExt, MemWalConfig, RegionWriterConfig}; +use lance::dataset::{Dataset, WriteParams}; +use lance::index::vector::VectorIndexParams; +use lance_arrow::FixedSizeListArrayExt; +use lance_index::scalar::ScalarIndexParams; +use lance_index::vector::ivf::IvfBuildParams; +use lance_index::vector::pq::PQBuildParams; +use lance_index::{DatasetIndexExt, IndexType}; +use lance_linalg::distance::DistanceType; +#[cfg(target_os = "linux")] +use pprof::criterion::{Output, PProfProfiler}; +use uuid::Uuid; + +/// Default number of rows per batch. +const DEFAULT_BATCH_SIZE: usize = 20; + +/// Default number of batches to write. +const DEFAULT_NUM_BATCHES: usize = 1000; + +/// Get batch size from environment or use default. +fn get_batch_size() -> usize { + std::env::var("BATCH_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_BATCH_SIZE) +} + +/// Get number of batches from environment or use default. +fn get_num_batches() -> usize { + std::env::var("NUM_BATCHES") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_NUM_BATCHES) +} + +/// Parse yes/no/both env var, returns list of bool values to test. +fn parse_yes_no_both(var_name: &str, default: &str) -> Vec { + let value = std::env::var(var_name) + .unwrap_or_else(|_| default.to_string()) + .to_lowercase(); + match value.as_str() { + "yes" | "true" | "1" => vec![true], + "no" | "false" | "0" => vec![false], + "both" => vec![false, true], + _ => { + eprintln!( + "Invalid {} value '{}', using default '{}'", + var_name, value, default + ); + parse_yes_no_both(var_name, default) + } + } +} + +/// Get durable write settings from environment. +fn get_durable_write_options() -> Vec { + parse_yes_no_both("DURABLE_WRITE", "no") +} + +/// Get indexed write settings from environment. +fn get_indexed_write_options() -> Vec { + parse_yes_no_both("INDEXED_WRITE", "no") +} + +/// Get max WAL buffer size from environment or use default. +fn get_max_wal_buffer_size() -> Option { + std::env::var("MAX_WAL_BUFFER_SIZE") + .ok() + .and_then(|s| s.parse().ok()) +} + +/// Get max flush interval from environment or use default. 
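The `DURABLE_WRITE` and `INDEXED_WRITE` options described above map onto the `durable_write` and `sync_indexed_write` fields of `RegionWriterConfig`. As a rough sketch of what a single benchmark iteration boils down to, using the APIs as they appear in this patch (unwraps stand in for real error handling, and the struct-update syntax assumes the remaining `RegionWriterConfig` fields can be taken from its `Default`):

```rust
use arrow_array::RecordBatch;
use lance::dataset::mem_wal::{DatasetMemWalExt, RegionWriterConfig};
use lance::dataset::Dataset;
use uuid::Uuid;

async fn write_one_region(uri: &str, batches: &[RecordBatch]) {
    // Assumes initialize_mem_wal() has already been called on this dataset.
    let dataset = Dataset::open(uri).await.unwrap();

    // Each run writes into a fresh region.
    let region_id = Uuid::new_v4();
    let config = RegionWriterConfig {
        region_id,
        durable_write: false,      // DURABLE_WRITE=no: put() does not wait for the WAL flush
        sync_indexed_write: false, // INDEXED_WRITE=no: in-memory indexes update asynchronously
        ..RegionWriterConfig::default()
    };

    // Index configs for the maintained indexes are loaded from the dataset automatically.
    let writer = dataset.mem_wal_writer(region_id, config).await.unwrap();
    for batch in batches {
        writer.put(batch.clone()).await.unwrap();
    }
    // close() performs the final WAL flush before returning.
    writer.close().await.unwrap();
}
```

With `DURABLE_WRITE=yes` the same loop blocks on each `put()` until the corresponding WAL entry is persisted, which is exactly the durable vs. non-durable comparison the benchmark matrix measures.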
+fn get_max_flush_interval() -> Option> { + std::env::var("MAX_FLUSH_INTERVAL_MS").ok().map(|s| { + let ms: u64 = s.parse().unwrap_or(0); + if ms == 0 { + None + } else { + Some(Duration::from_millis(ms)) + } + }) +} + +/// Get max memtable size from environment or use default. +fn get_max_memtable_size() -> Option { + std::env::var("MAX_MEMTABLE_SIZE") + .ok() + .and_then(|s| s.parse().ok()) +} + +/// Default vector dimension for benchmarks. +const DEFAULT_VECTOR_DIM: i32 = 512; + +/// Get vector dimension from environment or use default. +fn get_vector_dim() -> i32 { + std::env::var("VECTOR_DIM") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_VECTOR_DIM) +} + +/// Parse MEMWAL_MAINTAINED_INDEXES environment variable. +/// Returns list of index names to maintain in MemWAL. +/// Use "none" to disable indexes entirely. +/// Default: "id_btree" +fn get_maintained_indexes() -> Vec { + let value = + std::env::var("MEMWAL_MAINTAINED_INDEXES").unwrap_or_else(|_| "id_btree".to_string()); + + if value.to_lowercase() == "none" { + return vec![]; + } + + value + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect() +} + +/// Get sample size from environment or use default. +/// Minimum is 10 (Criterion requirement). +fn get_sample_size() -> usize { + std::env::var("SAMPLE_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(10) + .max(10) +} + +/// Format bytes in human-readable form. +fn format_bytes(bytes: u64) -> String { + if bytes >= 1024 * 1024 * 1024 { + format!("{:.2} GB", bytes as f64 / (1024.0 * 1024.0 * 1024.0)) + } else if bytes >= 1024 * 1024 { + format!("{:.2} MB", bytes as f64 / (1024.0 * 1024.0)) + } else if bytes >= 1024 { + format!("{:.2} KB", bytes as f64 / 1024.0) + } else { + format!("{} B", bytes) + } +} + +/// Create test schema for benchmarks. +/// +/// Schema: +/// - id: Int64 (primary key, for BTree index) +/// - vector: FixedSizeList[dim] (for IVF-PQ vector index) +/// - text: Utf8 (for FTS inverted index) +fn create_test_schema(vector_dim: i32) -> Arc { + use std::collections::HashMap; + + // Create id field with primary key metadata + let mut id_metadata = HashMap::new(); + id_metadata.insert( + "lance-schema:unenforced-primary-key".to_string(), + "true".to_string(), + ); + let id_field = Field::new("id", DataType::Int64, false).with_metadata(id_metadata); + + Arc::new(ArrowSchema::new(vec![ + id_field, + Field::new( + "vector", + DataType::FixedSizeList( + Arc::new(Field::new("item", DataType::Float32, true)), + vector_dim, + ), + true, + ), + Field::new("text", DataType::Utf8, true), + ])) +} + +/// Sample text snippets for FTS benchmarking. +const SAMPLE_TEXTS: &[&str] = &[ + "The quick brown fox jumps over the lazy dog", + "Machine learning models require large datasets for training", + "Vector databases enable semantic search capabilities", + "Rust provides memory safety without garbage collection", + "Cloud native applications scale horizontally", + "Data lakehouse combines warehouse and lake benefits", + "Embeddings capture semantic meaning in vector space", + "Columnar storage optimizes analytical query performance", +]; + +/// Create a test batch with the given parameters. 
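The `lance-schema:unenforced-primary-key` field metadata used in `create_test_schema` is what later allows `initialize_mem_wal` to succeed, and the names returned by `get_maintained_indexes` must match indexes that already exist on the base table. A minimal sketch of that initialization step, using `MemWalConfig` as defined in this patch (the dataset URI and index name are placeholders; unwraps for brevity):

```rust
use std::collections::HashMap;
use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema as ArrowSchema};
use lance::dataset::mem_wal::{DatasetMemWalExt, MemWalConfig};
use lance::dataset::Dataset;

async fn init_mem_wal_example() {
    // MemWAL requires an unenforced primary key, declared via Arrow field metadata.
    let id_field = Field::new("id", DataType::Int64, false).with_metadata(HashMap::from([(
        "lance-schema:unenforced-primary-key".to_string(),
        "true".to_string(),
    )]));
    let _schema = Arc::new(ArrowSchema::new(vec![id_field]));

    // ... write the dataset and create an index named "id_btree" on `id`,
    // as `create_dataset` below does ...

    let mut dataset = Dataset::open("/tmp/bench/mem_wal_example").await.unwrap();
    dataset
        .initialize_mem_wal(MemWalConfig {
            region_spec: None, // no region spec: regions are managed manually
            maintained_indexes: vec!["id_btree".to_string()],
        })
        .await
        .unwrap();
}
```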
+fn create_test_batch( + schema: &ArrowSchema, + start_id: i64, + num_rows: usize, + vector_dim: i32, +) -> RecordBatch { + // Generate random vectors (deterministic based on row id for reproducibility) + let vectors: Vec = (0..num_rows) + .flat_map(|i| { + let seed = (start_id as usize + i) as f32; + (0..vector_dim as usize).map(move |d| (seed * 0.1 + d as f32 * 0.01).sin()) + }) + .collect(); + + let vector_array = + FixedSizeListArray::try_new_from_values(Float32Array::from(vectors), vector_dim).unwrap(); + + // Generate text content + let texts: Vec = (0..num_rows) + .map(|i| { + let base_text = SAMPLE_TEXTS[(start_id as usize + i) % SAMPLE_TEXTS.len()]; + format!("{} (row {})", base_text, start_id as usize + i) + }) + .collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int64Array::from_iter_values( + start_id..start_id + num_rows as i64, + )), + Arc::new(vector_array), + Arc::new(StringArray::from_iter_values(texts)), + ], + ) + .unwrap() +} + +/// Number of rows to create in base dataset for index training. +const BASE_DATASET_ROWS: usize = 1000; + +/// Create a Lance dataset with indexes and MemWAL initialized. +/// Uses DATASET_PREFIX environment variable (e.g. s3://bucket/prefix or /tmp/bench). +/// Creates base table indexes (id_btree, text_fts, vector_ivfpq) and initializes MemWAL with specified indexes. +async fn create_dataset( + schema: &ArrowSchema, + name_prefix: &str, + vector_dim: i32, + maintained_indexes: &[String], +) -> Dataset { + use lance_index::scalar::InvertedIndexParams; + + let prefix = std::env::var("DATASET_PREFIX").expect("DATASET_PREFIX not set"); + // Use short random suffix (8 chars) instead of full UUID + let short_id = &Uuid::new_v4().to_string()[..8]; + let uri = format!( + "{}/{}_{}", + prefix.trim_end_matches('/'), + name_prefix, + short_id + ); + + println!("Creating dataset at {} with indexes...", uri); + let start = Instant::now(); + + // Create initial dataset with 1000 rows for index training + let initial_batch = create_test_batch(schema, 0, BASE_DATASET_ROWS, vector_dim); + let batches = RecordBatchIterator::new([Ok(initial_batch)], Arc::new(schema.clone())); + let write_params = WriteParams::default(); + let mut dataset = Dataset::write(batches, &uri, Some(write_params)) + .await + .expect("Failed to create dataset"); + + // Create BTree index on id column + let scalar_params = ScalarIndexParams::default(); + dataset + .create_index( + &["id"], + IndexType::BTree, + Some("id_btree".to_string()), + &scalar_params, + false, + ) + .await + .expect("Failed to create BTree index"); + + // Create FTS index on text column + let fts_params = InvertedIndexParams::default(); + dataset + .create_index( + &["text"], + IndexType::Inverted, + Some("text_fts".to_string()), + &fts_params, + false, + ) + .await + .expect("Failed to create FTS index"); + + // Create IVF-PQ vector index on vector column + // Use small nlist for the small training dataset + let ivf_params = IvfBuildParams::new(16); // 16 partitions for 1000 rows + let pq_params = PQBuildParams::new(16, 8); // 16 sub-vectors, 8 bits + let vector_params = + VectorIndexParams::with_ivf_pq_params(DistanceType::L2, ivf_params, pq_params); + dataset + .create_index( + &["vector"], + IndexType::IvfPq, + Some("vector_ivfpq".to_string()), + &vector_params, + false, + ) + .await + .expect("Failed to create IVF-PQ index"); + + // Initialize MemWAL with specified maintained indexes + dataset + .initialize_mem_wal(MemWalConfig { + region_spec: None, + maintained_indexes: 
maintained_indexes.to_vec(), + }) + .await + .expect("Failed to initialize MemWAL"); + + println!( + "Dataset created in {:?} at {}", + start.elapsed(), + dataset.uri() + ); + + dataset +} + +/// Get storage label from DATASET_PREFIX (e.g. "s3" or "local"). +fn get_storage_label() -> &'static str { + let prefix = std::env::var("DATASET_PREFIX").unwrap_or_default(); + if prefix.starts_with("s3://") { + "s3" + } else if prefix.starts_with("gs://") { + "gcs" + } else if prefix.starts_with("az://") { + "azure" + } else { + "local" + } +} + +/// Build benchmark label from config options. +fn build_label( + num_batches: usize, + batch_size: usize, + durable: bool, + indexed: bool, + storage: &str, +) -> String { + let durable_str = if durable { "durable" } else { "nondurable" }; + // sync_indexed_write controls sync vs async index updates + let indexed_str = if indexed { "sync_idx" } else { "async_idx" }; + format!( + "{}x{} {} {} ({})", + num_batches, batch_size, durable_str, indexed_str, storage + ) +} + +/// Build dataset name prefix from config options. +fn build_name_prefix(durable: bool, indexed: bool) -> String { + let d = if durable { "d" } else { "nd" }; + // sync_indexed_write: sync (si) vs async (ai) + let i = if indexed { "si" } else { "ai" }; + format!("{}_{}", d, i) +} + +/// Benchmark Lance MemWAL write throughput. +fn bench_lance_memwal_write(c: &mut Criterion) { + if std::env::var("DATASET_PREFIX").is_err() { + eprintln!("DATASET_PREFIX not set, skipping benchmarks"); + eprintln!("Set DATASET_PREFIX to s3://bucket/prefix or /tmp/bench"); + return; + } + + let rt = tokio::runtime::Runtime::new().unwrap(); + let batch_size = get_batch_size(); + let num_batches = get_num_batches(); + let vector_dim = get_vector_dim(); + let schema = create_test_schema(vector_dim); + let storage_label = get_storage_label(); + let maintained_indexes = get_maintained_indexes(); + + let durable_options = get_durable_write_options(); + let indexed_options = get_indexed_write_options(); + let max_wal_buffer_size = get_max_wal_buffer_size(); + let max_flush_interval = get_max_flush_interval(); + let max_memtable_size = get_max_memtable_size(); + + println!( + "MemWAL indexes to maintain: {}", + if maintained_indexes.is_empty() { + "none".to_string() + } else { + maintained_indexes.join(", ") + } + ); + + let sample_size = get_sample_size(); + + let mut group = c.benchmark_group("MemWAL Write"); + group.throughput(Throughput::Elements((batch_size * num_batches) as u64)); + group.sample_size(sample_size); + group.warm_up_time(Duration::from_secs(1)); + + // Generate benchmarks for all combinations + for &durable in &durable_options { + for &indexed in &indexed_options { + let label = build_label(num_batches, batch_size, durable, indexed, storage_label); + let name_prefix = build_name_prefix(durable, indexed); + + // Create dataset ONCE before benchmark iterations + // Each iteration will use a different region on the same dataset + let dataset = rt.block_on(create_dataset( + &schema, + &name_prefix, + vector_dim, + &maintained_indexes, + )); + let dataset_uri = dataset.uri().to_string(); + + // Pre-generate all batches before timing (outside iter_custom) + let batches: Arc> = Arc::new( + (0..num_batches) + .map(|i| { + create_test_batch(&schema, (i * batch_size) as i64, batch_size, vector_dim) + }) + .collect(), + ); + + println!( + "Benchmark: {} | Dataset: {} | {} batches x {} rows pre-generated", + label, dataset_uri, num_batches, batch_size + ); + + group.bench_with_input( + BenchmarkId::new("Lance 
MemWAL", &label), + &(batch_size, num_batches, durable, indexed), + |b, &(_batch_size, _num_batches, durable, indexed)| { + let dataset_uri = dataset_uri.clone(); + let batches = batches.clone(); + b.to_async(&rt).iter_custom(|iters| { + let dataset_uri = dataset_uri.clone(); + let batches = batches.clone(); + async move { + let mut total_duration = Duration::ZERO; + + for iter in 0..iters { + // Re-open dataset (cheap operation) + let dataset = Dataset::open(&dataset_uri).await.unwrap(); + + // Create a NEW region for each iteration + let region_id = Uuid::new_v4(); + let default_config = RegionWriterConfig::default(); + let config = RegionWriterConfig { + region_id, + region_spec_id: 0, + durable_write: durable, + sync_indexed_write: indexed, + max_wal_buffer_size: max_wal_buffer_size + .unwrap_or(default_config.max_wal_buffer_size), + max_flush_interval: max_flush_interval + .unwrap_or(default_config.max_flush_interval), + max_memtable_size: max_memtable_size + .unwrap_or(default_config.max_memtable_size), + max_memtable_rows: default_config.max_memtable_rows, + max_memtable_batches: default_config.max_memtable_batches, + ivf_index_partition_capacity_safety_factor: default_config + .ivf_index_partition_capacity_safety_factor, + async_index_buffer_rows: default_config.async_index_buffer_rows, + async_index_interval: default_config.async_index_interval, + manifest_scan_batch_size: default_config + .manifest_scan_batch_size, + max_unflushed_bytes: default_config.max_unflushed_bytes, + max_immutable_memtables: default_config + .max_immutable_memtables, + backpressure_log_interval: default_config.backpressure_log_interval, + stats_log_interval: default_config.stats_log_interval, + }; + + if iter == 0 { + println!( + " Iter {}: Region {} | WAL buffer: {} | Flush interval: {:?} | MemTable size: {} | Indexed writes: {}", + iter, + region_id, + config.max_wal_buffer_size, + config.max_flush_interval, + config.max_memtable_size, + if indexed { "sync" } else { "async" } + ); + } + + // Get writer through Dataset API (index configs loaded automatically) + let writer = + dataset.mem_wal_writer(region_id, config).await.unwrap(); + + // Time writes (excluding close to measure pure put throughput) + let start = Instant::now(); + for batch in batches.iter() { + writer.put(batch.clone()).await.unwrap(); + } + let put_duration = start.elapsed(); + + // Close writer (includes final WAL flush) - measured separately + let close_start = Instant::now(); + let stats = writer.stats(); + writer.close().await.unwrap(); + let close_duration = close_start.elapsed(); + + total_duration += put_duration; + + // Report stats for first iteration + if iter == 0 { + println!( + " Stats: puts={} ({:.0}/s, avg {:?}) | WAL flushes={} ({}) | close={:?}", + stats.put_count, + stats.put_throughput(), + stats.avg_put_latency().unwrap_or_default(), + stats.wal_flush_count, + format_bytes(stats.wal_flush_bytes), + close_duration + ); + } + } + + total_duration + } + }) + }, + ); + } + } + + group.finish(); +} + +#[cfg(target_os = "linux")] +criterion_group!( + name = benches; + config = Criterion::default() + .significance_level(0.05) + .with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_lance_memwal_write +); + +#[cfg(not(target_os = "linux"))] +criterion_group!( + name = benches; + config = Criterion::default().significance_level(0.05); + targets = bench_lance_memwal_write +); + +criterion_main!(benches); diff --git a/rust/lance/benches/memtable_read.rs b/rust/lance/benches/memtable_read.rs new 
file mode 100644 index 00000000000..cd9564dad97 --- /dev/null +++ b/rust/lance/benches/memtable_read.rs @@ -0,0 +1,1126 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Benchmark comparing read performance between MemTable (with MemTableScanner) +//! and in-memory Lance tables. +//! +//! This benchmark tests different read operations: +//! +//! 1. **Scan**: Full table scan returning all rows +//! 2. **Point Lookup**: Scalar index lookup by primary key (BTree index) +//! 3. **Full-Text Search**: Token-based text search (FTS index) +//! 4. **Vector Search**: IVF-PQ vector similarity search +//! +//! ## Running the benchmark +//! +//! ```bash +//! cargo bench --bench mem_read_benchmark +//! ``` +//! +//! ## Configuration +//! +//! - `NUM_ROWS`: Total number of rows (default: 10000) +//! - `BATCH_SIZE`: Number of rows per batch (default: 100) +//! - `VECTOR_DIM`: Vector dimension (default: 128) +//! - `SAMPLE_SIZE`: Number of benchmark iterations (default: 100) + +#![allow(clippy::print_stdout, clippy::print_stderr)] + +use std::sync::Arc; + +use arrow_array::types::Float32Type; +use arrow_array::{ + Array, FixedSizeListArray, Float32Array, Int64Array, RecordBatch, RecordBatchIterator, + StringArray, +}; +use arrow_schema::{DataType, Field, Schema as ArrowSchema}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use futures::TryStreamExt; +use lance::dataset::mem_wal::write::{CacheConfig, IndexRegistry, MemTable}; +use lance::dataset::{Dataset, WriteParams}; +use lance::index::vector::VectorIndexParams; +use lance_arrow::FixedSizeListArrayExt; +use lance_index::scalar::inverted::tokenizer::InvertedIndexParams; +use lance_index::scalar::FullTextSearchQuery; +use lance_index::vector::ivf::storage::IvfModel; +use lance_index::vector::ivf::IvfBuildParams; +use lance_index::vector::kmeans::{train_kmeans, KMeansParams}; +use lance_index::vector::pq::builder::PQBuildParams; +use lance_index::{DatasetIndexExt, IndexType}; +use lance_linalg::distance::{DistanceType, MetricType}; +#[cfg(target_os = "linux")] +use pprof::criterion::{Output, PProfProfiler}; +use rand::Rng; +use uuid::Uuid; + +const DEFAULT_NUM_ROWS: usize = 10000; +const DEFAULT_BATCH_SIZE: usize = 100; +const DEFAULT_VECTOR_DIM: usize = 128; +const DEFAULT_NUM_LOOKUPS: usize = 100; +const DEFAULT_K: usize = 10; + +fn get_num_rows() -> usize { + std::env::var("NUM_ROWS") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_NUM_ROWS) +} + +fn get_batch_size() -> usize { + std::env::var("BATCH_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_BATCH_SIZE) +} + +fn get_vector_dim() -> usize { + std::env::var("VECTOR_DIM") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(DEFAULT_VECTOR_DIM) +} + +fn get_sample_size() -> usize { + std::env::var("SAMPLE_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(100) + .max(10) +} + +/// Create schema: (id: Int64, text: Utf8, vector: FixedSizeList[dim]) +fn create_schema(vector_dim: usize) -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int64, false), + Field::new("text", DataType::Utf8, true), + Field::new( + "vector", + DataType::FixedSizeList( + Arc::new(Field::new("item", DataType::Float32, true)), + vector_dim as i32, + ), + false, + ), + ])) +} + +/// Create a test batch with given parameters. 
+fn create_batch( + schema: &ArrowSchema, + start_id: i64, + num_rows: usize, + vector_dim: usize, +) -> RecordBatch { + let mut rng = rand::rng(); + + // Create IDs + let ids: Vec = (start_id..start_id + num_rows as i64).collect(); + + // Create text with some common words for FTS + let words = [ + "hello", + "world", + "search", + "benchmark", + "lance", + "memory", + "test", + "data", + ]; + let texts: Vec = (0..num_rows) + .map(|i| { + let w1 = words[i % words.len()]; + let w2 = words[(i + 3) % words.len()]; + let w3 = words[(i + 5) % words.len()]; + format!("{} {} {} row_{}", w1, w2, w3, start_id + i as i64) + }) + .collect(); + + // Create vectors (normalized random) + let vectors: Vec = (0..num_rows) + .flat_map(|_| { + let v: Vec = (0..vector_dim).map(|_| rng.random::() - 0.5).collect(); + let norm: f32 = v.iter().map(|x| x * x).sum::().sqrt(); + v.into_iter().map(move |x| x / norm) + }) + .collect(); + + let vector_array = + FixedSizeListArray::try_new_from_values(Float32Array::from(vectors), vector_dim as i32) + .unwrap(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int64Array::from(ids)), + Arc::new(StringArray::from(texts)), + Arc::new(vector_array), + ], + ) + .unwrap() +} + +/// Create a query vector (normalized random). +fn create_query_vector(vector_dim: usize) -> Vec { + let mut rng = rand::rng(); + let v: Vec = (0..vector_dim).map(|_| rng.random::() - 0.5).collect(); + let norm: f32 = v.iter().map(|x| x * x).sum::().sqrt(); + v.into_iter().map(|x| x / norm).collect() +} + +/// Generate random IDs for point lookups. +fn generate_random_ids(max_id: i64, count: usize) -> Vec { + let mut rng = rand::rng(); + (0..count).map(|_| rng.random_range(0..max_id)).collect() +} + +/// Train IVF centroids and PQ codebook from vectors. +fn train_ivf_pq_models( + batches: &[RecordBatch], + vector_dim: usize, + num_partitions: usize, + num_sub_vectors: usize, + distance_type: DistanceType, +) -> (IvfModel, lance_index::vector::pq::ProductQuantizer) { + // Collect all vectors into a single array + let mut all_vectors: Vec = Vec::new(); + for batch in batches { + let vector_col = batch.column_by_name("vector").unwrap(); + let fsl = vector_col + .as_any() + .downcast_ref::() + .unwrap(); + let values = fsl + .values() + .as_any() + .downcast_ref::() + .unwrap(); + all_vectors.extend_from_slice(values.values()); + } + + let vectors_array = Float32Array::from(all_vectors); + + // Train IVF centroids + let kmeans_params = KMeansParams::new(None, 50, 1, distance_type); + let kmeans = train_kmeans::( + &vectors_array, + kmeans_params, + vector_dim, + num_partitions, + 256, + ) + .unwrap(); + + // kmeans.centroids is a flat Float32Array, need to convert to FixedSizeListArray + let centroids_flat = kmeans + .centroids + .as_any() + .downcast_ref::() + .expect("Centroids should be Float32Array") + .clone(); + + let centroids_fsl = + FixedSizeListArray::try_new_from_values(centroids_flat, vector_dim as i32).unwrap(); + + let ivf_model = IvfModel::new(centroids_fsl, None); + + // Train PQ codebook + let vectors_fsl = + FixedSizeListArray::try_new_from_values(vectors_array, vector_dim as i32).unwrap(); + + let pq_params = PQBuildParams::new(num_sub_vectors, 8); + let pq = pq_params.build(&vectors_fsl, distance_type).unwrap(); + + (ivf_model, pq) +} + +/// Setup MemTable with all indexes (BTree on id, FTS on text, IVF-PQ on vector). 
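Before the full setup helper that follows, note how rows become visible: each inserted batch carries a monotonically increasing sequence number, and a scan is handed a `visibility_seq` that appears to bound which batches it may observe (the benchmarks pass the number of inserted batches so that everything is visible). A minimal sketch of that contract, reusing the constructor arguments exactly as `setup_memtable` does; the empty index registry and the visibility semantics are assumptions inferred from how this file uses the API:

```rust
use arrow_array::RecordBatch;
use futures::TryStreamExt;
use lance::dataset::mem_wal::write::{CacheConfig, IndexRegistry, MemTable};

async fn memtable_visibility_sketch(batches: Vec<RecordBatch>) {
    let schema = batches[0].schema();
    let num_batches = batches.len();

    // Same constructor arguments as setup_memtable below; an empty registry here,
    // where the real helper registers BTree, FTS, and IVF-PQ indexes.
    let mut memtable =
        MemTable::with_capacity(schema, 1, vec![0], CacheConfig::default(), num_batches).unwrap();
    memtable.set_indexes(IndexRegistry::new());

    for (seq, batch) in batches.into_iter().enumerate() {
        // Sequence numbers start at 1 and increase with every insert.
        memtable.insert_with_seq(batch, (seq + 1) as u64).await.unwrap();
    }

    // A scan at visibility_seq = num_batches should observe every inserted batch.
    let visible: Vec<RecordBatch> = memtable
        .scan(num_batches as u64)
        .try_into_stream()
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();
    assert!(visible.iter().map(|b| b.num_rows()).sum::<usize>() > 0);
}
```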
+async fn setup_memtable( + batches: Vec, + vector_dim: usize, + num_partitions: usize, + num_sub_vectors: usize, +) -> MemTable { + let schema = batches[0].schema(); + let num_batches = batches.len(); + + // Train IVF-PQ models from the data + let (ivf_model, pq) = train_ivf_pq_models( + &batches, + vector_dim, + num_partitions, + num_sub_vectors, + DistanceType::L2, + ); + + // Create index registry + let mut registry = IndexRegistry::new(); + registry.add_btree("id_idx".to_string(), "id".to_string()); + registry.add_fts("text_idx".to_string(), "text".to_string()); + registry.add_ivf_pq( + "vector_idx".to_string(), + "vector".to_string(), + ivf_model, + pq, + DistanceType::L2, + ); + + // Create MemTable with capacity for all batches (add 10% buffer) + let batch_capacity = ((num_batches as f64) * 1.1) as usize; + let mut memtable = + MemTable::with_capacity(schema, 1, vec![0], CacheConfig::default(), batch_capacity) + .unwrap(); + memtable.set_indexes(registry); + + // Insert batches with sequence numbers + for (seq, batch) in batches.into_iter().enumerate() { + memtable + .insert_with_seq(batch, (seq + 1) as u64) + .await + .unwrap(); + } + + memtable +} + +/// Lance dataset wrapper. +struct LanceSetup { + dataset: Arc, + #[allow(dead_code)] + total_rows: usize, +} + +/// Create Lance dataset with a single fragment (all batches concatenated). +async fn setup_lance(batches: Vec) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_bench_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: total_rows + 1, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Create Lance dataset with one fragment per batch. +async fn setup_lance_per_batch(batches: Vec, batch_size: usize) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_per_batch_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: batch_size, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Create Lance dataset with FTS index on text column (single fragment). +async fn setup_lance_with_fts(batches: Vec) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_fts_bench_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: total_rows + 1, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let mut dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + // Create FTS (inverted) index on text column + let params = InvertedIndexParams::default(); + dataset + .create_index(&["text"], IndexType::Inverted, None, ¶ms, true) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Create Lance dataset with FTS index on text column (per-batch fragments). 
+async fn setup_lance_per_batch_with_fts( + batches: Vec, + batch_size: usize, +) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_fts_per_batch_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: batch_size, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let mut dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + // Create FTS (inverted) index on text column + let params = InvertedIndexParams::default(); + dataset + .create_index(&["text"], IndexType::Inverted, None, ¶ms, true) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Create Lance dataset with IVF-PQ vector index (single fragment). +async fn setup_lance_with_vector_index( + batches: Vec, + num_partitions: usize, + num_sub_vectors: usize, +) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_vec_bench_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: total_rows + 1, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let mut dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + // Create IVF-PQ index on vector column + let ivf_params = IvfBuildParams { + num_partitions: Some(num_partitions), + ..Default::default() + }; + let pq_params = PQBuildParams { + num_sub_vectors, + num_bits: 8, + ..Default::default() + }; + + let vector_params = + VectorIndexParams::with_ivf_pq_params(MetricType::L2, ivf_params, pq_params); + + dataset + .create_index(&["vector"], IndexType::Vector, None, &vector_params, true) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Create Lance dataset with IVF-PQ vector index (per-batch fragments). +async fn setup_lance_per_batch_with_vector_index( + batches: Vec, + batch_size: usize, + num_partitions: usize, + num_sub_vectors: usize, +) -> LanceSetup { + let schema = batches[0].schema(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + + let uri = format!("memory://lance_vec_per_batch_{}", Uuid::new_v4()); + let write_params = WriteParams { + max_rows_per_file: batch_size, + ..Default::default() + }; + + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema); + let mut dataset = Dataset::write(reader, &uri, Some(write_params)) + .await + .unwrap(); + + // Create IVF-PQ index on vector column + let ivf_params = IvfBuildParams { + num_partitions: Some(num_partitions), + ..Default::default() + }; + let pq_params = PQBuildParams { + num_sub_vectors, + num_bits: 8, + ..Default::default() + }; + + let vector_params = + VectorIndexParams::with_ivf_pq_params(MetricType::L2, ivf_params, pq_params); + + dataset + .create_index(&["vector"], IndexType::Vector, None, &vector_params, true) + .await + .unwrap(); + + LanceSetup { + dataset: Arc::new(dataset), + total_rows, + } +} + +/// Benchmark scan operations. 
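The benchmarks that follow all drive `MemTableScanner` through the same handful of entry points. For orientation, here is a compact sketch of the four read paths as they are invoked in this file (unwraps for brevity; the `Arc<dyn Array>` annotation for the query vector is an assumption, since the scanner's exact parameter type is not visible in this rendering):

```rust
use std::sync::Arc;

use arrow_array::{Array, Float32Array, RecordBatch};
use futures::TryStreamExt;
use lance::dataset::mem_wal::write::MemTable;

async fn memtable_read_paths(memtable: &MemTable, visibility_seq: u64, query: Vec<f32>) {
    // 1. Full scan of everything visible at `visibility_seq`.
    let _all: Vec<RecordBatch> = memtable
        .scan(visibility_seq)
        .try_into_stream()
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();

    // 2. Point lookups through a SQL-style filter (served by the in-memory BTree index).
    let mut scanner = memtable.scan(visibility_seq);
    let _hits: Vec<RecordBatch> = scanner
        .filter("id IN (1, 2, 3)")
        .unwrap()
        .try_into_stream()
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();

    // 3. Full-text search against the FTS-indexed column.
    let mut scanner = memtable.scan(visibility_seq);
    let _matches: Vec<RecordBatch> = scanner
        .full_text_search("text", "hello")
        .try_into_stream()
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();

    // 4. Approximate nearest neighbours through the in-memory IVF-PQ index
    //    (k = 10, nprobes = 8, no extra refinement).
    let query_array: Arc<dyn Array> = Arc::new(Float32Array::from(query));
    let mut scanner = memtable.scan(visibility_seq);
    let _nearest: Vec<RecordBatch> = scanner
        .nearest("vector", query_array, 10, Some(8), None)
        .try_into_stream()
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();
}
```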
+fn bench_scan(c: &mut Criterion) { + let rt = tokio::runtime::Runtime::new().unwrap(); + + let num_rows = get_num_rows(); + let batch_size = get_batch_size(); + let vector_dim = get_vector_dim(); + let sample_size = get_sample_size(); + + let num_batches = num_rows.div_ceil(batch_size); + let schema = create_schema(vector_dim); + + println!("=== Scan Benchmark ==="); + println!("Num rows: {}", num_rows); + println!("Batch size: {}", batch_size); + println!("Num batches: {}", num_batches); + println!(); + + // Generate test data + let batches: Vec = (0..num_batches) + .map(|i| { + let start_id = (i * batch_size) as i64; + let rows = batch_size.min(num_rows - i * batch_size); + create_batch(&schema, start_id, rows, vector_dim) + }) + .collect(); + + // Setup Lance (single fragment) + let lance_setup = rt.block_on(setup_lance(batches.clone())); + println!( + "Lance (single fragment): {} fragments", + lance_setup.dataset.get_fragments().len() + ); + + // Setup Lance (per-batch fragments) + let lance_per_batch_setup = rt.block_on(setup_lance_per_batch(batches.clone(), batch_size)); + println!( + "Lance (per-batch): {} fragments", + lance_per_batch_setup.dataset.get_fragments().len() + ); + + // Setup MemTable with indexes + let num_partitions = (num_rows / 100).clamp(4, 256); + let num_sub_vectors = (vector_dim / 8).clamp(4, 32); + println!("Creating MemTable with indexes..."); + let memtable = rt.block_on(setup_memtable( + batches, + vector_dim, + num_partitions, + num_sub_vectors, + )); + println!( + "MemTable created with {} rows", + memtable.batch_store().total_rows() + ); + + let mut group = c.benchmark_group("Scan"); + group.throughput(Throughput::Elements(num_rows as u64)); + group.sample_size(sample_size); + + let label = format!("{}_rows", num_rows); + + // MemTable scan using MemTableScanner + group.bench_with_input(BenchmarkId::new("MemTable", &label), &(), |b, _| { + let visibility_seq = num_batches as u64; + b.to_async(&rt).iter(|| async { + let batches: Vec = memtable + .scan(visibility_seq) + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + }); + }); + + // Lance scan (single fragment) + group.bench_with_input( + BenchmarkId::new("Lance_SingleFragment", &label), + &(), + |b, _| { + let dataset = lance_setup.dataset.clone(); + b.to_async(&rt).iter(|| async { + let batches: Vec = dataset + .scan() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + }); + }, + ); + + // Lance scan (per-batch fragments) + group.bench_with_input( + BenchmarkId::new("Lance_PerBatchFragment", &label), + &(), + |b, _| { + let dataset = lance_per_batch_setup.dataset.clone(); + b.to_async(&rt).iter(|| async { + let batches: Vec = dataset + .scan() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + }); + }, + ); + + group.finish(); +} + +/// Benchmark point lookup operations. +/// Uses individual equality lookups rather than large IN clauses to avoid +/// DataFusion FilterExec issues with large IN expressions. 
+fn bench_point_lookup(c: &mut Criterion) { + let rt = tokio::runtime::Runtime::new().unwrap(); + + let num_rows = get_num_rows(); + let batch_size = get_batch_size(); + let vector_dim = get_vector_dim(); + let sample_size = get_sample_size(); + let num_lookups = DEFAULT_NUM_LOOKUPS; + + let num_batches = num_rows.div_ceil(batch_size); + let schema = create_schema(vector_dim); + + println!("=== Point Lookup Benchmark ==="); + println!("Num rows: {}", num_rows); + println!("Num lookups: {}", num_lookups); + println!(); + + // Generate test data + let batches: Vec = (0..num_batches) + .map(|i| { + let start_id = (i * batch_size) as i64; + let rows = batch_size.min(num_rows - i * batch_size); + create_batch(&schema, start_id, rows, vector_dim) + }) + .collect(); + + // Setup Lance (single fragment) + let lance_setup = rt.block_on(setup_lance(batches.clone())); + println!( + "Lance (single fragment): {} fragments", + lance_setup.dataset.get_fragments().len() + ); + + // Setup Lance (per-batch fragments) + let lance_per_batch_setup = rt.block_on(setup_lance_per_batch(batches.clone(), batch_size)); + println!( + "Lance (per-batch): {} fragments", + lance_per_batch_setup.dataset.get_fragments().len() + ); + + // Setup MemTable with indexes + let num_partitions = (num_rows / 100).clamp(4, 256); + let num_sub_vectors = (vector_dim / 8).clamp(4, 32); + println!("Creating MemTable with indexes..."); + let memtable = rt.block_on(setup_memtable( + batches, + vector_dim, + num_partitions, + num_sub_vectors, + )); + println!("MemTable created."); + + // Generate random lookup IDs + let lookup_ids = generate_random_ids(num_rows as i64, num_lookups); + + let mut group = c.benchmark_group("PointLookup"); + group.throughput(Throughput::Elements(num_lookups as u64)); + group.sample_size(sample_size); + + let label = format!("{}_lookups", num_lookups); + + // MemTable point lookup using single IN clause (same as Lance) + group.bench_with_input( + BenchmarkId::new("MemTable_Filter", &label), + &lookup_ids, + |b, ids| { + let visibility_seq = num_batches as u64; + let id_list: Vec = ids.iter().map(|id| id.to_string()).collect(); + let filter = format!("id IN ({})", id_list.join(",")); + + b.to_async(&rt).iter(|| { + let filter = filter.clone(); + let mut scanner = memtable.scan(visibility_seq); + async move { + let batches: Vec = scanner + .filter(&filter) + .unwrap() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + // Lance filter scan (single fragment) - uses IN clause + group.bench_with_input( + BenchmarkId::new("Lance_SingleFragment_Filter", &label), + &lookup_ids, + |b, ids| { + let dataset = lance_setup.dataset.clone(); + let id_list: Vec = ids.iter().map(|id| id.to_string()).collect(); + let filter = format!("id IN ({})", id_list.join(",")); + + b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let filter = filter.clone(); + async move { + let batches: Vec = dataset + .scan() + .filter(&filter) + .unwrap() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + // Lance filter scan (per-batch fragments) - uses IN clause + group.bench_with_input( + BenchmarkId::new("Lance_PerBatchFragment_Filter", &label), + &lookup_ids, + |b, ids| { + let dataset = lance_per_batch_setup.dataset.clone(); + let id_list: Vec = 
ids.iter().map(|id| id.to_string()).collect(); + let filter = format!("id IN ({})", id_list.join(",")); + + b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let filter = filter.clone(); + async move { + let batches: Vec = dataset + .scan() + .filter(&filter) + .unwrap() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + group.finish(); +} + +/// Benchmark FTS operations. +fn bench_fts(c: &mut Criterion) { + let rt = tokio::runtime::Runtime::new().unwrap(); + + let num_rows = get_num_rows(); + let batch_size = get_batch_size(); + let vector_dim = get_vector_dim(); + let sample_size = get_sample_size(); + + let num_batches = num_rows.div_ceil(batch_size); + let schema = create_schema(vector_dim); + + println!("=== FTS Benchmark ==="); + println!("Num rows: {}", num_rows); + println!("Batch size: {}", batch_size); + println!("Num batches: {}", num_batches); + println!(); + + // Generate test data + let batches: Vec = (0..num_batches) + .map(|i| { + let start_id = (i * batch_size) as i64; + let rows = batch_size.min(num_rows - i * batch_size); + create_batch(&schema, start_id, rows, vector_dim) + }) + .collect(); + + // Setup Lance with FTS index (single fragment) + println!("Creating Lance dataset with FTS index (single fragment)..."); + let lance_fts_setup = rt.block_on(setup_lance_with_fts(batches.clone())); + println!( + "Lance FTS (single fragment): {} fragments", + lance_fts_setup.dataset.get_fragments().len() + ); + + // Setup Lance with FTS index (per-batch fragments) + println!("Creating Lance dataset with FTS index (per-batch fragments)..."); + let lance_fts_per_batch_setup = + rt.block_on(setup_lance_per_batch_with_fts(batches.clone(), batch_size)); + println!( + "Lance FTS (per-batch): {} fragments", + lance_fts_per_batch_setup.dataset.get_fragments().len() + ); + + // Setup MemTable with indexes + let num_partitions = (num_rows / 100).clamp(4, 256); + let num_sub_vectors = (vector_dim / 8).clamp(4, 32); + println!("Creating MemTable with indexes..."); + let memtable = rt.block_on(setup_memtable( + batches, + vector_dim, + num_partitions, + num_sub_vectors, + )); + println!("MemTable created."); + + // Search terms (these are words we know exist in the data) + let search_terms = ["hello", "world", "search", "benchmark", "lance"]; + + let mut group = c.benchmark_group("FTS"); + group.throughput(Throughput::Elements(search_terms.len() as u64)); + group.sample_size(sample_size); + + let label = format!("{}_terms", search_terms.len()); + + // MemTable FTS using MemTableScanner + group.bench_with_input( + BenchmarkId::new("MemTable_FTS", &label), + &search_terms, + |b, terms| { + let visibility_seq = num_batches as u64; + b.to_async(&rt).iter(|| { + let terms = *terms; + let scanners: Vec<_> = terms + .iter() + .map(|_| memtable.scan(visibility_seq)) + .collect(); + async move { + let mut total_found = 0usize; + for (mut scanner, term) in scanners.into_iter().zip(terms.iter()) { + let batches: Vec = scanner + .full_text_search("text", term) + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + total_found += batches.iter().map(|b| b.num_rows()).sum::(); + } + assert!(total_found > 0); + } + }); + }, + ); + + // Lance FTS (single fragment) + group.bench_with_input( + BenchmarkId::new("Lance_SingleFragment_FTS", &label), + &search_terms, + |b, terms| { + let dataset = lance_fts_setup.dataset.clone(); + 
b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let terms = terms.to_vec(); + async move { + let mut total_found = 0usize; + for term in terms { + let query = FullTextSearchQuery::new(term.to_string()); + let batches: Vec = dataset + .scan() + .full_text_search(query) + .unwrap() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + total_found += batches.iter().map(|b| b.num_rows()).sum::(); + } + assert!(total_found > 0); + } + }); + }, + ); + + // Lance FTS (per-batch fragments) + group.bench_with_input( + BenchmarkId::new("Lance_PerBatchFragment_FTS", &label), + &search_terms, + |b, terms| { + let dataset = lance_fts_per_batch_setup.dataset.clone(); + b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let terms = terms.to_vec(); + async move { + let mut total_found = 0usize; + for term in terms { + let query = FullTextSearchQuery::new(term.to_string()); + let batches: Vec = dataset + .scan() + .full_text_search(query) + .unwrap() + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + total_found += batches.iter().map(|b| b.num_rows()).sum::(); + } + assert!(total_found > 0); + } + }); + }, + ); + + group.finish(); +} + +/// Benchmark vector search operations. +fn bench_vector_search(c: &mut Criterion) { + let rt = tokio::runtime::Runtime::new().unwrap(); + + let num_rows = get_num_rows(); + let batch_size = get_batch_size(); + let vector_dim = get_vector_dim(); + let sample_size = get_sample_size(); + let k = DEFAULT_K; + + let num_batches = num_rows.div_ceil(batch_size); + let schema = create_schema(vector_dim); + + println!("=== Vector Search Benchmark ==="); + println!("Num rows: {}", num_rows); + println!("Batch size: {}", batch_size); + println!("Num batches: {}", num_batches); + println!("Vector dim: {}", vector_dim); + println!("K: {}", k); + println!(); + + // Generate test data + let batches: Vec = (0..num_batches) + .map(|i| { + let start_id = (i * batch_size) as i64; + let rows = batch_size.min(num_rows - i * batch_size); + create_batch(&schema, start_id, rows, vector_dim) + }) + .collect(); + + // Setup Lance with vector index (IVF-PQ) - single fragment + let num_partitions = (num_rows / 100).clamp(4, 256); + let num_sub_vectors = (vector_dim / 8).clamp(4, 32); + println!( + "Creating Lance dataset with IVF-PQ index (single fragment, partitions={}, sub_vectors={})...", + num_partitions, num_sub_vectors + ); + let lance_vec_setup = rt.block_on(setup_lance_with_vector_index( + batches.clone(), + num_partitions, + num_sub_vectors, + )); + println!( + "Lance IVF-PQ (single fragment): {} fragments", + lance_vec_setup.dataset.get_fragments().len() + ); + + // Setup Lance with vector index (IVF-PQ) - per-batch fragments + println!( + "Creating Lance dataset with IVF-PQ index (per-batch fragments, partitions={}, sub_vectors={})...", + num_partitions, num_sub_vectors + ); + let lance_vec_per_batch_setup = rt.block_on(setup_lance_per_batch_with_vector_index( + batches.clone(), + batch_size, + num_partitions, + num_sub_vectors, + )); + println!( + "Lance IVF-PQ (per-batch): {} fragments", + lance_vec_per_batch_setup.dataset.get_fragments().len() + ); + + // Setup MemTable with IVF-PQ index + println!( + "Creating MemTable with IVF-PQ index (partitions={}, sub_vectors={})...", + num_partitions, num_sub_vectors + ); + let memtable = rt.block_on(setup_memtable( + batches, + vector_dim, + num_partitions, + num_sub_vectors, + )); + println!("MemTable IVF-PQ index created."); + + // Create query vector + let 
query = create_query_vector(vector_dim); + + let mut group = c.benchmark_group("VectorSearch"); + group.throughput(Throughput::Elements(1)); + group.sample_size(sample_size); + + let label = format!("{}_rows_k{}", num_rows, k); + + // MemTable IVF-PQ vector search using MemTableScanner + group.bench_with_input( + BenchmarkId::new("MemTable_IVFPQ", &label), + &query, + |b, q| { + let visibility_seq = num_batches as u64; + let query_array: Arc = Arc::new(Float32Array::from(q.clone())); + b.to_async(&rt).iter(|| { + let query_array = query_array.clone(); + async { + let mut scanner = memtable.scan(visibility_seq); + let batches: Vec = scanner + .nearest("vector", query_array, k, Some(8), None) + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + // Lance IVF-PQ vector search (single fragment) + group.bench_with_input( + BenchmarkId::new("Lance_SingleFragment_IVFPQ", &label), + &query, + |b, q| { + let dataset = lance_vec_setup.dataset.clone(); + let query_array = Float32Array::from(q.clone()); + b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let query_array = query_array.clone(); + async move { + let batches: Vec = dataset + .scan() + .nearest("vector", &query_array, k) + .unwrap() + .nprobes(8) + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + // Lance IVF-PQ vector search (per-batch fragments) + group.bench_with_input( + BenchmarkId::new("Lance_PerBatchFragment_IVFPQ", &label), + &query, + |b, q| { + let dataset = lance_vec_per_batch_setup.dataset.clone(); + let query_array = Float32Array::from(q.clone()); + b.to_async(&rt).iter(|| { + let dataset = dataset.clone(); + let query_array = query_array.clone(); + async move { + let batches: Vec = dataset + .scan() + .nearest("vector", &query_array, k) + .unwrap() + .nprobes(8) + .try_into_stream() + .await + .unwrap() + .try_collect() + .await + .unwrap(); + let total: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert!(total > 0); + } + }); + }, + ); + + group.finish(); +} + +/// Run all benchmarks. +fn all_benchmarks(c: &mut Criterion) { + bench_scan(c); + bench_point_lookup(c); + bench_fts(c); + bench_vector_search(c); +} + +#[cfg(target_os = "linux")] +criterion_group!( + name = benches; + config = Criterion::default() + .significance_level(0.05) + .with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = all_benchmarks +); + +#[cfg(not(target_os = "linux"))] +criterion_group!( + name = benches; + config = Criterion::default().significance_level(0.05); + targets = all_benchmarks +); + +criterion_main!(benches); diff --git a/rust/lance/src/dataset.rs b/rust/lance/src/dataset.rs index e5e71887147..f94255ae617 100644 --- a/rust/lance/src/dataset.rs +++ b/rust/lance/src/dataset.rs @@ -71,6 +71,7 @@ pub mod delta; pub mod fragment; mod hash_joiner; pub mod index; +pub mod mem_wal; mod metadata; pub mod optimize; pub mod progress; diff --git a/rust/lance/src/dataset/mem_wal.rs b/rust/lance/src/dataset/mem_wal.rs new file mode 100644 index 00000000000..8eaef94628e --- /dev/null +++ b/rust/lance/src/dataset/mem_wal.rs @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! MemWAL - Log-Structured Merge (LSM) tree for Lance tables +//! +//! 
This module implements an LSM tree architecture for high-performance +//! streaming writes with durability guarantees via Write-Ahead Log (WAL). +//! +//! ## Architecture +//! +//! Each region has: +//! - A **MemTable** for in-memory data (immediately queryable) +//! - A **WAL Buffer** for durability (persisted to object storage) +//! - **In-memory indexes** (BTree, IVF-PQ, FTS) for indexed queries +//! +//! ## Write Path +//! +//! ```text +//! put(batch) → MemTable.insert() → WalBuffer.append() → [async flush to storage] +//! ↓ +//! IndexRegistry.update() +//! ``` +//! +//! ## Durability +//! +//! Writers can be configured for: +//! - **Durable writes**: Wait for WAL flush before returning +//! - **Non-durable writes**: Buffer in memory, accept potential loss on crash +//! +//! ## Epoch-Based Fencing +//! +//! Each region has exactly one active writer at any time, enforced via +//! monotonically increasing writer epochs in the region manifest. + +mod api; +mod batch_store; +mod config; +mod dispatcher; +mod epoch; +mod indexes; +mod ivfpq_store; +mod manifest; +pub mod memtable; +pub mod scanner; +mod util; +mod watchable_cell; +pub mod write; + +pub use api::{DatasetMemWalExt, MemWalConfig}; +pub use config::RegionWriterConfig; +pub use epoch::EpochGuard; +pub use manifest::RegionManifestStore; +pub use scanner::MemTableScanner; +pub use write::RegionWriter; diff --git a/rust/lance/src/dataset/mem_wal/api.rs b/rust/lance/src/dataset/mem_wal/api.rs new file mode 100644 index 00000000000..232550c5fc0 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/api.rs @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Dataset API extensions for MemWAL. +//! +//! This module provides the user-facing API for initializing and using MemWAL +//! on a Dataset. + +use std::sync::Arc; + +use async_trait::async_trait; +use lance_core::{Error, Result}; +use lance_index::mem_wal::{MemWalIndexDetails, RegionSpec, MEM_WAL_INDEX_NAME}; +use lance_index::vector::ivf::storage::IvfModel; +use lance_index::vector::pq::ProductQuantizer; +use lance_index::DatasetIndexExt; +use lance_io::object_store::ObjectStore; +use lance_linalg::distance::DistanceType; +use snafu::location; +use uuid::Uuid; + +use crate::dataset::transaction::{Operation, Transaction}; +use crate::dataset::CommitBuilder; +use crate::index::mem_wal::new_mem_wal_index_meta; +use crate::index::DatasetIndexInternalExt; +use crate::Dataset; + +use super::write::MemIndexConfig; +use super::write::RegionWriter; +use super::RegionWriterConfig; + +/// Configuration for initializing MemWAL on a Dataset. +#[derive(Debug, Clone, Default)] +pub struct MemWalConfig { + /// Optional region specification for partitioning writes. + /// + /// If None, MemWAL is initialized without any region spec (manual region management). + /// + /// TODO: Add `add_region_spec()` API to add region specs after initialization. + pub region_spec: Option, + /// Index names to maintain in MemTables. + /// These must reference indexes already defined on the base table. + pub maintained_indexes: Vec, +} + +/// Extension trait for Dataset to support MemWAL operations. +#[async_trait] +pub trait DatasetMemWalExt { + /// Initialize MemWAL on this dataset. + /// + /// Creates the MemWalIndex system index with the given configuration. + /// All indexes in `maintained_indexes` must already exist on the dataset. 
+ /// + /// # Example + /// + /// ```ignore + /// let mut dataset = Dataset::open("s3://bucket/dataset").await?; + /// dataset.initialize_mem_wal(MemWalConfig { + /// region_specs: vec![], + /// maintained_indexes: vec!["id_btree".to_string()], + /// }).await?; + /// ``` + async fn initialize_mem_wal(&mut self, config: MemWalConfig) -> Result<()>; + + /// Get a RegionWriter for the specified region. + /// + /// Automatically loads index configurations from the MemWalIndex + /// and creates the appropriate in-memory indexes. + /// + /// # Arguments + /// + /// * `region_id` - UUID identifying this region + /// * `config` - Writer configuration (durability, buffer sizes, etc.) + /// + /// # Example + /// + /// ```ignore + /// let writer = dataset.mem_wal_writer( + /// Uuid::new_v4(), + /// RegionWriterConfig::default(), + /// ).await?; + /// writer.put(batch).await?; + /// ``` + async fn mem_wal_writer( + &self, + region_id: Uuid, + config: RegionWriterConfig, + ) -> Result; +} + +#[async_trait] +impl DatasetMemWalExt for Dataset { + async fn initialize_mem_wal(&mut self, config: MemWalConfig) -> Result<()> { + // Validate that the dataset has a primary key (required for MemWAL) + let pk_fields = self.schema().unenforced_primary_key(); + if pk_fields.is_empty() { + return Err(Error::invalid_input( + "MemWAL requires a primary key on the dataset. \ + Define a primary key using the 'lance-schema:unenforced-primary-key' Arrow field metadata.", + location!(), + )); + } + + // Validate that all maintained_indexes exist on the dataset + let indices = self.load_indices().await?; + for index_name in &config.maintained_indexes { + if !indices.iter().any(|idx| &idx.name == index_name) { + return Err(Error::invalid_input( + format!( + "Index '{}' not found on dataset. maintained_indexes must reference existing indexes.", + index_name + ), + location!(), + )); + } + } + + // Check if MemWAL index already exists + if indices.iter().any(|idx| idx.name == MEM_WAL_INDEX_NAME) { + return Err(Error::invalid_input( + "MemWAL is already initialized on this dataset. Use update methods instead.", + location!(), + )); + } + + // Create MemWalIndexDetails + let details = MemWalIndexDetails { + region_specs: config.region_spec.into_iter().collect(), + maintained_indexes: config.maintained_indexes, + ..Default::default() + }; + + // Create the index metadata + let index_meta = new_mem_wal_index_meta(self.manifest.version, details)?; + + // Commit as CreateIndex transaction + let transaction = Transaction::new( + self.manifest.version, + Operation::CreateIndex { + new_indices: vec![index_meta], + removed_indices: vec![], + }, + None, + ); + + let new_dataset = CommitBuilder::new(Arc::new(self.clone())) + .execute(transaction) + .await?; + + // Update self to point to new version + *self = new_dataset; + + Ok(()) + } + + async fn mem_wal_writer( + &self, + region_id: Uuid, + mut config: RegionWriterConfig, + ) -> Result { + use lance_index::metrics::NoOpMetricsCollector; + + // Load MemWalIndex to get maintained_indexes + let mem_wal_index = self + .open_mem_wal_index(&NoOpMetricsCollector) + .await? + .ok_or_else(|| { + Error::invalid_input( + "MemWAL is not initialized on this dataset. 
Call initialize_mem_wal() first.", + location!(), + ) + })?; + + // Get maintained_indexes from the MemWalIndex details + let maintained_indexes = &mem_wal_index.details.maintained_indexes; + + // Load index configs for each maintained index + let mut index_configs = Vec::new(); + for index_name in maintained_indexes { + let index_meta = self.load_index_by_name(index_name).await?.ok_or_else(|| { + Error::invalid_input( + format!( + "Index '{}' from maintained_indexes not found on dataset", + index_name + ), + location!(), + ) + })?; + + // Detect index type and create appropriate config + let type_url = index_meta + .index_details + .as_ref() + .map(|d| d.type_url.as_str()) + .unwrap_or(""); + + let index_type = MemIndexConfig::detect_index_type(type_url)?; + + match index_type { + "btree" => { + index_configs.push(MemIndexConfig::btree_from_metadata( + &index_meta, + self.schema(), + )?); + } + "fts" => { + index_configs.push(MemIndexConfig::fts_from_metadata( + &index_meta, + self.schema(), + )?); + } + "vector" => { + // Vector index - load IVF-PQ config from base table + let vector_config = + load_vector_index_config(self, index_name, &index_meta).await?; + index_configs.push(vector_config); + } + _ => { + return Err(Error::invalid_input( + format!("Unknown index type: {}", index_type), + location!(), + )) + } + }; + } + + // Set region_id in config + config.region_id = region_id; + + // Get object store and base path + let base_uri = self.uri(); + let (store, base_path) = ObjectStore::from_uri(base_uri).await?; + + // Create RegionWriter + RegionWriter::open( + store, + base_path, + base_uri, + config, + Arc::new(self.schema().into()), + index_configs, + ) + .await + } +} + +/// Load vector index configuration from the base table's IVF-PQ index. +/// +/// Opens the vector index and extracts the IVF model and PQ codebook +/// to create an in-memory IVF-PQ index config. +async fn load_vector_index_config( + dataset: &Dataset, + index_name: &str, + index_meta: &lance_table::format::IndexMetadata, +) -> Result { + use lance_index::metrics::NoOpMetricsCollector; + + // Get the column name for this index + let field_id = index_meta.fields.first().ok_or_else(|| { + Error::invalid_input( + format!("Vector index '{}' has no fields", index_name), + location!(), + ) + })?; + + let field = dataset.schema().field_by_id(*field_id).ok_or_else(|| { + Error::invalid_input( + format!("Field not found for vector index '{}'", index_name), + location!(), + ) + })?; + + let column = field.name.clone(); + + // Load IVF-PQ components + let index_uuid = index_meta.uuid.to_string(); + let (ivf_model, pq, distance_type) = load_ivf_pq_components( + dataset, + index_name, + &index_uuid, + &column, + &NoOpMetricsCollector, + ) + .await?; + + Ok(MemIndexConfig::ivf_pq( + index_name.to_string(), + column, + ivf_model, + pq, + distance_type, + )) +} + +/// Load IVF model and ProductQuantizer from an IVF-PQ index. 
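`load_vector_index_config` above reuses the base table's trained IVF centroids and PQ codebook rather than training new ones for the MemTable. As a rough, self-contained illustration of what that codebook is for — textbook product quantization with made-up names, not Lance's `ProductQuantizer` API — each vector is split into subvectors and each subvector is replaced by the index of its nearest centroid:

```rust
/// Conceptual product-quantization encoding: split a vector into `m` subvectors
/// and store, for each, the index of the nearest centroid in that subspace.
/// (Illustrative only; the real quantizer has its own layout and API.)
fn pq_encode(vector: &[f32], codebooks: &[Vec<Vec<f32>>]) -> Vec<u8> {
    let m = codebooks.len();
    let sub_dim = vector.len() / m;
    (0..m)
        .map(|i| {
            let sub = &vector[i * sub_dim..(i + 1) * sub_dim];
            // Nearest centroid by squared L2 distance.
            codebooks[i]
                .iter()
                .enumerate()
                .min_by(|(_, a), (_, b)| {
                    let da: f32 = a.iter().zip(sub).map(|(x, y)| (x - y).powi(2)).sum();
                    let db: f32 = b.iter().zip(sub).map(|(x, y)| (x - y).powi(2)).sum();
                    da.partial_cmp(&db).unwrap()
                })
                .map(|(idx, _)| idx as u8)
                .unwrap()
        })
        .collect()
}

fn main() {
    // Two subspaces, two centroids each (toy codebook).
    let codebooks = vec![
        vec![vec![0.0, 0.0], vec![1.0, 1.0]],
        vec![vec![0.0, 1.0], vec![1.0, 0.0]],
    ];
    let code = pq_encode(&[0.9, 1.1, 0.1, 0.9], &codebooks);
    assert_eq!(code, vec![1, 0]);
}
```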
+async fn load_ivf_pq_components( + dataset: &Dataset, + index_name: &str, + index_uuid: &str, + column_name: &str, + metrics: &dyn lance_index::metrics::MetricsCollector, +) -> Result<(IvfModel, ProductQuantizer, DistanceType)> { + use crate::index::vector::ivf::v2::IvfPq; + use lance_index::vector::VectorIndex; + + // Open the vector index using UUID + let index = dataset + .open_vector_index(column_name, index_uuid, metrics) + .await?; + + // Try to downcast to IvfPq (IVFIndex) + // This covers IVF-PQ indexes which are the most common + let ivf_index = index.as_any().downcast_ref::().ok_or_else(|| { + Error::invalid_input( + format!( + "Vector index '{}' is not an IVF-PQ index. Only IVF-PQ indexes are supported for MemWAL.", + index_name + ), + location!(), + ) + })?; + + // Extract IVF model and distance type from the index + let ivf_model = ivf_index.ivf_model().clone(); + let distance_type = ivf_index.metric_type(); + + // Get the quantizer and convert to ProductQuantizer + let quantizer = ivf_index.quantizer(); + let pq = ProductQuantizer::try_from(quantizer)?; + + Ok((ivf_model, pq, distance_type)) +} diff --git a/rust/lance/src/dataset/mem_wal/batch_store.rs b/rust/lance/src/dataset/mem_wal/batch_store.rs new file mode 100644 index 00000000000..91e51a4a048 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/batch_store.rs @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Lock-free append-only batch storage for MemTable. +//! +//! This module provides a high-performance, lock-free storage structure for +//! RecordBatches in the MemTable. It is designed for a single-writer, +//! multiple-reader scenario where: +//! +//! - A single writer task (WriteBatchHandler) appends batches +//! - Multiple reader tasks concurrently read batches +//! - No locks are needed for either reads or writes +//! +//! # Safety Model +//! +//! The lock-free design relies on these invariants: +//! +//! 1. **Single Writer**: Only one thread calls `append()` at a time. +//! Enforced by the WriteBatchHandler architecture. +//! +//! 2. **Append-Only**: Once written, slots are never modified or removed +//! until the entire store is dropped. +//! +//! 3. **Atomic Publishing**: Writer updates `committed_len` with Release +//! ordering AFTER fully writing the slot. Readers load with Acquire +//! ordering BEFORE reading slots. +//! +//! 4. **Fixed Capacity**: The store has a fixed capacity set at creation. +//! When full, the MemTable should be flushed. +//! +//! # Memory Ordering +//! +//! ```text +//! Writer: Reader: +//! 1. Write data to slot[n] +//! 2. committed_len.store(n+1, Release) +//! ─────────────────────────────────► synchronizes-with +//! 3. len = committed_len.load(Acquire) +//! 4. Read slot[i] where i < len +//! ``` + +use std::cell::UnsafeCell; +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use arrow_array::RecordBatch; + +/// A batch stored in the lock-free store. +#[derive(Clone)] +pub struct StoredBatch { + /// The Arrow RecordBatch data. + pub data: RecordBatch, + /// Number of rows in this batch (cached for quick access). + pub num_rows: usize, + /// Row offset in the MemTable (cumulative rows before this batch). + pub row_offset: u64, +} + +impl StoredBatch { + /// Create a new StoredBatch. + pub fn new(data: RecordBatch, row_offset: u64) -> Self { + let num_rows = data.num_rows(); + Self { + data, + num_rows, + row_offset, + } + } +} + +/// Error returned when the store is full. 
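A minimal standalone sketch of the Release/Acquire publication protocol described in the module docs above, using only the standard library and a toy `u64` payload (names such as `PublishOnly` are illustrative, not part of this module):

```rust
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

/// Toy append-only store with the same publish protocol: the single writer
/// fills a slot, then advances `committed_len` with Release; readers load the
/// length with Acquire and may read any slot below it.
struct PublishOnly {
    slots: Box<[UnsafeCell<MaybeUninit<u64>>]>,
    committed_len: AtomicUsize,
}

// SAFETY: single writer, append-only slots, Release/Acquire on `committed_len`.
unsafe impl Sync for PublishOnly {}

impl PublishOnly {
    fn with_capacity(cap: usize) -> Self {
        let slots = (0..cap)
            .map(|_| UnsafeCell::new(MaybeUninit::uninit()))
            .collect();
        Self {
            slots,
            committed_len: AtomicUsize::new(0),
        }
    }

    /// Writer-only: write the slot first, then publish it.
    fn push(&self, value: u64) {
        let idx = self.committed_len.load(Ordering::Relaxed); // we are the only writer
        assert!(idx < self.slots.len(), "store full");
        unsafe { (*self.slots[idx].get()).write(value) };
        self.committed_len.store(idx + 1, Ordering::Release); // publish
    }

    /// Reader: only slots below the Acquire-loaded length are visible.
    fn get(&self, idx: usize) -> Option<u64> {
        if idx < self.committed_len.load(Ordering::Acquire) {
            Some(unsafe { (*self.slots[idx].get()).assume_init_read() })
        } else {
            None
        }
    }
}

fn main() {
    let store = Arc::new(PublishOnly::with_capacity(1000));
    let writer = {
        let s = store.clone();
        thread::spawn(move || (0..1000u64).for_each(|v| s.push(v)))
    };
    let reader = {
        let s = store.clone();
        thread::spawn(move || {
            // Any slot the reader can see must already hold the writer's value.
            for i in 0..1000 {
                if let Some(v) = s.get(i) {
                    assert_eq!(v, i as u64);
                }
            }
        })
    };
    writer.join().unwrap();
    reader.join().unwrap();
}
```

The pattern only holds because exactly one task ever calls `push`: the Release store publishes the slot write, and the reader's Acquire load makes it visible — the same pairing the real store's safety comments rely on.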
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StoreFull; + +impl std::fmt::Display for StoreFull { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "LockFreeBatchStore is full") + } +} + +impl std::error::Error for StoreFull {} + +/// Lock-free append-only storage for memtable batches. +/// +/// This structure provides O(1) lock-free appends and reads for a +/// single-writer, multiple-reader scenario. +/// +/// # Example +/// +/// ```ignore +/// let store = LockFreeBatchStore::with_capacity(100); +/// +/// // Writer (single thread) +/// store.append(batch1, 1)?; +/// store.append(batch2, 2)?; +/// +/// // Readers (multiple threads, concurrent) +/// let len = store.len(); +/// for i in 0..len { +/// let batch = store.get(i).unwrap(); +/// // process batch... +/// } +/// ``` +pub struct LockFreeBatchStore { + /// Pre-allocated storage slots. + /// Each slot is either uninitialized or contains a valid StoredBatch. + slots: Box<[UnsafeCell>]>, + + /// Number of committed (fully written) slots. + /// Invariant: all slots [0, committed_len) contain valid data. + committed_len: AtomicUsize, + + /// Total capacity (fixed at creation). + capacity: usize, + + /// Total row count across all committed batches. + total_rows: AtomicUsize, + + /// Estimated size in bytes (for flush threshold). + estimated_bytes: AtomicUsize, + + /// WAL flush watermark: the last batch ID that has been flushed to WAL (inclusive). + /// Uses usize::MAX as sentinel for "nothing flushed yet". + /// This is per-memtable tracking, not global. + max_flushed_batch_position: AtomicUsize, +} + +// SAFETY: Safe to share across threads because: +// - Single writer guarantee (architectural invariant) +// - Readers only access committed slots (index < committed_len) +// - Atomic operations provide proper synchronization +// - Slots are never modified after being written +unsafe impl Sync for LockFreeBatchStore {} +unsafe impl Send for LockFreeBatchStore {} + +impl LockFreeBatchStore { + /// Create a new store with the given capacity. + /// + /// # Arguments + /// + /// * `capacity` - Maximum number of batches. Should be sized based on + /// `max_memtable_size / expected_avg_batch_size`. + /// + /// # Panics + /// + /// Panics if capacity is 0. + pub fn with_capacity(capacity: usize) -> Self { + assert!(capacity > 0, "capacity must be > 0"); + + // Allocate uninitialized storage + let mut slots = Vec::with_capacity(capacity); + for _ in 0..capacity { + slots.push(UnsafeCell::new(MaybeUninit::uninit())); + } + + Self { + slots: slots.into_boxed_slice(), + committed_len: AtomicUsize::new(0), + capacity, + total_rows: AtomicUsize::new(0), + estimated_bytes: AtomicUsize::new(0), + max_flushed_batch_position: AtomicUsize::new(usize::MAX), // Nothing flushed yet + } + } + + /// Calculate recommended capacity from memtable size configuration. + /// + /// Uses an assumed average batch size of 64KB with 20% buffer. + pub fn recommended_capacity(max_memtable_bytes: usize) -> usize { + const AVG_BATCH_SIZE: usize = 64 * 1024; // 64KB + const BUFFER_FACTOR: f64 = 1.2; + + let estimated_batches = max_memtable_bytes / AVG_BATCH_SIZE; + let capacity = ((estimated_batches as f64) * BUFFER_FACTOR) as usize; + capacity.max(16) // Minimum 16 slots + } + + /// Returns the capacity. + #[inline] + pub fn capacity(&self) -> usize { + self.capacity + } + + /// Returns true if the store is full. 
+ #[inline] + pub fn is_full(&self) -> bool { + self.committed_len.load(Ordering::Relaxed) >= self.capacity + } + + /// Returns the number of remaining slots. + #[inline] + pub fn remaining_capacity(&self) -> usize { + self.capacity + .saturating_sub(self.committed_len.load(Ordering::Relaxed)) + } + + // ========================================================================= + // Writer API (Single Writer Only) + // ========================================================================= + + /// Append a batch to the store. + /// + /// # Safety Requirements + /// + /// This method MUST only be called from the single writer task. + /// Concurrent calls from multiple threads cause undefined behavior. + /// + /// # Returns + /// + /// - `Ok((batch_position, row_offset, estimated_size))` - The index, row offset, and size of the appended batch + /// - `Err(StoreFull)` - The store is at capacity, needs flush + pub fn append(&self, batch: RecordBatch) -> Result<(usize, u64, usize), StoreFull> { + // Load current length (Relaxed is fine - we're the only writer) + let idx = self.committed_len.load(Ordering::Relaxed); + + if idx >= self.capacity { + return Err(StoreFull); + } + + let num_rows = batch.num_rows(); + let estimated_size = Self::estimate_batch_size(&batch); + + // Row offset is the total rows BEFORE this batch + let row_offset = self.total_rows.load(Ordering::Relaxed) as u64; + + let stored = StoredBatch::new(batch, row_offset); + + // SAFETY: + // 1. idx < capacity, so slot exists + // 2. Single writer guarantee - no concurrent writes to this slot + // 3. Slot at idx is uninitialized (never written before, append-only) + unsafe { + let slot_ptr = self.slots[idx].get(); + std::ptr::write(slot_ptr, MaybeUninit::new(stored)); + } + + // Update counters (Relaxed - just tracking, not synchronization) + self.total_rows.fetch_add(num_rows, Ordering::Relaxed); + self.estimated_bytes + .fetch_add(estimated_size, Ordering::Relaxed); + + // CRITICAL: Publish with Release ordering. + // This ensures all writes above are visible to readers + // who load committed_len with Acquire ordering. + self.committed_len.store(idx + 1, Ordering::Release); + + Ok((idx, row_offset, estimated_size)) + } + + /// Estimate the memory size of a RecordBatch. + fn estimate_batch_size(batch: &RecordBatch) -> usize { + batch + .columns() + .iter() + .map(|col| col.get_array_memory_size()) + .sum::() + + std::mem::size_of::() + } + + // ========================================================================= + // Reader API (Multiple Concurrent Readers) + // ========================================================================= + + /// Get the number of committed batches. + #[inline] + pub fn len(&self) -> usize { + self.committed_len.load(Ordering::Acquire) + } + + /// Check if empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Get the maximum buffered batch position (inclusive). + /// + /// Returns `None` if no batches have been buffered. + /// Returns `Some(len - 1)` otherwise, which is the position of the last buffered batch. + #[inline] + pub fn max_buffered_batch_position(&self) -> Option { + let len = self.len(); + if len == 0 { + None + } else { + Some(len - 1) + } + } + + /// Get total row count. + #[inline] + pub fn total_rows(&self) -> usize { + self.total_rows.load(Ordering::Relaxed) + } + + /// Get estimated size in bytes. 
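`estimate_batch_size` above sums each column's buffer footprint via arrow's `get_array_memory_size`. A small self-contained version of the same estimate, assuming the `arrow_array`/`arrow_schema` crates already used in this file, and assuming the trailing constant term is `size_of::<RecordBatch>()`:

```rust
use std::sync::Arc;

use arrow_array::{Array, ArrayRef, Float32Array, Int32Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};

/// Sum each column's buffer footprint plus a constant for the batch struct.
fn estimate_batch_size(batch: &RecordBatch) -> usize {
    batch
        .columns()
        .iter()
        .map(|col| col.get_array_memory_size())
        .sum::<usize>()
        + std::mem::size_of::<RecordBatch>()
}

fn main() {
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("score", DataType::Float32, false),
    ]));
    let columns: Vec<ArrayRef> = vec![
        Arc::new(Int32Array::from_iter_values(0..1024)),
        Arc::new(Float32Array::from_iter_values((0..1024).map(|i| i as f32))),
    ];
    let batch = RecordBatch::try_new(schema, columns).unwrap();
    // Roughly 8 KiB of values plus per-array bookkeeping.
    println!("estimated bytes: {}", estimate_batch_size(&batch));
}
```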
+ #[inline] + pub fn estimated_bytes(&self) -> usize { + self.estimated_bytes.load(Ordering::Relaxed) + } + + // ========================================================================= + // WAL Flush Tracking API + // ========================================================================= + + /// Get the WAL flush watermark (the last batch ID that was flushed, inclusive). + /// Returns None if nothing has been flushed yet. + #[inline] + pub fn max_flushed_batch_position(&self) -> Option { + let watermark = self.max_flushed_batch_position.load(Ordering::Acquire); + if watermark == usize::MAX { + None + } else { + Some(watermark) + } + } + + /// Update the WAL flush watermark after successful WAL flush. + /// + /// # Arguments + /// + /// * `batch_position` - The last batch ID that was flushed (inclusive) + #[inline] + pub fn set_max_flushed_batch_position(&self, batch_position: usize) { + debug_assert!( + batch_position != usize::MAX, + "batch_position cannot be usize::MAX (reserved as sentinel)" + ); + self.max_flushed_batch_position + .store(batch_position, Ordering::Release); + } + + /// Get the number of batches pending WAL flush. + #[inline] + pub fn pending_wal_flush_count(&self) -> usize { + let committed = self.committed_len.load(Ordering::Acquire); + let watermark = self.max_flushed_batch_position.load(Ordering::Acquire); + if watermark == usize::MAX { + // Nothing flushed yet, all committed batches are pending + committed + } else { + // Batches [0, watermark] are flushed, so pending = committed - (watermark + 1) + committed.saturating_sub(watermark + 1) + } + } + + /// Check if all committed batches have been WAL-flushed. + #[inline] + pub fn is_wal_flush_complete(&self) -> bool { + self.pending_wal_flush_count() == 0 + } + + /// Get the range of batch IDs pending WAL flush: [start, end). + /// Returns None if nothing pending. + #[inline] + pub fn pending_wal_flush_range(&self) -> Option<(usize, usize)> { + let committed = self.committed_len.load(Ordering::Acquire); + let watermark = self.max_flushed_batch_position.load(Ordering::Acquire); + let start = if watermark == usize::MAX { + 0 + } else { + watermark + 1 + }; + if committed > start { + Some((start, committed)) + } else { + None + } + } + + /// Get a reference to a batch by index. + /// + /// Returns `None` if index >= committed length. + /// + /// # Safety + /// + /// The returned reference is valid as long as `self` is not dropped. + /// This is safe because: + /// - We only access slots where index < committed_len (Acquire load) + /// - Slots are never modified after being written + /// - The store is append-only + #[inline] + pub fn get(&self, index: usize) -> Option<&StoredBatch> { + // Acquire ordering synchronizes with Release in append() + let len = self.committed_len.load(Ordering::Acquire); + + if index >= len { + return None; + } + + // SAFETY: + // 1. index < len, and len was loaded with Acquire ordering + // 2. The Release-Acquire pair ensures the write is visible + // 3. Slots are never modified after writing (append-only) + unsafe { + let slot_ptr = self.slots[index].get(); + Some((*slot_ptr).assume_init_ref()) + } + } + + /// Get the RecordBatch data at an index. + #[inline] + pub fn get_batch(&self, index: usize) -> Option<&RecordBatch> { + self.get(index).map(|s| &s.data) + } + + /// Iterate over all committed batches. + /// + /// The iterator captures a snapshot of the committed length at creation + /// time, so it will not see batches appended during iteration. 
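The WAL watermark bookkeeping above uses `usize::MAX` as a "nothing flushed yet" sentinel, and the pending count is everything committed past the watermark. A worked example of that arithmetic as a standalone function:

```rust
/// Pending-flush arithmetic: `usize::MAX` means "nothing flushed yet",
/// otherwise batches `[0, watermark]` are already durable in the WAL.
fn pending_wal_flush_count(committed: usize, watermark: usize) -> usize {
    if watermark == usize::MAX {
        committed
    } else {
        committed.saturating_sub(watermark + 1)
    }
}

fn main() {
    // 5 batches appended, nothing flushed yet: all 5 are pending.
    assert_eq!(pending_wal_flush_count(5, usize::MAX), 5);
    // Batches 0..=2 flushed: batches 3 and 4 remain.
    assert_eq!(pending_wal_flush_count(5, 2), 2);
    // Watermark caught up with the last committed batch: nothing pending.
    assert_eq!(pending_wal_flush_count(5, 4), 0);
}
```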
+ pub fn iter(&self) -> LockFreeBatchStoreIter<'_> { + let len = self.committed_len.load(Ordering::Acquire); + LockFreeBatchStoreIter { + store: self, + current: 0, + len, + } + } + + /// Get all batches as a Vec (clones the RecordBatch data). + pub fn to_vec(&self) -> Vec { + self.iter().map(|b| b.data.clone()).collect() + } + + /// Get all StoredBatches as a Vec (clones). + pub fn to_stored_vec(&self) -> Vec { + self.iter().cloned().collect() + } + + // ========================================================================= + // Visibility API + // ========================================================================= + + /// Get batches visible up to a specific batch position (inclusive). + /// + /// A batch at position `i` is visible if `i <= max_visible_batch_position`. + pub fn visible_batches(&self, max_visible_batch_position: usize) -> Vec<&StoredBatch> { + let len = self.committed_len.load(Ordering::Acquire); + let end = (max_visible_batch_position + 1).min(len); + (0..end).filter_map(|i| self.get(i)).collect() + } + + /// Get batch positions visible up to a specific batch position (inclusive). + pub fn max_visible_batch_positions(&self, max_visible_batch_position: usize) -> Vec { + let len = self.committed_len.load(Ordering::Acquire); + let end = (max_visible_batch_position + 1).min(len); + (0..end).collect() + } + + /// Check if a specific batch is visible at a given visibility position. + #[inline] + pub fn is_batch_visible( + &self, + batch_position: usize, + max_visible_batch_position: usize, + ) -> bool { + let len = self.committed_len.load(Ordering::Acquire); + batch_position < len && batch_position <= max_visible_batch_position + } + + /// Get visible RecordBatches (clones the data). + pub fn visible_record_batches(&self, max_visible_batch_position: usize) -> Vec { + self.visible_batches(max_visible_batch_position) + .into_iter() + .map(|b| b.data.clone()) + .collect() + } +} + +impl Drop for LockFreeBatchStore { + fn drop(&mut self) { + // Get the committed length directly (no atomic needed, we have &mut self) + let len = *self.committed_len.get_mut(); + + // Drop all initialized slots + for i in 0..len { + // SAFETY: slots [0, len) are initialized and we have exclusive access + unsafe { + let slot_ptr = self.slots[i].get(); + std::ptr::drop_in_place((*slot_ptr).as_mut_ptr()); + } + } + } +} + +/// Iterator over committed batches in a LockFreeBatchStore. +/// +/// This iterator captures a snapshot of the committed length at creation, +/// providing a consistent view even if new batches are appended during +/// iteration. 
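Both iteration and the visibility API above clamp to a snapshot: a reader sees positions up to the smaller of its visibility position and the committed length captured at the time of the call, so concurrent appends never leak in. A tiny sketch of that clamping:

```rust
/// Positions visible to a reader pinned at `max_visible_batch_position`:
/// the intersection of "already committed" and "at or below the snapshot".
fn visible_positions(
    committed_len: usize,
    max_visible_batch_position: usize,
) -> std::ops::Range<usize> {
    0..(max_visible_batch_position + 1).min(committed_len)
}

fn main() {
    // 5 batches committed, snapshot taken after batch 2 was appended.
    assert_eq!(visible_positions(5, 2).collect::<Vec<_>>(), vec![0, 1, 2]);
    // Snapshot ahead of what is committed: clamp to the committed length.
    assert_eq!(visible_positions(3, 10).collect::<Vec<_>>(), vec![0, 1, 2]);
}
```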
+pub struct LockFreeBatchStoreIter<'a> { + store: &'a LockFreeBatchStore, + current: usize, + len: usize, +} + +impl<'a> Iterator for LockFreeBatchStoreIter<'a> { + type Item = &'a StoredBatch; + + fn next(&mut self) -> Option { + if self.current >= self.len { + return None; + } + + // SAFETY: current < len, which was captured with Acquire ordering + let batch = unsafe { + let slot_ptr = self.store.slots[self.current].get(); + (*slot_ptr).assume_init_ref() + }; + + self.current += 1; + Some(batch) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.len - self.current; + (remaining, Some(remaining)) + } +} + +impl ExactSizeIterator for LockFreeBatchStoreIter<'_> {} + +// ========================================================================= +// Tests +// ========================================================================= + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::Int32Array; + use arrow_schema::{DataType, Field, Schema as ArrowSchema}; + use std::sync::Arc; + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("value", DataType::Int32, false), + ])) + } + + fn create_test_batch(num_rows: usize) -> RecordBatch { + let schema = create_test_schema(); + let ids: Vec = (0..num_rows as i32).collect(); + let values: Vec = ids.iter().map(|id| id * 10).collect(); + RecordBatch::try_new( + schema, + vec![ + Arc::new(Int32Array::from(ids)), + Arc::new(Int32Array::from(values)), + ], + ) + .unwrap() + } + + #[test] + fn test_create_store() { + let store = LockFreeBatchStore::with_capacity(10); + assert_eq!(store.capacity(), 10); + assert_eq!(store.len(), 0); + assert!(store.is_empty()); + assert!(!store.is_full()); + assert_eq!(store.remaining_capacity(), 10); + } + + #[test] + fn test_append_single() { + let store = LockFreeBatchStore::with_capacity(10); + let batch = create_test_batch(100); + + let (id, row_offset, _size) = store.append(batch).unwrap(); + assert_eq!(id, 0); + assert_eq!(row_offset, 0); // First batch starts at row 0 + assert_eq!(store.len(), 1); + assert!(!store.is_empty()); + assert_eq!(store.total_rows(), 100); + } + + #[test] + fn test_append_multiple() { + let store = LockFreeBatchStore::with_capacity(10); + + let mut expected_row_offset = 0u64; + for i in 0..5 { + let num_rows = 10 * (i + 1); + let batch = create_test_batch(num_rows); + let (id, row_offset, _size) = store.append(batch).unwrap(); + assert_eq!(id, i); + assert_eq!(row_offset, expected_row_offset); + expected_row_offset += num_rows as u64; + } + + assert_eq!(store.len(), 5); + assert_eq!(store.total_rows(), 10 + 20 + 30 + 40 + 50); + } + + #[test] + fn test_capacity_limit() { + let store = LockFreeBatchStore::with_capacity(3); + + store.append(create_test_batch(10)).unwrap(); + store.append(create_test_batch(10)).unwrap(); + store.append(create_test_batch(10)).unwrap(); + + assert!(store.is_full()); + assert_eq!(store.remaining_capacity(), 0); + + let result = store.append(create_test_batch(10)); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), StoreFull); + } + + #[test] + fn test_get_batch() { + let store = LockFreeBatchStore::with_capacity(10); + + let batch1 = create_test_batch(10); + let batch2 = create_test_batch(20); + + store.append(batch1).unwrap(); + store.append(batch2).unwrap(); + + let retrieved1 = store.get(0).unwrap(); + assert_eq!(retrieved1.num_rows, 10); + assert_eq!(retrieved1.row_offset, 0); + + let retrieved2 = store.get(1).unwrap(); + 
assert_eq!(retrieved2.num_rows, 20); + assert_eq!(retrieved2.row_offset, 10); // After first batch + + // Out of bounds + assert!(store.get(2).is_none()); + assert!(store.get(100).is_none()); + } + + #[test] + fn test_iter() { + let store = LockFreeBatchStore::with_capacity(10); + + for _ in 0..5 { + store.append(create_test_batch(10)).unwrap(); + } + + let batches: Vec<_> = store.iter().collect(); + assert_eq!(batches.len(), 5); + } + + #[test] + fn test_visibility_filtering() { + let store = LockFreeBatchStore::with_capacity(10); + + store.append(create_test_batch(10)).unwrap(); // position 0 + store.append(create_test_batch(10)).unwrap(); // position 1 + store.append(create_test_batch(10)).unwrap(); // position 2 + store.append(create_test_batch(10)).unwrap(); // position 3 + store.append(create_test_batch(10)).unwrap(); // position 4 + + // max_visible_batch_position=2 means positions 0, 1, 2 are visible + let visible = store.max_visible_batch_positions(2); + assert_eq!(visible, vec![0, 1, 2]); + + // max_visible_batch_position=4 means all visible + let visible = store.max_visible_batch_positions(4); + assert_eq!(visible, vec![0, 1, 2, 3, 4]); + + // max_visible_batch_position=0 means only position 0 visible + let visible = store.max_visible_batch_positions(0); + assert_eq!(visible, vec![0]); + } + + #[test] + fn test_is_batch_visible() { + let store = LockFreeBatchStore::with_capacity(10); + + store.append(create_test_batch(10)).unwrap(); // position 0 + store.append(create_test_batch(10)).unwrap(); // position 1 + store.append(create_test_batch(10)).unwrap(); // position 2 + + // Batch at position 0 is visible when max_visible_batch_position >= 0 + assert!(store.is_batch_visible(0, 0)); + assert!(store.is_batch_visible(0, 1)); + assert!(store.is_batch_visible(0, 2)); + + // Batch at position 2 is only visible when max_visible_batch_position >= 2 + assert!(!store.is_batch_visible(2, 1)); + assert!(store.is_batch_visible(2, 2)); + assert!(store.is_batch_visible(2, 3)); + + // Batch 3 doesn't exist + assert!(!store.is_batch_visible(3, 10)); + } + + #[test] + fn test_recommended_capacity() { + // 64MB memtable, 64KB avg batch = 1024 batches * 1.2 = ~1228 + let cap = LockFreeBatchStore::recommended_capacity(64 * 1024 * 1024); + assert!( + (1200..=1300).contains(&cap), + "capacity should be around 1200, got {}", + cap + ); + + // Very small memtable should get minimum capacity + let cap = LockFreeBatchStore::recommended_capacity(1024); + assert_eq!(cap, 16); // minimum + } + + #[test] + fn test_to_vec() { + let store = LockFreeBatchStore::with_capacity(10); + + let batch1 = create_test_batch(10); + let batch2 = create_test_batch(20); + + store.append(batch1).unwrap(); + store.append(batch2).unwrap(); + + let vec = store.to_vec(); + assert_eq!(vec.len(), 2); + assert_eq!(vec[0].num_rows(), 10); + assert_eq!(vec[1].num_rows(), 20); + } + + #[test] + fn test_concurrent_readers() { + use std::sync::Arc; + use std::thread; + + let store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Pre-populate with some batches + for _ in 0..50 { + store.append(create_test_batch(10)).unwrap(); + } + + // Spawn multiple reader threads + let readers: Vec<_> = (0..4) + .map(|_| { + let reader_store = store.clone(); + thread::spawn(move || { + for _ in 0..100 { + let len = reader_store.len(); + assert_eq!(len, 50); + + // Verify we can read all batches + for i in 0..len { + let batch = reader_store.get(i); + assert!(batch.is_some()); + assert_eq!(batch.unwrap().num_rows, 10); + } + + // Verify 
iterator + let count = reader_store.iter().count(); + assert_eq!(count, 50); + + thread::yield_now(); + } + }) + }) + .collect(); + + for r in readers { + r.join().unwrap(); + } + } + + #[test] + fn test_concurrent_read_write() { + use std::sync::atomic::AtomicBool; + use std::sync::Arc; + use std::thread; + + let store = Arc::new(LockFreeBatchStore::with_capacity(200)); + let done = Arc::new(AtomicBool::new(false)); + + // Writer thread (single writer) + let writer_store = store.clone(); + let writer_done = done.clone(); + let writer = thread::spawn(move || { + for _ in 0..100 { + writer_store.append(create_test_batch(10)).unwrap(); + thread::yield_now(); + } + writer_done.store(true, Ordering::Release); + }); + + // Reader threads (concurrent readers) + let readers: Vec<_> = (0..4) + .map(|_| { + let reader_store = store.clone(); + let reader_done = done.clone(); + thread::spawn(move || { + while !reader_done.load(Ordering::Acquire) { + let len = reader_store.len(); + + // Every batch we can see should be valid + for i in 0..len { + let batch = reader_store.get(i); + assert!(batch.is_some()); + } + + thread::yield_now(); + } + + // Final check - should see all 100 batches + assert_eq!(reader_store.len(), 100); + }) + }) + .collect(); + + writer.join().unwrap(); + for r in readers { + r.join().unwrap(); + } + } +} diff --git a/rust/lance/src/dataset/mem_wal/config.rs b/rust/lance/src/dataset/mem_wal/config.rs new file mode 100644 index 00000000000..cf032deb84d --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/config.rs @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Configuration types for MemWAL writers. + +use std::time::Duration; + +use uuid::Uuid; + +/// Configuration for a region writer. +#[derive(Debug, Clone)] +pub struct RegionWriterConfig { + /// Unique identifier for this region (UUID v4). + pub region_id: Uuid, + + /// Region spec ID this region was created with. + /// A value of 0 indicates a manually-created region not governed by any spec. + pub region_spec_id: u32, + + /// Whether to wait for WAL flush before returning from writes. + /// + /// When true (durable writes): + /// - Each write waits for WAL persistence before returning + /// - Guarantees no data loss on crash + /// - Higher latency due to object storage writes + /// + /// When false (non-durable writes): + /// - Writes return immediately after buffering in memory + /// - Potential data loss if process crashes before flush + /// - Lower latency, batched S3 operations + pub durable_write: bool, + + /// Whether to update indexes synchronously on each write. + /// + /// When true: + /// - Newly written data is immediately searchable via indexes + /// - Higher latency due to index update overhead + /// + /// When false: + /// - Index updates are deferred + /// - New data may not appear in index-accelerated queries immediately + pub sync_indexed_write: bool, + + /// Maximum WAL buffer size in bytes before triggering a flush. + /// + /// This is a soft threshold - write batches are atomic and won't be split. + /// WAL flushes when buffer exceeds this size OR when `max_wal_flush_interval` elapses. + /// Default: 10MB + pub max_wal_buffer_size: usize, + + /// Time-based WAL flush interval. + /// + /// WAL buffer will be flushed after this duration even if size threshold + /// hasn't been reached. This ensures bounded data loss window in non-durable mode + /// and prevents accumulating too much data before flushing to object storage. 
+ /// Default: 100ms + pub max_wal_flush_interval: Option, + + /// Maximum MemTable size in bytes before triggering a flush to storage. + /// + /// MemTable size is checked every `max_wal_flush_interval` (during WAL flush ticks). + /// Default: 256MB + pub max_memtable_size: usize, + + /// Maximum number of rows in a MemTable. + /// + /// Used to pre-allocate index storage (e.g., IVF-PQ partition capacity). + /// When a partition reaches capacity, memtable will be flushed. + /// Default: 100,000 rows + pub max_memtable_rows: usize, + + /// Maximum number of batches in a MemTable. + /// + /// Used to pre-allocate batch storage. When this limit is reached, + /// memtable will be flushed. Sized for typical ML workloads with + /// 1024-dim vectors (~82KB per 20-row batch). + /// Default: 8,000 batches + pub max_memtable_batches: usize, + + /// Safety factor for IVF-PQ index partition capacity calculation. + /// + /// Accounts for non-uniform distribution of vectors across partitions. + /// Higher values use more memory but reduce overflow risk. + /// Partition capacity = min((max_rows / num_partitions) * safety_factor, max_rows) + /// Default: 8 + pub ivf_index_partition_capacity_safety_factor: usize, + + /// Batch size for parallel HEAD requests when scanning for manifest versions. + /// + /// Higher values scan faster but use more parallel requests. + /// Default: 2 + pub manifest_scan_batch_size: usize, + + /// Maximum unflushed bytes before applying backpressure. + /// + /// When total unflushed data (active memtable + frozen memtables) exceeds this, + /// new writes will block until some data is flushed to storage. + /// This prevents unbounded memory growth during write spikes. + /// + /// Default: 1GB + pub max_unflushed_memtable_bytes: usize, + + /// Interval for logging warnings when writes are blocked by backpressure. + /// + /// When a write is blocked waiting for WAL flush, memtable flush, or index + /// updates to complete, a warning is logged after this duration. The write + /// will continue waiting indefinitely (it never fails due to backpressure), + /// but warnings are logged at this interval to help diagnose slow flushes. + /// + /// Default: 30 seconds + pub backpressure_log_interval: Duration, + + /// Maximum rows to buffer before flushing to async indexes. + /// + /// Only applies when `sync_indexed_write` is false. Larger values enable + /// better vectorization (especially for IVF-PQ) but increase memory usage + /// and latency before data becomes searchable. + /// + /// Default: 10,000 rows + pub async_index_buffer_rows: usize, + + /// Maximum time to buffer before flushing to async indexes. + /// + /// Only applies when `sync_indexed_write` is false. Ensures bounded latency + /// for data to become searchable even during low write throughput. + /// + /// Default: 1 second + pub async_index_interval: Duration, + + /// Interval for periodic stats logging. + /// + /// Stats (write throughput, backpressure events, memtable size) are logged + /// at this interval. Set to None to disable periodic stats logging. 
+ /// + /// Default: 60 seconds + pub stats_log_interval: Option, +} + +impl Default for RegionWriterConfig { + fn default() -> Self { + Self { + region_id: Uuid::new_v4(), + region_spec_id: 0, + durable_write: true, + sync_indexed_write: true, + max_wal_buffer_size: 10 * 1024 * 1024, // 10MB + max_wal_flush_interval: Some(Duration::from_millis(100)), // 100ms + max_memtable_size: 256 * 1024 * 1024, // 256MB + max_memtable_rows: 100_000, // 100k rows + max_memtable_batches: 8_000, // 8k batches + ivf_index_partition_capacity_safety_factor: 8, + manifest_scan_batch_size: 2, + max_unflushed_memtable_bytes: 1024 * 1024 * 1024, // 1GB + backpressure_log_interval: Duration::from_secs(30), + async_index_buffer_rows: 10_000, + async_index_interval: Duration::from_secs(1), + stats_log_interval: Some(Duration::from_secs(60)), // 1 minute + } + } +} + +impl RegionWriterConfig { + /// Create a new configuration with the given region ID. + pub fn new(region_id: Uuid) -> Self { + Self { + region_id, + ..Default::default() + } + } + + /// Set the region spec ID. + pub fn with_region_spec_id(mut self, spec_id: u32) -> Self { + self.region_spec_id = spec_id; + self + } + + /// Set durable writes mode. + pub fn with_durable_write(mut self, durable: bool) -> Self { + self.durable_write = durable; + self + } + + /// Set indexed writes mode. + pub fn with_sync_indexed_write(mut self, indexed: bool) -> Self { + self.sync_indexed_write = indexed; + self + } + + /// Set maximum WAL buffer size. + pub fn with_max_wal_buffer_size(mut self, size: usize) -> Self { + self.max_wal_buffer_size = size; + self + } + + /// Set maximum flush interval. + pub fn with_max_wal_flush_interval(mut self, interval: Duration) -> Self { + self.max_wal_flush_interval = Some(interval); + self + } + + /// Set maximum MemTable size. + pub fn with_max_memtable_size(mut self, size: usize) -> Self { + self.max_memtable_size = size; + self + } + + /// Set maximum MemTable rows for index pre-allocation. + pub fn with_max_memtable_rows(mut self, rows: usize) -> Self { + self.max_memtable_rows = rows; + self + } + + /// Set maximum MemTable batches for batch store pre-allocation. + pub fn with_max_memtable_batches(mut self, batches: usize) -> Self { + self.max_memtable_batches = batches; + self + } + + /// Set partition capacity safety factor for IVF-PQ indexes. + pub fn with_ivf_index_partition_capacity_safety_factor(mut self, factor: usize) -> Self { + self.ivf_index_partition_capacity_safety_factor = factor; + self + } + + /// Set manifest scan batch size. + pub fn with_manifest_scan_batch_size(mut self, size: usize) -> Self { + self.manifest_scan_batch_size = size; + self + } + + /// Set maximum unflushed bytes for backpressure. + pub fn with_max_unflushed_memtable_bytes(mut self, size: usize) -> Self { + self.max_unflushed_memtable_bytes = size; + self + } + + /// Set backpressure log interval. + pub fn with_backpressure_log_interval(mut self, interval: Duration) -> Self { + self.backpressure_log_interval = interval; + self + } + + /// Set async index buffer rows. + pub fn with_async_index_buffer_rows(mut self, rows: usize) -> Self { + self.async_index_buffer_rows = rows; + self + } + + /// Set async index interval. + pub fn with_async_index_interval(mut self, interval: Duration) -> Self { + self.async_index_interval = interval; + self + } + + /// Set stats logging interval. Use None to disable periodic stats logging. 
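A hedged usage sketch of the fluent setters defined above, assuming the `lance::dataset::mem_wal` re-export path introduced earlier in this patch; the concrete values are illustrative only:

```rust
use std::time::Duration;
use uuid::Uuid;

// Assumed import path, via the `pub use config::RegionWriterConfig` re-export.
use lance::dataset::mem_wal::RegionWriterConfig;

fn main() {
    // Low-latency, relaxed-durability writer: acknowledge writes from memory,
    // flush the WAL every 50 ms or 8 MB, cap unflushed data at 512 MB, and
    // log a backpressure warning every 10 s while a write is blocked.
    let config = RegionWriterConfig::new(Uuid::new_v4())
        .with_durable_write(false)
        .with_max_wal_buffer_size(8 * 1024 * 1024)
        .with_max_wal_flush_interval(Duration::from_millis(50))
        .with_max_memtable_size(128 * 1024 * 1024)
        .with_max_unflushed_memtable_bytes(512 * 1024 * 1024)
        .with_backpressure_log_interval(Duration::from_secs(10));

    assert!(!config.durable_write);
    println!("region {} configured", config.region_id);
}
```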
+ pub fn with_stats_log_interval(mut self, interval: Option) -> Self { + self.stats_log_interval = interval; + self + } +} diff --git a/rust/lance/src/dataset/mem_wal/dispatcher.rs b/rust/lance/src/dataset/mem_wal/dispatcher.rs new file mode 100644 index 00000000000..c869a2946f3 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/dispatcher.rs @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Background task dispatcher with message-driven event handling. + +use std::fmt::Debug; +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use lance_core::Result; +use std::sync::RwLock; +use tokio::select; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; +use tokio::time::{interval_at, Instant, Interval}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, info, warn}; + +/// Factory function for creating ticker messages. +pub type MessageFactory = Box T + Send + Sync>; + +/// Handler trait for processing messages in a background task. +/// +/// Implementations define: +/// - How to process incoming messages +/// - Optional periodic tickers for scheduled work +/// - Cleanup logic on shutdown +#[async_trait] +pub trait MessageHandler: Send { + /// Define periodic tickers that generate messages. + /// + /// Each ticker is a tuple of (interval, factory function). + /// The factory is called each time the interval elapses. + fn tickers(&mut self) -> Vec<(Duration, MessageFactory)> { + vec![] + } + + /// Handle a single message. + /// + /// Called for both external messages and ticker-generated messages. + async fn handle(&mut self, message: T) -> Result<()>; + + /// Cleanup on shutdown. + /// + /// Called when the handler is shutting down. Override to perform + /// custom cleanup logic (e.g., flushing buffers, closing connections). + /// + /// # Arguments + /// + /// * `_shutdown_ok` - Whether the shutdown was clean (true) or due to error (false) + async fn cleanup(&mut self, _shutdown_ok: bool) -> Result<()> { + Ok(()) + } +} + +/// Dispatcher that runs the event loop for a single message handler. +struct TaskDispatcher { + handler: Box>, + rx: mpsc::UnboundedReceiver, + cancellation_token: CancellationToken, + name: String, +} + +impl TaskDispatcher { + /// Run the message processing loop until cancellation or error. + async fn run(mut self) -> Result<()> { + let tickers = self.handler.tickers(); + let mut ticker_intervals: Vec<(Interval, MessageFactory)> = tickers + .into_iter() + .map(|(duration, factory)| { + let interval = interval_at(Instant::now() + duration, duration); + (interval, factory) + }) + .collect(); + + let result = loop { + // Handle tickers based on whether we have any + if ticker_intervals.is_empty() { + // No tickers, just handle messages and cancellation + select! { + biased; + + // Priority 1: Cancellation + _ = self.cancellation_token.cancelled() => { + debug!("Task '{}' received cancellation", self.name); + break Ok(()); + } + + // Priority 2: Messages from channel + msg = self.rx.recv() => { + match msg { + Some(message) => { + if let Err(e) = self.handler.handle(message).await { + error!("Task '{}' error handling message: {}", self.name, e); + break Err(e); + } + } + None => { + debug!("Task '{}' channel closed", self.name); + break Ok(()); + } + } + } + } + } else { + // With tickers, give ticker higher priority than messages to prevent starvation. 
+ let first_ticker = ticker_intervals.first_mut().unwrap(); + let first_interval = &mut first_ticker.0; + + select! { + biased; + + // Priority 1: Cancellation + _ = self.cancellation_token.cancelled() => { + debug!("Task '{}' received cancellation", self.name); + break Ok(()); + } + + // Priority 2: Ticker event - must be higher than messages to prevent starvation + _ = first_interval.tick() => { + let message = (ticker_intervals[0].1)(); + if let Err(e) = self.handler.handle(message).await { + error!("Task '{}' error handling ticker message: {}", self.name, e); + break Err(e); + } + } + + // Priority 3: Messages from channel + msg = self.rx.recv() => { + match msg { + Some(message) => { + if let Err(e) = self.handler.handle(message).await { + error!("Task '{}' error handling message: {}", self.name, e); + break Err(e); + } + } + None => { + debug!("Task '{}' channel closed", self.name); + break Ok(()); + } + } + } + } + } + }; + + // Cleanup + let cleanup_ok = result.is_ok(); + self.handler.cleanup(cleanup_ok).await?; + + info!("Task dispatcher '{}' stopped", self.name); + result + } +} + +/// Executor that manages multiple background tasks. +pub struct TaskExecutor { + tasks: RwLock>)>>, + cancellation_token: CancellationToken, +} + +impl TaskExecutor { + /// Create a new task executor. + pub fn new() -> Self { + Self { + tasks: RwLock::new(Vec::new()), + cancellation_token: CancellationToken::new(), + } + } + + /// Add a new handler task. + /// + /// The handler will be spawned as a tokio task and run until shutdown. + /// + /// # Arguments + /// + /// * `name` - Unique name for the task (for logging) + /// * `handler` - The message handler implementation + /// * `rx` - Receiver for messages to this handler + pub fn add_handler( + &self, + name: String, + handler: Box>, + rx: mpsc::UnboundedReceiver, + ) -> Result<()> { + let dispatcher = TaskDispatcher { + handler, + rx, + cancellation_token: self.cancellation_token.clone(), + name: name.clone(), + }; + + let handle = tokio::spawn(async move { dispatcher.run().await }); + + self.tasks.write().unwrap().push((name, handle)); + Ok(()) + } + + /// Shutdown a specific task by name. + #[allow(dead_code)] + pub async fn shutdown_task(&self, name: &str) -> Result<()> { + // Find and remove the task + let handle = { + let mut tasks = self.tasks.write().unwrap(); + let pos = tasks.iter().position(|(n, _)| n == name); + pos.map(|i| tasks.remove(i).1) + }; + + if let Some(handle) = handle { + // Cancel and wait + self.cancellation_token.cancel(); + match handle.await { + Ok(result) => result, + Err(e) => { + error!("Task '{}' panicked: {}", name, e); + Ok(()) + } + } + } else { + warn!("Task '{}' not found", name); + Ok(()) + } + } + + /// Shutdown all tasks and wait for completion. + pub async fn shutdown_all(&self) -> Result<()> { + info!("Shutting down all tasks"); + + // Signal cancellation + self.cancellation_token.cancel(); + + // Wait for all tasks + let tasks = std::mem::take(&mut *self.tasks.write().unwrap()); + for (name, handle) in tasks { + match handle.await { + Ok(Ok(())) => debug!("Task '{}' completed successfully", name), + Ok(Err(e)) => warn!("Task '{}' completed with error: {}", name, e), + Err(e) => error!("Task '{}' panicked: {}", name, e), + } + } + + Ok(()) + } + + /// Check if cancellation has been requested. + #[allow(dead_code)] + pub fn is_cancelled(&self) -> bool { + self.cancellation_token.is_cancelled() + } + + /// Get a clone of the cancellation token. 
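The dispatcher loop above leans on `tokio::select!` with `biased;` so that cancellation outranks the ticker and the ticker outranks queued messages. A self-contained miniature of that priority scheme, using the same `tokio` and `tokio-util` crates the dispatcher imports:

```rust
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::{interval, sleep};
use tokio_util::sync::CancellationToken;

// With `biased;`, select polls the arms top-down: cancellation wins over the
// ticker, and the ticker wins over queued messages, so a flood of messages
// cannot starve periodic work.
#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let (tx, mut rx) = mpsc::unbounded_channel::<u32>();
    let mut ticker = interval(Duration::from_millis(50));

    let cancel = token.clone();
    tokio::spawn(async move {
        for i in 0..100 {
            let _ = tx.send(i);
        }
        sleep(Duration::from_millis(200)).await;
        cancel.cancel();
    });

    loop {
        tokio::select! {
            biased;
            _ = token.cancelled() => {
                println!("cancelled, shutting down");
                break;
            }
            _ = ticker.tick() => {
                println!("tick");
            }
            msg = rx.recv() => {
                match msg {
                    Some(m) => println!("message {m}"),
                    None => break, // channel closed
                }
            }
        }
    }
}
```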
+ #[allow(dead_code)] + pub fn cancellation_token(&self) -> CancellationToken { + self.cancellation_token.clone() + } +} + +impl Default for TaskExecutor { + fn default() -> Self { + Self::new() + } +} + +/// Watcher for async durability notifications. +/// +/// Allows callers to wait for a write to become durable (flushed to storage). +#[derive(Clone, Debug)] +pub struct DurableWatcher { + rx: Arc>, +} + +impl DurableWatcher { + /// Create a new durable watcher with a sender for signaling. + pub fn new() -> (Self, DurableSender) { + let (tx, rx) = tokio::sync::watch::channel(false); + (Self { rx: Arc::new(rx) }, DurableSender { tx }) + } + + /// Wait until the write is durable. + pub async fn wait(&self) -> Result<()> { + let mut rx = (*self.rx).clone(); + while !*rx.borrow() { + rx.changed().await.map_err(|_| { + lance_core::Error::io("Durable watcher channel closed", snafu::location!()) + })?; + } + Ok(()) + } +} + +/// Sender for signaling durability. +pub struct DurableSender { + tx: tokio::sync::watch::Sender, +} + +impl std::fmt::Debug for DurableSender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DurableSender").finish_non_exhaustive() + } +} + +impl DurableSender { + /// Signal that the write is now durable. + pub fn notify_durable(self) { + let _ = self.tx.send(true); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::atomic::{AtomicUsize, Ordering}; + + #[derive(Debug)] + enum TestMessage { + Increment, + GetCount(tokio::sync::oneshot::Sender), + } + + struct CountingHandler { + count: Arc, + } + + #[async_trait] + impl MessageHandler for CountingHandler { + async fn handle(&mut self, message: TestMessage) -> Result<()> { + match message { + TestMessage::Increment => { + self.count.fetch_add(1, Ordering::SeqCst); + } + TestMessage::GetCount(tx) => { + let _ = tx.send(self.count.load(Ordering::SeqCst)); + } + } + Ok(()) + } + } + + #[tokio::test] + async fn test_basic_message_handling() { + let count = Arc::new(AtomicUsize::new(0)); + let handler = CountingHandler { + count: count.clone(), + }; + + let (tx, rx) = mpsc::unbounded_channel(); + let executor = TaskExecutor::new(); + + executor + .add_handler("counter".to_string(), Box::new(handler), rx) + .unwrap(); + + // Send some messages + tx.send(TestMessage::Increment).unwrap(); + tx.send(TestMessage::Increment).unwrap(); + tx.send(TestMessage::Increment).unwrap(); + + // Give time to process + tokio::time::sleep(Duration::from_millis(50)).await; + + let (resp_tx, resp_rx) = tokio::sync::oneshot::channel(); + tx.send(TestMessage::GetCount(resp_tx)).unwrap(); + + let result = resp_rx.await.unwrap(); + assert_eq!(result, 3); + + // Cleanup + drop(tx); + executor.shutdown_all().await.unwrap(); + } + + #[tokio::test] + async fn test_durable_watcher() { + let (watcher, sender) = DurableWatcher::new(); + + // Spawn a task that waits for durability + let watcher_clone = watcher.clone(); + let wait_task = tokio::spawn(async move { watcher_clone.wait().await }); + + // Brief delay + tokio::time::sleep(Duration::from_millis(10)).await; + + // Signal durability + sender.notify_durable(); + + // Wait should complete + wait_task.await.unwrap().unwrap(); + } +} diff --git a/rust/lance/src/dataset/mem_wal/epoch.rs b/rust/lance/src/dataset/mem_wal/epoch.rs new file mode 100644 index 00000000000..f73c591ac07 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/epoch.rs @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + 
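The `DurableWatcher`/`DurableSender` pair above is a thin wrapper over a `tokio::sync::watch` channel carrying a boolean "durable yet?" flag. The same hand-off in miniature, outside the wrapper types:

```rust
use std::time::Duration;
use tokio::sync::watch;
use tokio::time::sleep;

// The writer side holds the sender; waiters clone the receiver and block
// until the flag flips to true.
#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(false);

    let waiter = {
        let mut rx = rx.clone();
        tokio::spawn(async move {
            while !*rx.borrow() {
                rx.changed().await.expect("sender dropped before signalling");
            }
            println!("write is durable");
        })
    };

    // Simulate the WAL flush completing a little later.
    sleep(Duration::from_millis(20)).await;
    tx.send(true).unwrap();

    waiter.await.unwrap();
}
```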
+//! Epoch-based writer fencing for single-writer semantics. +//! +//! Each region has exactly one active writer at any time. Writers use +//! monotonically increasing epochs to claim ownership and detect when +//! they've been fenced by a newer writer. +//! +//! ## Protocol +//! +//! ### Writer Initialization +//! +//! 1. Load the latest region manifest +//! 2. Increment `writer_epoch` by one +//! 3. Atomically write a new manifest with the incremented epoch +//! 4. If write fails (another writer claimed first), retry with higher epoch +//! +//! ### Fencing Check +//! +//! Before any manifest update, verify: +//! - `local_epoch == stored_epoch`: Writer is still active, proceed +//! - `local_epoch < stored_epoch`: Writer has been fenced, abort +//! +//! ## Fencing Guarantees +//! +//! - Fenced writers stop all operations immediately +//! - WAL entries from fenced writers are still valid (epoch <= current) +//! - No data loss occurs - fenced entries are replayed by the new writer + +use std::sync::Arc; + +use lance_core::{Error, Result}; +use lance_index::mem_wal::RegionManifest; +use snafu::location; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use super::manifest::RegionManifestStore; + +/// Error indicating a writer has been fenced by a newer writer. +#[derive(Debug, Clone)] +pub struct FencedError { + pub local_epoch: u64, + pub stored_epoch: u64, + pub region_id: Uuid, +} + +impl std::fmt::Display for FencedError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Writer fenced: local epoch {} < stored epoch {} for region {}", + self.local_epoch, self.stored_epoch, self.region_id + ) + } +} + +impl std::error::Error for FencedError {} + +/// Guard that tracks a writer's claimed epoch and provides fencing checks. +/// +/// An `EpochGuard` is obtained by successfully claiming a region via +/// [`EpochGuard::claim`]. It must be checked before any manifest update +/// to ensure the writer hasn't been fenced. +#[derive(Debug)] +pub struct EpochGuard { + local_epoch: u64, + region_id: Uuid, + manifest_store: Arc, +} + +impl EpochGuard { + /// Claim a region by incrementing its writer epoch. + /// + /// This establishes single-writer semantics by: + /// 1. Loading the current manifest (or creating initial state) + /// 2. Incrementing the writer epoch + /// 3. Atomically writing the new manifest + /// + /// If another writer has already claimed the region (race condition), + /// this will retry with an even higher epoch. + /// + /// # Arguments + /// + /// * `manifest_store` - Store for reading/writing region manifests + /// * `region_spec_id` - Region spec ID (used when creating new regions) + /// + /// # Returns + /// + /// A tuple of `(EpochGuard, RegionManifest)` where the manifest is the + /// claimed state (may be freshly created or loaded and epoch-bumped). 
+ pub async fn claim( + manifest_store: Arc, + region_spec_id: u32, + ) -> Result<(Self, RegionManifest)> { + let region_id = manifest_store.region_id(); + let max_retries = 10; + + for attempt in 0..max_retries { + // Load current manifest or create initial state + let current = manifest_store.read_latest().await?; + + let (next_version, next_epoch, base_manifest) = match current { + Some(m) => (m.version + 1, m.writer_epoch + 1, Some(m)), + None => (1, 1, None), + }; + + // Create new manifest with bumped epoch + let new_manifest = if let Some(base) = base_manifest { + RegionManifest { + version: next_version, + writer_epoch: next_epoch, + ..base + } + } else { + RegionManifest { + region_id, + version: next_version, + region_spec_id, + writer_epoch: next_epoch, + replay_after_wal_id: 0, + wal_id_last_seen: 0, + current_generation: 1, + flushed_generations: vec![], + } + }; + + // Attempt atomic write + match manifest_store.write(&new_manifest).await { + Ok(_) => { + info!( + "Claimed region {} with epoch {} (version {})", + region_id, next_epoch, next_version + ); + + let guard = Self { + local_epoch: next_epoch, + region_id, + manifest_store, + }; + + return Ok((guard, new_manifest)); + } + Err(e) => { + // Another writer beat us - retry with fresh state + warn!( + "Epoch claim attempt {} failed for region {}: {}", + attempt + 1, + region_id, + e + ); + + if attempt == max_retries - 1 { + return Err(Error::io( + format!( + "Failed to claim region {} after {} attempts", + region_id, max_retries + ), + location!(), + )); + } + + // Brief delay before retry to avoid tight spin + tokio::time::sleep(std::time::Duration::from_millis(10 * (attempt as u64 + 1))) + .await; + } + } + } + + unreachable!() + } + + /// Check if this writer has been fenced. + /// + /// Loads the current manifest and compares epochs. If the stored epoch + /// is higher than our local epoch, we've been fenced by another writer. + /// + /// # Errors + /// + /// Returns `FencedError` if this writer has been fenced. + pub async fn check_fenced(&self) -> Result<()> { + let current = self.manifest_store.read_latest().await?; + + match current { + Some(m) if m.writer_epoch > self.local_epoch => { + let err = FencedError { + local_epoch: self.local_epoch, + stored_epoch: m.writer_epoch, + region_id: self.region_id, + }; + Err(Error::io(err.to_string(), location!())) + } + _ => Ok(()), + } + } + + /// Get the local epoch for this writer. + pub fn epoch(&self) -> u64 { + self.local_epoch + } + + /// Get the region ID. + pub fn region_id(&self) -> Uuid { + self.region_id + } + + /// Get a reference to the manifest store. + pub fn manifest_store(&self) -> &Arc { + &self.manifest_store + } + + /// Write a new manifest version, checking for fencing first. + /// + /// This combines fencing check with manifest write atomically. + /// If another writer has fenced us, the write fails immediately. + /// If the write fails due to version conflict, it may indicate + /// concurrent access that should be handled by the caller. 
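`claim` and `check_fenced` above implement the fencing protocol: claiming bumps the stored writer epoch, and any writer whose local epoch falls behind the stored one must stop. A standalone mock of that protocol, with an in-memory `Mutex<u64>` standing in for the atomically written region manifest (so the conditional-write retry loop is elided):

```rust
use std::sync::{Arc, Mutex};

// Toy stand-in for the region manifest: just the stored writer epoch.
#[derive(Default)]
struct MockManifestStore {
    epoch: Mutex<u64>,
}

struct Writer {
    local_epoch: u64,
    store: Arc<MockManifestStore>,
}

impl Writer {
    /// Claim the region by bumping the stored epoch; the claimant with the
    /// highest epoch is the only non-fenced writer.
    fn claim(store: Arc<MockManifestStore>) -> Self {
        let mut epoch = store.epoch.lock().unwrap();
        *epoch += 1;
        let local_epoch = *epoch;
        drop(epoch);
        Self { local_epoch, store }
    }

    /// Fencing check: a stored epoch higher than ours means a newer writer
    /// has taken over and we must stop.
    fn check_fenced(&self) -> Result<(), String> {
        let stored = *self.store.epoch.lock().unwrap();
        if stored > self.local_epoch {
            Err(format!(
                "fenced: local epoch {} < stored epoch {}",
                self.local_epoch, stored
            ))
        } else {
            Ok(())
        }
    }
}

fn main() {
    let store = Arc::new(MockManifestStore::default());

    let writer1 = Writer::claim(store.clone()); // epoch 1
    assert!(writer1.check_fenced().is_ok());

    let writer2 = Writer::claim(store.clone()); // epoch 2 fences writer1
    assert!(writer1.check_fenced().is_err());
    assert!(writer2.check_fenced().is_ok());
}
```

The real `claim` replaces the mutex with a conditional manifest write plus retries, but the fencing invariant is the same comparison of local versus stored epoch.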
+ /// + /// # Arguments + /// + /// * `manifest` - The manifest to write (version should be correctly set) + pub async fn write_manifest(&self, manifest: &RegionManifest) -> Result<()> { + // First check if we've been fenced + self.check_fenced().await?; + + // Verify manifest has our epoch + if manifest.writer_epoch != self.local_epoch { + return Err(Error::invalid_input( + format!( + "Manifest epoch {} doesn't match local epoch {}", + manifest.writer_epoch, self.local_epoch + ), + location!(), + )); + } + + // Write the manifest + self.manifest_store.write(manifest).await?; + + debug!( + "Wrote manifest version {} for region {}", + manifest.version, self.region_id + ); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use lance_io::object_store::ObjectStore; + use object_store::path::Path; + use tempfile::TempDir; + + async fn create_local_store() -> (Arc, Path, TempDir) { + let temp_dir = tempfile::tempdir().unwrap(); + let uri = format!("file://{}", temp_dir.path().display()); + let (store, path) = ObjectStore::from_uri(&uri).await.unwrap(); + (store, path, temp_dir) + } + + #[tokio::test] + async fn test_initial_claim() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new(store, &base_path, region_id, 2)); + + let (guard, manifest) = EpochGuard::claim(manifest_store, 0).await.unwrap(); + + assert_eq!(guard.epoch(), 1); + assert_eq!(manifest.version, 1); + assert_eq!(manifest.writer_epoch, 1); + } + + #[tokio::test] + async fn test_subsequent_claim_bumps_epoch() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // First claim + let (guard1, manifest1) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + assert_eq!(guard1.epoch(), 1); + assert_eq!(manifest1.version, 1); + + // Second claim (simulating new writer taking over) + let (guard2, manifest2) = EpochGuard::claim(manifest_store, 0).await.unwrap(); + assert_eq!(guard2.epoch(), 2); + assert_eq!(manifest2.version, 2); + } + + #[tokio::test] + async fn test_fencing_detection() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // First writer claims + let (guard1, _) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + assert!(guard1.check_fenced().await.is_ok()); + + // Second writer claims (fences first) + let (guard2, _) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + assert!(guard2.check_fenced().await.is_ok()); + + // First writer should now detect fencing + assert!(guard1.check_fenced().await.is_err()); + } + + #[tokio::test] + async fn test_write_manifest_checks_fencing() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // First writer claims + let (guard1, manifest1) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + // Second writer claims (fences first) + let (_guard2, _) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + // First writer tries to write - should fail due to fencing + let updated = RegionManifest { + version: 
manifest1.version + 1, + ..manifest1 + }; + assert!(guard1.write_manifest(&updated).await.is_err()); + } + + #[tokio::test] + async fn test_preserves_manifest_state() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // First writer sets some state + let (guard1, mut manifest1) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + manifest1.version = 2; + manifest1.replay_after_wal_id = 100; + manifest1.current_generation = 5; + guard1.write_manifest(&manifest1).await.unwrap(); + + // Second writer should inherit state + let (_guard2, manifest2) = EpochGuard::claim(manifest_store, 0).await.unwrap(); + assert_eq!(manifest2.replay_after_wal_id, 100); + assert_eq!(manifest2.current_generation, 5); + assert_eq!(manifest2.writer_epoch, 2); // epoch bumped + assert_eq!(manifest2.version, 3); // version bumped + } +} diff --git a/rust/lance/src/dataset/mem_wal/indexes.rs b/rust/lance/src/dataset/mem_wal/indexes.rs new file mode 100644 index 00000000000..5d9d033f248 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/indexes.rs @@ -0,0 +1,2143 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +#![allow(clippy::print_stderr)] +#![allow(clippy::type_complexity)] + +//! Index registry for MemTable write path. +//! +//! Maintains in-memory indexes that are updated synchronously with writes: +//! - BTree: Primary key and scalar field lookups +//! - IVF-PQ: Vector similarity search (reuses centroids and codebook from base table) +//! - FTS: Full-text search +//! +//! Other index types log a warning and are skipped. + +use std::collections::HashMap; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use arrow_array::cast::AsArray; +use arrow_array::{Array, FixedSizeListArray, RecordBatch, UInt8Array}; +use crossbeam_skiplist::SkipMap; +use datafusion::common::ScalarValue; +use lance_core::datatypes::Schema as LanceSchema; +use lance_core::{Error, Result}; +use lance_index::scalar::btree::OrderableScalarValue; +use lance_index::vector::ivf::storage::IvfModel; +use lance_index::vector::kmeans::compute_partitions_arrow_array; +use lance_index::vector::pq::storage::transpose; +use lance_index::vector::pq::ProductQuantizer; +use lance_index::vector::quantizer::Quantization; + +use super::ivfpq_store::IvfPqPartition; +use lance_linalg::distance::DistanceType; +use lance_table::format::IndexMetadata; +use snafu::location; + +/// Row position in MemTable. +/// +/// This is the absolute row position across all batches in the MemTable. +/// When flushed to a single Lance file, this becomes the row ID directly. +pub type RowPosition = u64; + +/// Index coverage information for split plan creation. +/// +/// Tracks the maximum indexed batch position for each index, +/// used to determine which data is covered by indexes. +#[derive(Debug, Clone, Default)] +pub struct IndexCoverageInfo { + /// BTree/scalar index coverage: index name -> max_indexed_batch_position + pub btree_coverage: HashMap, + /// IVF-PQ vector index coverage: index name -> max_indexed_batch_position + pub ivfpq_coverage: HashMap, + /// FTS index coverage: index name -> max_indexed_batch_position + pub fts_coverage: HashMap, +} + +/// Result of checking index coverage for a query. 
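+///
+/// # Example
+///
+/// Illustrative sketch of how coverage positions are interpreted (the numbers are
+/// made up; the same behavior is exercised in the tests below):
+///
+/// ```ignore
+/// let mut coverage = IndexCoverageInfo::default();
+/// coverage.btree_coverage.insert("id_idx".to_string(), 50);
+///
+/// // Everything visible up to batch position 50 is indexed.
+/// assert_eq!(coverage.check_btree_coverage("id_idx", 50), CoverageResult::Full);
+/// // Batch positions 51..=100 are visible but not yet indexed.
+/// assert_eq!(
+///     coverage.check_btree_coverage("id_idx", 100),
+///     CoverageResult::Partial { max_indexed_batch_position: 50 }
+/// );
+/// // An unknown index name (or a position of 0) means no coverage at all.
+/// assert_eq!(coverage.check_btree_coverage("missing", 10), CoverageResult::None);
+/// ```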
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CoverageResult { + /// All data is covered by the index (max_indexed_batch_position >= max_visible_batch_position) + Full, + /// Part of the data is covered (max_indexed_batch_position < max_visible_batch_position) + Partial { + /// Maximum batch position that is indexed + max_indexed_batch_position: usize, + }, + /// No data is covered by the index (max_indexed_batch_position == 0 or no index) + None, +} + +impl IndexCoverageInfo { + /// Check coverage for a BTree/scalar index by name. + pub fn check_btree_coverage( + &self, + name: &str, + max_visible_batch_position: usize, + ) -> CoverageResult { + self.check_coverage(self.btree_coverage.get(name), max_visible_batch_position) + } + + /// Check coverage for an IVF-PQ vector index by name. + pub fn check_ivfpq_coverage( + &self, + name: &str, + max_visible_batch_position: usize, + ) -> CoverageResult { + self.check_coverage(self.ivfpq_coverage.get(name), max_visible_batch_position) + } + + /// Check coverage for an FTS index by name. + pub fn check_fts_coverage( + &self, + name: &str, + max_visible_batch_position: usize, + ) -> CoverageResult { + self.check_coverage(self.fts_coverage.get(name), max_visible_batch_position) + } + + /// Check coverage given a max_indexed_batch_position value. + fn check_coverage( + &self, + max_indexed_batch_position: Option<&usize>, + max_visible_batch_position: usize, + ) -> CoverageResult { + match max_indexed_batch_position { + None | Some(&0) => CoverageResult::None, + Some(&pos) if pos >= max_visible_batch_position => CoverageResult::Full, + Some(&pos) => CoverageResult::Partial { + max_indexed_batch_position: pos, + }, + } + } + + /// Get the minimum coverage across all BTree indexes. + /// + /// Returns the minimum max_indexed_batch_position, useful when multiple indexes + /// need to cover the same data. + pub fn min_btree_coverage(&self) -> usize { + self.btree_coverage.values().copied().min().unwrap_or(0) + } + + /// Get the minimum coverage across all IVF-PQ indexes. + pub fn min_ivfpq_coverage(&self) -> usize { + self.ivfpq_coverage.values().copied().min().unwrap_or(0) + } + + /// Get the minimum coverage across all FTS indexes. + pub fn min_fts_coverage(&self) -> usize { + self.fts_coverage.values().copied().min().unwrap_or(0) + } +} + +/// Composite key for BTree index. +/// +/// By combining (scalar_value, row_position), each entry is unique. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct IndexKey { + /// The indexed scalar value. + pub value: OrderableScalarValue, + /// Row position (makes the key unique for non-unique indexes). + pub row_position: RowPosition, +} + +impl PartialOrd for IndexKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for IndexKey { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // First compare by value, then by row_position + match self.value.cmp(&other.value) { + std::cmp::Ordering::Equal => self.row_position.cmp(&other.row_position), + ord => ord, + } + } +} + +/// In-memory BTree index for scalar fields. +/// +/// Represents the in-memory portion of Lance's on-disk BTree index. +/// Implemented using crossbeam-skiplist for concurrent access with O(log n) operations. +#[derive(Debug)] +pub struct BTreeMemIndex { + /// Ordered map: (scalar_value, row_position) -> () + lookup: SkipMap, + /// Column name this index is built on. + column_name: String, + /// Maximum batch position that has been indexed. + /// Used for index coverage tracking in split plans. 
+ max_indexed_batch_position: AtomicUsize, +} + +impl Default for BTreeMemIndex { + fn default() -> Self { + Self { + lookup: SkipMap::new(), + column_name: String::new(), + max_indexed_batch_position: AtomicUsize::new(0), + } + } +} + +impl BTreeMemIndex { + /// Create a new BTree index for the given column. + pub fn new(column_name: String) -> Self { + Self { + lookup: SkipMap::new(), + column_name, + max_indexed_batch_position: AtomicUsize::new(0), + } + } + + /// Get the maximum batch position that has been indexed. + /// + /// Used for index coverage tracking. Rows with batch_position <= this value + /// are covered by the index. + pub fn max_indexed_batch_position(&self) -> usize { + self.max_indexed_batch_position.load(Ordering::Acquire) + } + + /// Update the maximum indexed batch position. + /// + /// Only updates if the new value is greater than the current value. + /// Uses compare-and-swap to handle concurrent updates. + pub fn update_max_indexed_batch_position(&self, batch_pos: usize) { + let mut current = self.max_indexed_batch_position.load(Ordering::Acquire); + while batch_pos > current { + match self.max_indexed_batch_position.compare_exchange_weak( + current, + batch_pos, + Ordering::Release, + Ordering::Acquire, + ) { + Ok(_) => break, + Err(actual) => current = actual, + } + } + } + + /// Insert rows from a batch into the index. + pub fn insert(&self, batch: &RecordBatch, row_offset: u64) -> Result<()> { + self.insert_with_batch_position(batch, row_offset, None) + } + + /// Insert rows from a batch into the index with batch position tracking. + pub fn insert_with_batch_position( + &self, + batch: &RecordBatch, + row_offset: u64, + batch_position: Option, + ) -> Result<()> { + let col_idx = batch + .schema() + .column_with_name(&self.column_name) + .map(|(idx, _)| idx) + .ok_or_else(|| { + Error::invalid_input( + format!("Column '{}' not found in batch", self.column_name), + location!(), + ) + })?; + + let column = batch.column(col_idx); + for row_idx in 0..batch.num_rows() { + let value = ScalarValue::try_from_array(column.as_ref(), row_idx)?; + let row_position = row_offset + row_idx as u64; + + let key = IndexKey { + value: OrderableScalarValue(value), + row_position, + }; + self.lookup.insert(key, ()); + } + + // Update max_indexed_batch_position if provided + if let Some(pos) = batch_position { + self.update_max_indexed_batch_position(pos); + } + + Ok(()) + } + + /// Look up row positions for an exact value. + pub fn get(&self, value: &ScalarValue) -> Vec { + let orderable = OrderableScalarValue(value.clone()); + let start = IndexKey { + value: orderable.clone(), + row_position: 0, + }; + let end = IndexKey { + value: orderable, + row_position: u64::MAX, + }; + + // Range scan: all entries with the same value + self.lookup + .range(start..=end) + .map(|entry| entry.key().row_position) + .collect() + } + + /// Get the number of entries (not unique values). + pub fn len(&self) -> usize { + self.lookup.len() + } + + /// Check if the index is empty. + pub fn is_empty(&self) -> bool { + self.lookup.is_empty() + } + + /// Get the column name. + pub fn column_name(&self) -> &str { + &self.column_name + } + + /// Get a snapshot of all entries grouped by value in sorted order. 
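+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch (building the `RecordBatch` is elided); duplicate values
+    /// are grouped into one entry that carries all of their row positions:
+    ///
+    /// ```ignore
+    /// let index = BTreeMemIndex::new("id".to_string());
+    /// index.insert(&batch, /* row_offset */ 0)?;
+    ///
+    /// for (value, row_positions) in index.snapshot() {
+    ///     // `value.0` is the underlying ScalarValue; entries arrive in sorted order.
+    ///     println!("{:?} -> {:?}", value.0, row_positions);
+    /// }
+    /// ```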
+ pub fn snapshot(&self) -> Vec<(OrderableScalarValue, Vec)> { + let mut result: Vec<(OrderableScalarValue, Vec)> = Vec::new(); + + for entry in self.lookup.iter() { + let key = entry.key(); + if let Some(last) = result.last_mut() { + if last.0 == key.value { + last.1.push(key.row_position); + continue; + } + } + result.push((key.value.clone(), vec![key.row_position])); + } + + result + } + + /// Get the data type of the indexed column. + /// + /// Returns None if the index is empty. + pub fn data_type(&self) -> Option { + self.lookup + .front() + .map(|entry| entry.key().value.0.data_type()) + } + + /// Export the index data as sorted RecordBatches for BTree index training. + pub fn to_training_batches(&self, batch_size: usize) -> Result> { + use arrow_schema::{DataType, Field, Schema}; + use lance_core::ROW_ID; + use lance_index::scalar::registry::VALUE_COLUMN_NAME; + use std::sync::Arc; + + if self.lookup.is_empty() { + return Ok(vec![]); + } + + // Get the data type from the first key + let first_entry = self.lookup.front().unwrap(); + let data_type = first_entry.key().value.0.data_type(); + + // Create schema for training data + let schema = Arc::new(Schema::new(vec![ + Field::new(VALUE_COLUMN_NAME, data_type, true), + Field::new(ROW_ID, DataType::UInt64, false), + ])); + + let mut batches = Vec::new(); + let mut values: Vec = Vec::with_capacity(batch_size); + let mut row_ids: Vec = Vec::with_capacity(batch_size); + + for entry in self.lookup.iter() { + let key = entry.key(); + values.push(key.value.0.clone()); + row_ids.push(key.row_position); + + if values.len() >= batch_size { + // Build and emit a batch + let batch = self.build_training_batch(&schema, &values, &row_ids)?; + batches.push(batch); + values.clear(); + row_ids.clear(); + } + } + + // Emit any remaining data + if !values.is_empty() { + let batch = self.build_training_batch(&schema, &values, &row_ids)?; + batches.push(batch); + } + + Ok(batches) + } + + /// Build a single training batch from values and row IDs. + fn build_training_batch( + &self, + schema: &std::sync::Arc, + values: &[ScalarValue], + row_ids: &[u64], + ) -> Result { + use arrow_array::UInt64Array; + use std::sync::Arc; + + // Convert ScalarValues to Arrow array + let value_array = ScalarValue::iter_to_array(values.iter().cloned())?; + + // Create row_id array + let row_id_array = Arc::new(UInt64Array::from(row_ids.to_vec())); + + RecordBatch::try_new(schema.clone(), vec![value_array, row_id_array]).map_err(|e| { + Error::io( + format!("Failed to create training batch: {}", e), + location!(), + ) + }) + } +} + +/// In-memory IVF-PQ index entry. +/// +/// Stores partition assignment and PQ codes for each vector. +#[derive(Debug, Clone)] +pub struct IvfPqEntry { + /// Row position in MemTable. + pub row_position: RowPosition, + /// PQ code for this vector (compressed representation). + /// Length = num_sub_vectors (for 8-bit) or num_sub_vectors/2 (for 4-bit). + pub pq_code: Vec, +} + +/// In-memory IVF-PQ index for vector similarity search. +/// +/// Reuses IVF centroids and PQ codebook from the base table to ensure +/// distance comparisons are consistent between the in-memory and base table indexes. 
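+///
+/// For example (illustrative sketch; the column name is made up, and loading the
+/// `IvfModel` and `ProductQuantizer` from the base table's existing index is elided):
+///
+/// ```ignore
+/// // `ivf_model` and `pq` come from the base table's IVF-PQ index, so distances
+/// // from this in-memory index are comparable to those from the on-disk index.
+/// let index = IvfPqMemIndex::with_capacity(
+///     "embedding".to_string(),
+///     ivf_model,
+///     pq,
+///     DistanceType::L2,
+///     /* partition_capacity */ 4096,
+/// );
+/// ```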
+/// +/// Uses hybrid storage for optimal performance: +/// - **Primary**: Pre-allocated `IvfPqPartition` stores with pre-transposed codes (fast search) +/// - **Overflow**: SkipMap fallback when primary is full (row-major, transpose at search) +/// +/// This design ensures writes never block while optimizing the common case where +/// most data (typically 95%+) fits in the fast primary storage. +#[derive(Debug)] +pub struct IvfPqMemIndex { + column_name: String, + ivf_model: IvfModel, + pq: ProductQuantizer, + /// Per-partition stores with hybrid storage (primary + overflow). + partitions: Vec, + /// Total number of vectors indexed. + vector_count: AtomicUsize, + /// Distance type for partition assignment. + distance_type: DistanceType, + /// Number of partitions. + num_partitions: usize, + /// PQ code length per vector (num_sub_vectors for 8-bit, num_sub_vectors/2 for 4-bit). + code_len: usize, + /// Maximum batch position that has been indexed. + /// Used for index coverage tracking in split plans. + max_indexed_batch_position: AtomicUsize, +} + +/// Default partition capacity when not specified. +/// This is a fallback - in practice, capacity should always be calculated +/// from memtable config using the safety factor. +const DEFAULT_PARTITION_CAPACITY: usize = 1024; + +impl IvfPqMemIndex { + /// Create a new IVF-PQ index with centroids and codebook from base table. + /// + /// Uses default partition capacity. For production use, prefer `with_capacity()` + /// with capacity calculated from memtable config. + /// + /// # Arguments + /// + /// * `column_name` - Vector column name + /// * `ivf_model` - IVF model with centroids from base table + /// * `pq` - Product quantizer with codebook from base table + /// * `distance_type` - Distance type for search + pub fn new( + column_name: String, + ivf_model: IvfModel, + pq: ProductQuantizer, + distance_type: DistanceType, + ) -> Self { + Self::with_capacity( + column_name, + ivf_model, + pq, + distance_type, + DEFAULT_PARTITION_CAPACITY, + ) + } + + /// Create a new IVF-PQ index with specified partition capacity. + /// + /// The partition capacity determines how many vectors each partition's + /// primary storage can hold before overflowing to the slower SkipMap. + /// + /// # Arguments + /// + /// * `column_name` - Vector column name + /// * `ivf_model` - IVF model with centroids from base table + /// * `pq` - Product quantizer with codebook from base table + /// * `distance_type` - Distance type for search + /// * `partition_capacity` - Max vectors per partition in primary storage + pub fn with_capacity( + column_name: String, + ivf_model: IvfModel, + pq: ProductQuantizer, + distance_type: DistanceType, + partition_capacity: usize, + ) -> Self { + let num_partitions = ivf_model.num_partitions(); + let code_len = pq.num_sub_vectors * pq.num_bits as usize / 8; + + // Pre-allocate all partition stores. + let partitions: Vec<_> = (0..num_partitions) + .map(|_| IvfPqPartition::new(partition_capacity, code_len)) + .collect(); + + Self { + column_name, + ivf_model, + pq, + partitions, + vector_count: AtomicUsize::new(0), + distance_type, + num_partitions, + code_len, + max_indexed_batch_position: AtomicUsize::new(0), + } + } + + /// Get the maximum batch position that has been indexed. + /// + /// Used for index coverage tracking. Rows with batch_position <= this value + /// are covered by the index. 
+ pub fn max_indexed_batch_position(&self) -> usize { + self.max_indexed_batch_position.load(Ordering::Acquire) + } + + /// Update the maximum indexed batch position. + /// + /// Only updates if the new value is greater than the current value. + /// Uses compare-and-swap to handle concurrent updates. + pub fn update_max_indexed_batch_position(&self, batch_pos: usize) { + let mut current = self.max_indexed_batch_position.load(Ordering::Acquire); + while batch_pos > current { + match self.max_indexed_batch_position.compare_exchange_weak( + current, + batch_pos, + Ordering::Release, + Ordering::Acquire, + ) { + Ok(_) => break, + Err(actual) => current = actual, + } + } + } + + /// Check if any partition has overflow data. + /// + /// This indicates some vectors are stored in the slower SkipMap storage. + /// Useful for monitoring - high overflow suggests capacity tuning needed. + pub fn any_partition_has_overflow(&self) -> bool { + self.partitions.iter().any(|p| p.has_overflow()) + } + + /// Get the maximum overflow percentage across all partitions. + /// + /// Returns 0.0 to 1.0 indicating what fraction of vectors are in overflow. + /// Values > 0.1 suggest `partition_capacity` may need increasing. + pub fn max_overflow_percentage(&self) -> f64 { + self.partitions + .iter() + .map(|p| p.overflow_percentage()) + .fold(0.0, f64::max) + } + + /// Get total overflow count across all partitions. + pub fn total_overflow_count(&self) -> usize { + self.partitions.iter().map(|p| p.overflow_len()).sum() + } + + /// Insert vectors from a batch into the index. + pub fn insert(&self, batch: &RecordBatch, row_offset: u64) -> Result<()> { + self.insert_with_batch_position(batch, row_offset, None) + } + + /// Insert vectors from a batch into the index with sequence tracking. + /// + /// For better performance with multiple batches, prefer `insert_batches()` + /// which enables cross-batch vectorization. 
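+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch (batch construction elided); `row_offset` is the absolute
+    /// MemTable position of the batch's first row:
+    ///
+    /// ```ignore
+    /// // Index a batch whose first row sits at MemTable position 1_000, and
+    /// // record that batch position 7 is now covered.
+    /// index.insert_with_batch_position(&batch, 1_000, Some(7))?;
+    /// assert!(index.max_indexed_batch_position() >= 7);
+    /// ```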
+ pub fn insert_with_batch_position( + &self, + batch: &RecordBatch, + row_offset: u64, + batch_position: Option, + ) -> Result<()> { + let col_idx = batch + .schema() + .column_with_name(&self.column_name) + .map(|(idx, _)| idx); + + let Some(col_idx) = col_idx else { + // Column not in this batch, skip + // Still update max_indexed_batch_position if provided + if let Some(bp) = batch_position { + self.update_max_indexed_batch_position(bp); + } + return Ok(()); + }; + + let column = batch.column(col_idx); + let fsl = column.as_fixed_size_list_opt().ok_or_else(|| { + Error::invalid_input( + format!( + "Column '{}' is not a FixedSizeList, got {:?}", + self.column_name, + column.data_type() + ), + location!(), + ) + })?; + + // Find partition assignments for all vectors using batch computation + let centroids = self + .ivf_model + .centroids + .as_ref() + .ok_or_else(|| Error::invalid_input("IVF model has no centroids", location!()))?; + let (partition_ids, _distances) = + compute_partitions_arrow_array(centroids, fsl, self.distance_type)?; + + // Compute PQ codes for all vectors (row-major output) + let pq_codes = self.pq.quantize(fsl)?; + let pq_codes_fsl = pq_codes.as_fixed_size_list(); + let pq_codes_flat = pq_codes_fsl + .values() + .as_primitive::(); + + // Group vectors by partition + let mut partition_groups: Vec> = vec![Vec::new(); self.num_partitions]; + for (row_idx, partition_id) in partition_ids.iter().enumerate().take(batch.num_rows()) { + if let Some(pid) = partition_id { + if (*pid as usize) < self.num_partitions { + partition_groups[*pid as usize].push(row_idx); + } + } + } + + // For each partition: gather codes and append + let mut total_inserted = 0usize; + + for (partition_id, indices) in partition_groups.iter().enumerate() { + if indices.is_empty() { + continue; + } + + let num_vectors = indices.len(); + + // Gather row-major codes for this partition + let mut partition_codes: Vec = Vec::with_capacity(num_vectors * self.code_len); + let mut partition_positions: Vec = Vec::with_capacity(num_vectors); + + for &row_idx in indices { + let code_start = row_idx * self.code_len; + let code_end = code_start + self.code_len; + partition_codes.extend_from_slice(&pq_codes_flat.values()[code_start..code_end]); + partition_positions.push(row_offset + row_idx as u64); + } + + // Append to partition (handles primary vs overflow internally) + self.partitions[partition_id].append_batch(&partition_codes, &partition_positions); + + total_inserted += num_vectors; + } + + self.vector_count + .fetch_add(total_inserted, Ordering::Relaxed); + + // Update max_indexed_batch_position if provided + if let Some(bp) = batch_position { + self.update_max_indexed_batch_position(bp); + } + + Ok(()) + } + + /// Insert vectors from multiple batches with cross-batch vectorization. + /// + /// This method concatenates vectors from all batches and processes them + /// together for better SIMD utilization in partition assignment and PQ encoding. + /// Vectors are stored in the partition's primary (pre-transposed) storage when + /// capacity allows, otherwise in the overflow SkipMap. 
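+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch (batch construction elided):
+    ///
+    /// ```ignore
+    /// let first_len = batch_a.num_rows() as u64;
+    /// let buffered = vec![
+    ///     BufferedBatch { batch: batch_a, row_offset: 0, batch_position: Some(1) },
+    ///     BufferedBatch { batch: batch_b, row_offset: first_len, batch_position: Some(2) },
+    /// ];
+    /// // Partition assignment and PQ encoding run once over the concatenated
+    /// // vectors, which vectorizes better than per-batch inserts.
+    /// index.insert_batches(&buffered)?;
+    /// ```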
+ pub fn insert_batches(&self, batches: &[BufferedBatch]) -> Result<()> { + if batches.is_empty() { + return Ok(()); + } + + // Collect vector arrays and track batch boundaries + let mut vector_arrays: Vec<&FixedSizeListArray> = Vec::with_capacity(batches.len()); + let mut batch_infos: Vec<(u64, usize, Option)> = Vec::with_capacity(batches.len()); + + for buffered in batches { + let col_idx = buffered + .batch + .schema() + .column_with_name(&self.column_name) + .map(|(idx, _)| idx); + + if let Some(col_idx) = col_idx { + let column = buffered.batch.column(col_idx); + if let Some(fsl) = column.as_fixed_size_list_opt() { + let num_vectors = fsl.len(); + if num_vectors > 0 { + vector_arrays.push(fsl); + batch_infos.push(( + buffered.row_offset, + num_vectors, + buffered.batch_position, + )); + } + } + } + + // Update max_indexed_batch_position even if no vectors in this batch + if let Some(bp) = buffered.batch_position { + self.update_max_indexed_batch_position(bp); + } + } + + if vector_arrays.is_empty() { + return Ok(()); + } + + // Concatenate all vectors into a single array for vectorized processing + let arrays_as_refs: Vec<&dyn Array> = + vector_arrays.iter().map(|a| *a as &dyn Array).collect(); + let concatenated = arrow_select::concat::concat(&arrays_as_refs)?; + let mega_fsl = concatenated.as_fixed_size_list(); + let total_vectors = mega_fsl.len(); + + // Batch compute partition assignments (SIMD-optimized) + let centroids = self + .ivf_model + .centroids + .as_ref() + .ok_or_else(|| Error::invalid_input("IVF model has no centroids", location!()))?; + let (partition_ids, _distances) = + compute_partitions_arrow_array(centroids, mega_fsl, self.distance_type)?; + + // Batch compute PQ codes (SIMD-optimized, row-major output) + let pq_codes = self.pq.quantize(mega_fsl)?; + let pq_codes_fsl = pq_codes.as_fixed_size_list(); + let pq_codes_flat = pq_codes_fsl + .values() + .as_primitive::(); + + // Build row position mapping + let mut row_positions: Vec = Vec::with_capacity(total_vectors); + for (row_offset, num_vectors, _) in &batch_infos { + for i in 0..*num_vectors { + row_positions.push(row_offset + i as u64); + } + } + + // Group vectors by partition + let mut partition_groups: Vec> = vec![Vec::new(); self.num_partitions]; + for (idx, pid) in partition_ids.iter().enumerate() { + if let Some(pid) = pid { + if (*pid as usize) < self.num_partitions { + partition_groups[*pid as usize].push(idx); + } + } + } + + // For each partition: gather codes and append + let mut total_inserted = 0usize; + + for (partition_id, indices) in partition_groups.iter().enumerate() { + if indices.is_empty() { + continue; + } + + let num_vectors = indices.len(); + + // Gather row-major codes for this partition + let mut partition_codes: Vec = Vec::with_capacity(num_vectors * self.code_len); + let mut partition_positions: Vec = Vec::with_capacity(num_vectors); + + for &idx in indices { + let code_start = idx * self.code_len; + let code_end = code_start + self.code_len; + partition_codes.extend_from_slice(&pq_codes_flat.values()[code_start..code_end]); + partition_positions.push(row_positions[idx]); + } + + // Append to partition (handles primary vs overflow internally) + self.partitions[partition_id].append_batch(&partition_codes, &partition_positions); + + total_inserted += num_vectors; + } + + self.vector_count + .fetch_add(total_inserted, Ordering::Relaxed); + + Ok(()) + } + + /// Search for nearest neighbors with visibility filtering. 
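+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch (building the single-vector query array is elided):
+    ///
+    /// ```ignore
+    /// // Probe the 10 nearest partitions, return up to 50 hits, and only
+    /// // consider rows visible at `max_row_position`.
+    /// let hits = index.search(&query, 50, 10, max_row_position)?;
+    /// for (distance, row_position) in hits {
+    ///     // Results are sorted by distance, ascending.
+    /// }
+    /// ```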
+ /// + /// Searches both primary (pre-transposed, fast) and overflow (needs transpose) + /// storage and merges results. Only returns rows where `row_position <= max_row_position`. + /// + /// # Arguments + /// + /// * `query` - Query vector as FixedSizeListArray with single vector + /// * `k` - Number of results to return + /// * `nprobes` - Number of partitions to search + /// * `max_row_position` - Maximum visible row position (for MVCC filtering) + /// + /// # Returns + /// + /// Vec of (distance, row_position) sorted by distance ascending. + pub fn search( + &self, + query: &FixedSizeListArray, + k: usize, + nprobes: usize, + max_row_position: RowPosition, + ) -> Result> { + if query.len() != 1 { + return Err(Error::invalid_input( + format!("Query must have exactly 1 vector, got {}", query.len()), + location!(), + )); + } + + // Find nearest partitions to probe + let query_values = query.value(0); + let (partition_ids, _) = + self.ivf_model + .find_partitions(&query_values, nprobes, self.distance_type)?; + + let mut results: Vec<(f32, RowPosition)> = Vec::new(); + + for i in 0..partition_ids.len() { + let partition_id = partition_ids.value(i) as usize; + if partition_id >= self.num_partitions { + continue; + } + + let partition = &self.partitions[partition_id]; + if partition.is_empty() { + continue; + } + + // Search primary storage (pre-transposed, fast path) + let (primary_codes, primary_positions) = partition.get_primary_codes_for_search(); + if !primary_codes.is_empty() { + let codes_array = UInt8Array::from(primary_codes); + let distances = self.pq.compute_distances(&query_values, &codes_array)?; + + for (idx, &dist) in distances.values().iter().enumerate() { + let pos = primary_positions[idx]; + if pos <= max_row_position { + results.push((dist, pos)); + } + } + } + + // Search overflow storage (needs transpose) + if partition.has_overflow() { + let (overflow_codes_rowmajor, overflow_positions) = + partition.get_overflow_codes_for_search(); + + if !overflow_codes_rowmajor.is_empty() { + let num_overflow = overflow_positions.len(); + + // Transpose to column-major for distance computation + let codes_array = UInt8Array::from(overflow_codes_rowmajor); + let transposed = transpose::( + &codes_array, + num_overflow, + self.code_len, + ); + let distances = self.pq.compute_distances(&query_values, &transposed)?; + + for (idx, &dist) in distances.values().iter().enumerate() { + let pos = overflow_positions[idx]; + if pos <= max_row_position { + results.push((dist, pos)); + } + } + } + } + } + + // Sort by distance and take top-k + results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal)); + results.truncate(k); + + Ok(results) + } + + /// Get the number of vectors in a partition. + pub fn partition_size(&self, partition_id: usize) -> usize { + if partition_id >= self.num_partitions { + return 0; + } + self.partitions[partition_id].len() + } + + /// Get total vector count. + pub fn len(&self) -> usize { + self.vector_count.load(Ordering::Relaxed) + } + + /// Check if the index is empty. + pub fn is_empty(&self) -> bool { + self.vector_count.load(Ordering::Relaxed) == 0 + } + + /// Get the column name. + pub fn column_name(&self) -> &str { + &self.column_name + } + + /// Get entries for a partition. + /// Returns PQ codes in row-major format. 
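+    ///
+    /// For example (illustrative):
+    ///
+    /// ```ignore
+    /// for entry in index.get_partition(partition_id) {
+    ///     // `entry.pq_code` holds the row-major PQ bytes for one vector and
+    ///     // `entry.row_position` maps it back into the MemTable.
+    /// }
+    /// ```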
+ pub fn get_partition(&self, partition_id: usize) -> Vec { + if partition_id >= self.num_partitions { + return Vec::new(); + } + + let partition = &self.partitions[partition_id]; + let mut entries = Vec::with_capacity(partition.len()); + + // Get from primary storage (need to convert from column-major to row-major) + let (primary_codes, primary_positions) = partition.get_primary_codes_for_search(); + if !primary_codes.is_empty() { + let num_vectors = primary_positions.len(); + // primary_codes are column-major, need to transpose back to row-major + for (i, &row_position) in primary_positions.iter().enumerate() { + let mut pq_code = Vec::with_capacity(self.code_len); + for sv in 0..self.code_len { + pq_code.push(primary_codes[sv * num_vectors + i]); + } + entries.push(IvfPqEntry { + row_position, + pq_code, + }); + } + } + + // Get from overflow storage (already row-major) + let (overflow_codes, overflow_positions) = partition.get_overflow_codes_for_search(); + for (i, &row_position) in overflow_positions.iter().enumerate() { + let code_start = i * self.code_len; + let code_end = code_start + self.code_len; + entries.push(IvfPqEntry { + row_position, + pq_code: overflow_codes[code_start..code_end].to_vec(), + }); + } + + entries + } + + /// Get the number of partitions. + pub fn num_partitions(&self) -> usize { + self.ivf_model.num_partitions() + } + + /// Get the IVF model (for advanced use). + pub fn ivf_model(&self) -> &IvfModel { + &self.ivf_model + } + + /// Get the product quantizer (for advanced use). + pub fn pq(&self) -> &ProductQuantizer { + &self.pq + } + + /// Get the distance type. + pub fn distance_type(&self) -> DistanceType { + self.distance_type + } + + /// Export partition data as RecordBatches for index creation. + /// Each batch has schema: `_rowid` (UInt64), `__pq_code` (FixedSizeList). + /// + /// The PQ codes are stored row-major (not transposed), matching the format + /// expected by the index builder's shuffle stage. 
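+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch:
+    ///
+    /// ```ignore
+    /// for (partition_id, batch) in index.to_partition_batches()? {
+    ///     // `batch` carries the `_rowid` (UInt64) and `__pq_code`
+    ///     // (FixedSizeList of UInt8) columns, ready for the index builder.
+    /// }
+    /// ```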
+ pub fn to_partition_batches(&self) -> Result> { + use arrow_array::UInt64Array; + use arrow_schema::{Field, Schema}; + use lance_arrow::FixedSizeListArrayExt; + use lance_core::ROW_ID; + use lance_index::vector::PQ_CODE_COLUMN; + use std::sync::Arc; + + let pq_code_len = self.pq.num_sub_vectors * self.pq.num_bits as usize / 8; + + // Schema for partition data: row_id and pq_code + let schema = Arc::new(Schema::new(vec![ + Field::new(ROW_ID, arrow_schema::DataType::UInt64, false), + Field::new( + PQ_CODE_COLUMN, + arrow_schema::DataType::FixedSizeList( + Arc::new(Field::new("item", arrow_schema::DataType::UInt8, false)), + pq_code_len as i32, + ), + false, + ), + ])); + + let mut result = Vec::new(); + + for part_id in 0..self.num_partitions { + let entries = self.get_partition(part_id); + if entries.is_empty() { + continue; + } + + // Collect row IDs + let row_ids: Vec = entries.iter().map(|e| e.row_position).collect(); + let row_id_array = Arc::new(UInt64Array::from(row_ids)); + + // Collect PQ codes into a flat array + let mut pq_codes_flat: Vec = Vec::with_capacity(entries.len() * pq_code_len); + for entry in &entries { + pq_codes_flat.extend_from_slice(&entry.pq_code); + } + + // Create FixedSizeList array for PQ codes + let pq_codes_array = UInt8Array::from(pq_codes_flat); + let pq_codes_fsl = Arc::new( + FixedSizeListArray::try_new_from_values(pq_codes_array, pq_code_len as i32) + .map_err(|e| { + Error::io( + format!("Failed to create PQ code array: {}", e), + location!(), + ) + })?, + ); + + let batch = RecordBatch::try_new(schema.clone(), vec![row_id_array, pq_codes_fsl]) + .map_err(|e| { + Error::io( + format!("Failed to create partition batch: {}", e), + location!(), + ) + })?; + + result.push((part_id, batch)); + } + + Ok(result) + } +} + +/// Composite key for FTS index. +/// +/// By combining (token, row_position), each entry is unique. +/// This follows the same pattern as IndexKey and IvfPqKey. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct FtsKey { + /// The indexed token (lowercase). + pub token: String, + /// Row position (makes the key unique for tokens appearing in multiple docs). + pub row_position: RowPosition, +} + +/// In-memory FTS (Full-Text Search) index entry (returned from search). +#[derive(Debug, Clone)] +pub struct FtsEntry { + /// Row position in MemTable. + pub row_position: RowPosition, + /// Term frequency in this document. + pub frequency: u32, +} + +/// In-memory FTS index for full-text search. +#[derive(Debug)] +pub struct FtsMemIndex { + column_name: String, + /// Inverted index: (token, row_position) -> frequency. + postings: SkipMap, + /// Total document count. + doc_count: AtomicUsize, + /// Maximum batch position that has been indexed. + /// Used for index coverage tracking in split plans. + max_indexed_batch_position: AtomicUsize, +} + +impl FtsMemIndex { + /// Create a new FTS index for the given column. + pub fn new(column_name: String) -> Self { + Self { + column_name, + postings: SkipMap::new(), + doc_count: AtomicUsize::new(0), + max_indexed_batch_position: AtomicUsize::new(0), + } + } + + /// Get the maximum batch position that has been indexed. + /// + /// Used for index coverage tracking. Rows with batch_position <= this value + /// are covered by the index. + pub fn max_indexed_batch_position(&self) -> usize { + self.max_indexed_batch_position.load(Ordering::Acquire) + } + + /// Update the maximum indexed batch position. + /// + /// Only updates if the new value is greater than the current value. 
+ /// Uses compare-and-swap to handle concurrent updates. + pub fn update_max_indexed_batch_position(&self, batch_pos: usize) { + let mut current = self.max_indexed_batch_position.load(Ordering::Acquire); + while batch_pos > current { + match self.max_indexed_batch_position.compare_exchange_weak( + current, + batch_pos, + Ordering::Release, + Ordering::Acquire, + ) { + Ok(_) => break, + Err(actual) => current = actual, + } + } + } + + /// Insert documents from a batch into the index using whitespace tokenization. + pub fn insert(&self, batch: &RecordBatch, row_offset: u64) -> Result<()> { + self.insert_with_batch_position(batch, row_offset, None) + } + + /// Insert documents from a batch into the index with batch position tracking. + pub fn insert_with_batch_position( + &self, + batch: &RecordBatch, + row_offset: u64, + batch_position: Option, + ) -> Result<()> { + let col_idx = batch + .schema() + .column_with_name(&self.column_name) + .map(|(idx, _)| idx); + + if col_idx.is_none() { + // Still update max_indexed_batch_position if provided + if let Some(bp) = batch_position { + self.update_max_indexed_batch_position(bp); + } + return Ok(()); + } + + let column = batch.column(col_idx.unwrap()); + + for row_idx in 0..batch.num_rows() { + let value = ScalarValue::try_from_array(column.as_ref(), row_idx)?; + let row_position = row_offset + row_idx as u64; + + if let ScalarValue::Utf8(Some(text)) | ScalarValue::LargeUtf8(Some(text)) = value { + // Simple whitespace tokenization + let mut term_freq: HashMap = HashMap::new(); + for token in text.split_whitespace() { + let token = token.to_lowercase(); + *term_freq.entry(token).or_default() += 1; + } + + for (token, freq) in term_freq { + let key = FtsKey { + token, + row_position, + }; + self.postings.insert(key, freq); + } + } + + self.doc_count.fetch_add(1, Ordering::Relaxed); + } + + // Update max_indexed_batch_position if provided + if let Some(bp) = batch_position { + self.update_max_indexed_batch_position(bp); + } + + Ok(()) + } + + /// Search for documents containing a term. + pub fn search(&self, term: &str) -> Vec { + let token = term.to_lowercase(); + let start = FtsKey { + token: token.clone(), + row_position: 0, + }; + let end = FtsKey { + token, + row_position: u64::MAX, + }; + + self.postings + .range(start..=end) + .map(|entry| FtsEntry { + row_position: entry.key().row_position, + frequency: *entry.value(), + }) + .collect() + } + + /// Get the number of entries in the index. + /// Note: This counts (token, row_position) pairs, not unique tokens. + pub fn entry_count(&self) -> usize { + self.postings.len() + } + + /// Get the document count. + pub fn doc_count(&self) -> usize { + self.doc_count.load(Ordering::Relaxed) + } + + /// Check if the index is empty. + pub fn is_empty(&self) -> bool { + self.doc_count.load(Ordering::Relaxed) == 0 + } + + /// Get the column name. + pub fn column_name(&self) -> &str { + &self.column_name + } +} + +/// Configuration for a BTree scalar index. +#[derive(Debug, Clone)] +pub struct BTreeIndexConfig { + /// Index name. + pub name: String, + /// Column name the index is built on. + pub column: String, +} + +/// Configuration for an IVF-PQ vector index. +/// +/// Contains the centroids and codebook from the base table +/// to ensure consistent distance computations. +#[derive(Debug, Clone)] +pub struct IvfPqIndexConfig { + /// Index name. + pub name: String, + /// Column name containing vectors. + pub column: String, + /// IVF model with centroids from base table. 
+ pub ivf_model: IvfModel, + /// Product quantizer with codebook from base table. + pub pq: ProductQuantizer, + /// Distance type for search. + pub distance_type: DistanceType, +} + +/// Configuration for a Full-Text Search index. +#[derive(Debug, Clone)] +pub struct FtsIndexConfig { + /// Index name. + pub name: String, + /// Column name containing text. + pub column: String, +} + +/// Configuration for an index in MemWAL. +/// +/// Each variant contains all the configuration needed for that index type. +/// IvfPq is boxed because it contains large IVF model and PQ codebook. +#[derive(Debug, Clone)] +pub enum MemIndexConfig { + /// BTree index for scalar fields (point lookups, range queries). + BTree(BTreeIndexConfig), + /// IVF-PQ index for vector similarity search. + /// Boxed due to large size (contains IVF centroids and PQ codebook). + IvfPq(Box), + /// Full-text search index. + Fts(FtsIndexConfig), +} + +impl MemIndexConfig { + /// Get the index name. + pub fn name(&self) -> &str { + match self { + Self::BTree(c) => &c.name, + Self::IvfPq(c) => &c.name, + Self::Fts(c) => &c.name, + } + } + + /// Get the column name. + pub fn column(&self) -> &str { + match self { + Self::BTree(c) => &c.column, + Self::IvfPq(c) => &c.column, + Self::Fts(c) => &c.column, + } + } + + /// Create a BTree index config from base table IndexMetadata. + pub fn btree_from_metadata(index_meta: &IndexMetadata, schema: &LanceSchema) -> Result { + let column = Self::extract_column(index_meta, schema)?; + Ok(Self::BTree(BTreeIndexConfig { + name: index_meta.name.clone(), + column, + })) + } + + /// Create an FTS index config from base table IndexMetadata. + pub fn fts_from_metadata(index_meta: &IndexMetadata, schema: &LanceSchema) -> Result { + let column = Self::extract_column(index_meta, schema)?; + Ok(Self::Fts(FtsIndexConfig { + name: index_meta.name.clone(), + column, + })) + } + + /// Create an IVF-PQ index config with centroids and codebook from base table. + pub fn ivf_pq( + name: String, + column: String, + ivf_model: IvfModel, + pq: ProductQuantizer, + distance_type: DistanceType, + ) -> Self { + Self::IvfPq(Box::new(IvfPqIndexConfig { + name, + column, + ivf_model, + pq, + distance_type, + })) + } + + /// Detect index type from protobuf type_url. + pub fn detect_index_type(type_url: &str) -> Result<&'static str> { + if type_url.ends_with("BTreeIndexDetails") { + Ok("btree") + } else if type_url.ends_with("InvertedIndexDetails") { + Ok("fts") + } else if type_url.ends_with("VectorIndexDetails") { + Ok("vector") + } else { + Err(Error::invalid_input( + format!( + "Unsupported index type for MemWAL: {}. Supported: BTree, Inverted, Vector", + type_url + ), + location!(), + )) + } + } + + /// Extract column name from index metadata. + fn extract_column(index_meta: &IndexMetadata, schema: &LanceSchema) -> Result { + let field_id = index_meta.fields.first().ok_or_else(|| { + Error::invalid_input( + format!("Index '{}' has no fields", index_meta.name), + location!(), + ) + })?; + + schema + .field_by_id(*field_id) + .map(|f| f.name.clone()) + .ok_or_else(|| { + Error::invalid_input( + format!("Field with id {} not found in schema", field_id), + location!(), + ) + }) + } +} + +/// Registry managing all in-memory indexes for a MemTable. +#[derive(Debug, Default)] +pub struct IndexRegistry { + /// Skip-list indexes by name (used for BTree/scalar indexes). + /// Skip-list provides better concurrent performance than BTreeMap with RwLock. + btree_indexes: HashMap, + /// IVF-PQ indexes by name. 
+ ivf_pq_indexes: HashMap, + /// FTS indexes by name. + fts_indexes: HashMap, +} + +impl IndexRegistry { + /// Create a new empty index registry. + pub fn new() -> Self { + Self::default() + } + + /// Create an index registry from index configurations. + /// + /// # Arguments + /// + /// * `configs` - Index configurations + /// * `max_rows` - Maximum rows in memtable, used to calculate IVF-PQ partition capacity + /// * `ivf_index_partition_capacity_safety_factor` - Safety factor for partition capacity (accounts for non-uniform distribution) + pub fn from_configs( + configs: &[MemIndexConfig], + max_rows: usize, + ivf_index_partition_capacity_safety_factor: usize, + ) -> Result { + let mut registry = Self::new(); + + for config in configs { + match config { + MemIndexConfig::BTree(c) => { + log::info!( + "[INDEX_REGISTRY] Creating BTree index '{}' on column '{}'", + c.name, + c.column + ); + let index = BTreeMemIndex::new(c.column.clone()); + registry.btree_indexes.insert(c.name.clone(), index); + } + MemIndexConfig::IvfPq(c) => { + let num_partitions = c.ivf_model.num_partitions(); + // Calculate capacity with safety factor for non-uniform distribution. + // Cap at max_rows to avoid over-allocation when num_partitions < safety_factor. + let avg_per_partition = max_rows / num_partitions; + let partition_capacity = (avg_per_partition + * ivf_index_partition_capacity_safety_factor) + .min(max_rows); + + log::info!( + "[INDEX_REGISTRY] Creating IVF-PQ index '{}' on column '{}' ({} partitions, capacity {})", + c.name, c.column, num_partitions, partition_capacity + ); + let index = IvfPqMemIndex::with_capacity( + c.column.clone(), + c.ivf_model.clone(), + c.pq.clone(), + c.distance_type, + partition_capacity, + ); + registry.ivf_pq_indexes.insert(c.name.clone(), index); + } + MemIndexConfig::Fts(c) => { + log::info!( + "[INDEX_REGISTRY] Creating FTS index '{}' on column '{}'", + c.name, + c.column + ); + let index = FtsMemIndex::new(c.column.clone()); + registry.fts_indexes.insert(c.name.clone(), index); + } + } + } + + log::info!( + "[INDEX_REGISTRY] Created {} BTree, {} IVF-PQ, {} FTS indexes", + registry.btree_indexes.len(), + registry.ivf_pq_indexes.len(), + registry.fts_indexes.len() + ); + + Ok(registry) + } + + /// Add a BTree/scalar index (implemented using skip-list for better concurrency). + pub fn add_btree(&mut self, name: String, column: String) { + self.btree_indexes.insert(name, BTreeMemIndex::new(column)); + } + + /// Add an IVF-PQ index with centroids and codebook from base table. + pub fn add_ivf_pq( + &mut self, + name: String, + column: String, + ivf_model: IvfModel, + pq: ProductQuantizer, + distance_type: DistanceType, + ) { + self.ivf_pq_indexes.insert( + name, + IvfPqMemIndex::new(column, ivf_model, pq, distance_type), + ); + } + + /// Add an FTS index. + pub fn add_fts(&mut self, name: String, column: String) { + self.fts_indexes.insert(name, FtsMemIndex::new(column)); + } + + /// Insert a batch into all indexes. + pub fn insert(&self, batch: &RecordBatch, row_offset: u64) -> Result<()> { + self.insert_with_batch_position(batch, row_offset, None) + } + + /// Insert a batch into all indexes with batch position tracking. 
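+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch (the index configs and batch construction are elided):
+    ///
+    /// ```ignore
+    /// let registry = IndexRegistry::from_configs(&configs, 100_000, 8)?;
+    ///
+    /// // Apply one batch to every registered index and mark batch position 3
+    /// // as covered.
+    /// registry.insert_with_batch_position(&batch, /* row_offset */ 0, Some(3))?;
+    ///
+    /// // Every index that processed the batch now reports 3 as its maximum
+    /// // indexed batch position.
+    /// let coverage = registry.get_coverage();
+    /// ```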
+    pub fn insert_with_batch_position(
+        &self,
+        batch: &RecordBatch,
+        row_offset: u64,
+        batch_position: Option<usize>,
+    ) -> Result<()> {
+        for index in self.btree_indexes.values() {
+            index.insert_with_batch_position(batch, row_offset, batch_position)?;
+        }
+        for index in self.ivf_pq_indexes.values() {
+            index.insert_with_batch_position(batch, row_offset, batch_position)?;
+        }
+        for index in self.fts_indexes.values() {
+            index.insert_with_batch_position(batch, row_offset, batch_position)?;
+        }
+        Ok(())
+    }
+
+    /// Insert multiple batches into all indexes with cross-batch optimization.
+    ///
+    /// For IVF-PQ indexes, this enables vectorized partition assignment and
+    /// PQ encoding across all batches, improving performance through better
+    /// SIMD utilization.
+    pub fn insert_batches(&self, batches: &[BufferedBatch]) -> Result<()> {
+        // BTree indexes: iterate batches (no cross-batch optimization benefit)
+        for index in self.btree_indexes.values() {
+            for buffered in batches {
+                index.insert_with_batch_position(
+                    &buffered.batch,
+                    buffered.row_offset,
+                    buffered.batch_position,
+                )?;
+            }
+        }
+
+        // IVF-PQ indexes: use batched insert for vectorization
+        for index in self.ivf_pq_indexes.values() {
+            index.insert_batches(batches)?;
+        }
+
+        // FTS indexes: iterate batches (potential future optimization)
+        for index in self.fts_indexes.values() {
+            for buffered in batches {
+                index.insert_with_batch_position(
+                    &buffered.batch,
+                    buffered.row_offset,
+                    buffered.batch_position,
+                )?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Insert multiple batches into all indexes in parallel.
+    ///
+    /// Each individual index runs in its own thread, regardless of type,
+    /// which maximizes parallelism when multiple indexes are maintained.
+    /// This is used during WAL flush to parallelize index updates with WAL I/O.
+    ///
+    /// Returns a map of index names to their update durations for performance tracking.
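+    ///
+    /// # Example
+    ///
+    /// Illustrative sketch:
+    ///
+    /// ```ignore
+    /// let durations = registry.insert_batches_parallel(&buffered_batches)?;
+    /// for (index_name, elapsed) in &durations {
+    ///     log::debug!("index {} updated in {:?}", index_name, elapsed);
+    /// }
+    /// ```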
+ pub fn insert_batches_parallel( + &self, + batches: &[BufferedBatch], + ) -> Result> { + use std::time::Instant; + + if batches.is_empty() { + return Ok(std::collections::HashMap::new()); + } + + let total_rows: usize = batches.iter().map(|b| b.batch.num_rows()).sum(); + + // Use std::thread::scope for parallel CPU-bound work + std::thread::scope(|scope| { + // Each handle returns (index_name, index_type, duration, Result) + let mut handles: Vec<( + &str, + &str, + std::thread::ScopedJoinHandle<'_, (std::time::Duration, Result<()>)>, + )> = Vec::new(); + + // Spawn a thread for each BTree index + for (name, index) in &self.btree_indexes { + let handle = scope.spawn(move || -> (std::time::Duration, Result<()>) { + let start = Instant::now(); + let result = (|| { + for buffered in batches { + index.insert_with_batch_position( + &buffered.batch, + buffered.row_offset, + buffered.batch_position, + )?; + } + Ok(()) + })(); + (start.elapsed(), result) + }); + handles.push((name.as_str(), "btree", handle)); + } + + // Spawn a thread for each IVF-PQ index + for (name, index) in &self.ivf_pq_indexes { + let handle = scope.spawn(move || -> (std::time::Duration, Result<()>) { + let start = Instant::now(); + let result = index.insert_batches(batches); + (start.elapsed(), result) + }); + handles.push((name.as_str(), "ivfpq", handle)); + } + + // Spawn a thread for each FTS index + for (name, index) in &self.fts_indexes { + let handle = scope.spawn(move || -> (std::time::Duration, Result<()>) { + let start = Instant::now(); + let result = (|| { + for buffered in batches { + index.insert_with_batch_position( + &buffered.batch, + buffered.row_offset, + buffered.batch_position, + )?; + } + Ok(()) + })(); + (start.elapsed(), result) + }); + handles.push((name.as_str(), "fts", handle)); + } + + // Collect results, log timing, and check for errors + let mut first_error: Option = None; + let mut timings: Vec<(&str, &str, u128)> = Vec::new(); + + for (name, idx_type, handle) in handles { + match handle.join() { + Ok((duration, Ok(()))) => { + timings.push((name, idx_type, duration.as_millis())); + } + Ok((duration, Err(e))) => { + timings.push((name, idx_type, duration.as_millis())); + if first_error.is_none() { + first_error = Some(e); + } + } + Err(_) => { + if first_error.is_none() { + first_error = Some(Error::Internal { + message: format!("Index '{}' thread panicked", name), + location: location!(), + }); + } + } + } + } + + // Log individual index timings + if !timings.is_empty() { + let timing_str: Vec = timings + .iter() + .map(|(name, idx_type, ms)| format!("{}({})={}ms", name, idx_type, ms)) + .collect(); + eprintln!( + "[INDEX_UPDATE] {} rows, {} batches: {}", + total_rows, + batches.len(), + timing_str.join(", ") + ); + } + + if let Some(e) = first_error { + return Err(e); + } + + // Convert timings to HashMap + let duration_map: std::collections::HashMap = timings + .into_iter() + .map(|(name, _idx_type, ms)| { + ( + name.to_string(), + std::time::Duration::from_millis(ms as u64), + ) + }) + .collect(); + + Ok(duration_map) + }) + } + + /// Get index coverage information. + /// + /// Returns the maximum indexed sequence numbers for all index types. + /// Used for split plan creation when scanning with partial index coverage. 
+ pub fn get_coverage(&self) -> IndexCoverageInfo { + let mut btree_coverage = HashMap::new(); + let mut ivfpq_coverage = HashMap::new(); + let mut fts_coverage = HashMap::new(); + + for (name, index) in &self.btree_indexes { + btree_coverage.insert(name.clone(), index.max_indexed_batch_position()); + } + + for (name, index) in &self.ivf_pq_indexes { + ivfpq_coverage.insert(name.clone(), index.max_indexed_batch_position()); + } + + for (name, index) in &self.fts_indexes { + fts_coverage.insert(name.clone(), index.max_indexed_batch_position()); + } + + IndexCoverageInfo { + btree_coverage, + ivfpq_coverage, + fts_coverage, + } + } + + /// Get a BTree index by name. + pub fn get_btree(&self, name: &str) -> Option<&BTreeMemIndex> { + self.btree_indexes.get(name) + } + + /// Get an IVF-PQ index by name. + pub fn get_ivf_pq(&self, name: &str) -> Option<&IvfPqMemIndex> { + self.ivf_pq_indexes.get(name) + } + + /// Get an FTS index by name. + pub fn get_fts(&self, name: &str) -> Option<&FtsMemIndex> { + self.fts_indexes.get(name) + } + + /// Check if the registry has any indexes. + pub fn is_empty(&self) -> bool { + self.btree_indexes.is_empty() + && self.ivf_pq_indexes.is_empty() + && self.fts_indexes.is_empty() + } + + /// Get the total number of indexes. + pub fn len(&self) -> usize { + self.btree_indexes.len() + self.ivf_pq_indexes.len() + self.fts_indexes.len() + } +} + +/// Buffered batch for cross-batch index processing. +/// +/// Used by `insert_batches` methods to process multiple batches together +/// for better vectorization (especially IVF-PQ partition assignment and encoding). +pub struct BufferedBatch { + /// The batch to index. + pub batch: RecordBatch, + /// Row offset in MemTable. + pub row_offset: u64, + /// Batch position for coverage tracking. + pub batch_position: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, RecordBatch, StringArray}; + use arrow_schema::{DataType, Field, Schema as ArrowSchema}; + use std::sync::Arc; + use tracing::warn; + + /// Check if an index type is supported and log warning if not. + fn check_index_type_supported(index_type: &str) -> bool { + match index_type.to_lowercase().as_str() { + "btree" | "scalar" => true, + "ivf_pq" | "ivf-pq" | "ivfpq" | "vector" => true, + "fts" | "inverted" | "fulltext" => true, + _ => { + warn!( + "Index type '{}' is not supported for MemWAL. \ + Supported types: btree, ivf_pq, fts. 
Skipping.", + index_type + ); + false + } + } + } + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + Field::new("description", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &ArrowSchema, start_id: i32) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(vec![start_id, start_id + 1, start_id + 2])), + Arc::new(StringArray::from(vec!["alice", "bob", "charlie"])), + Arc::new(StringArray::from(vec![ + "hello world", + "goodbye world", + "hello again", + ])), + ], + ) + .unwrap() + } + + #[test] + fn test_btree_index_insert_and_lookup() { + let schema = create_test_schema(); + let index = BTreeMemIndex::new("id".to_string()); + + let batch = create_test_batch(&schema, 0); + // row_offset = 0 for first batch + index.insert(&batch, 0).unwrap(); + + assert_eq!(index.len(), 3); + + // Row positions are 0, 1, 2 for the first batch + let result = index.get(&ScalarValue::Int32(Some(0))); + assert!(!result.is_empty()); + assert_eq!(result, vec![0]); + + let result = index.get(&ScalarValue::Int32(Some(1))); + assert!(!result.is_empty()); + assert_eq!(result, vec![1]); + } + + #[test] + fn test_btree_index_multiple_batches() { + let schema = create_test_schema(); + let index = BTreeMemIndex::new("id".to_string()); + + let batch1 = create_test_batch(&schema, 0); + let batch2 = create_test_batch(&schema, 10); + + // First batch: rows 0-2 + index.insert(&batch1, 0).unwrap(); + // Second batch: rows 3-5 (row_offset = 3 since batch1 had 3 rows) + index.insert(&batch2, 3).unwrap(); + + assert_eq!(index.len(), 6); + + // Value 10 is at row position 3 (first row of second batch) + let result = index.get(&ScalarValue::Int32(Some(10))); + assert!(!result.is_empty()); + assert_eq!(result, vec![3]); + } + + #[test] + fn test_btree_index_to_training_batches() { + use lance_core::ROW_ID; + use lance_index::scalar::registry::VALUE_COLUMN_NAME; + + let schema = create_test_schema(); + let index = BTreeMemIndex::new("id".to_string()); + + let batch1 = create_test_batch(&schema, 0); // ids: 0, 1, 2 + let batch2 = create_test_batch(&schema, 10); // ids: 10, 11, 12 + + index.insert(&batch1, 0).unwrap(); // row positions 0, 1, 2 + index.insert(&batch2, 3).unwrap(); // row positions 3, 4, 5 + + // Export as training batches (batch_size = 100 to get all in one batch) + let batches = index.to_training_batches(100).unwrap(); + assert_eq!(batches.len(), 1); + + let batch = &batches[0]; + assert_eq!(batch.num_rows(), 6); + + // Check schema + assert_eq!(batch.schema().field(0).name(), VALUE_COLUMN_NAME); + assert_eq!(batch.schema().field(1).name(), ROW_ID); + + // Data should be sorted by value (0, 1, 2, 10, 11, 12) + let values = batch + .column_by_name(VALUE_COLUMN_NAME) + .unwrap() + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(values.value(0), 0); + assert_eq!(values.value(1), 1); + assert_eq!(values.value(2), 2); + assert_eq!(values.value(3), 10); + assert_eq!(values.value(4), 11); + assert_eq!(values.value(5), 12); + + // Check row IDs match positions + let row_ids = batch + .column_by_name(ROW_ID) + .unwrap() + .as_any() + .downcast_ref::() + .unwrap(); + assert_eq!(row_ids.value(0), 0); // id=0 -> row 0 + assert_eq!(row_ids.value(1), 1); // id=1 -> row 1 + assert_eq!(row_ids.value(2), 2); // id=2 -> row 2 + assert_eq!(row_ids.value(3), 3); // id=10 -> row 3 + assert_eq!(row_ids.value(4), 4); // id=11 -> row 4 + 
assert_eq!(row_ids.value(5), 5); // id=12 -> row 5 + } + + #[test] + fn test_btree_index_snapshot() { + let schema = create_test_schema(); + let index = BTreeMemIndex::new("id".to_string()); + + let batch = create_test_batch(&schema, 0); + index.insert(&batch, 0).unwrap(); + + let snapshot = index.snapshot(); + assert_eq!(snapshot.len(), 3); + + // Snapshot should be in sorted order + assert_eq!(snapshot[0].0 .0, ScalarValue::Int32(Some(0))); + assert_eq!(snapshot[1].0 .0, ScalarValue::Int32(Some(1))); + assert_eq!(snapshot[2].0 .0, ScalarValue::Int32(Some(2))); + } + + #[test] + fn test_fts_index_insert_and_search() { + let schema = create_test_schema(); + let index = FtsMemIndex::new("description".to_string()); + + let batch = create_test_batch(&schema, 0); + index.insert(&batch, 0).unwrap(); + + assert_eq!(index.doc_count(), 3); + + // "hello" appears in docs 0 and 2 + let entries = index.search("hello"); + assert!(!entries.is_empty()); + assert_eq!(entries.len(), 2); + + // "world" appears in docs 0 and 1 + let entries = index.search("world"); + assert!(!entries.is_empty()); + assert_eq!(entries.len(), 2); + + // "goodbye" appears only in doc 1 (row position 1) + let entries = index.search("goodbye"); + assert!(!entries.is_empty()); + assert_eq!(entries.len(), 1); + assert_eq!(entries[0].row_position, 1); + + // Non-existent term returns empty Vec + let entries = index.search("nonexistent"); + assert!(entries.is_empty()); + } + + #[test] + fn test_index_registry() { + let schema = create_test_schema(); + let mut registry = IndexRegistry::new(); + + registry.add_btree("id_idx".to_string(), "id".to_string()); + registry.add_fts("desc_idx".to_string(), "description".to_string()); + + assert_eq!(registry.len(), 2); + + let batch = create_test_batch(&schema, 0); + registry.insert(&batch, 0).unwrap(); + + let btree = registry.get_btree("id_idx").unwrap(); + assert_eq!(btree.len(), 3); + + let fts = registry.get_fts("desc_idx").unwrap(); + assert_eq!(fts.doc_count(), 3); + } + + #[test] + fn test_check_index_type_supported() { + assert!(check_index_type_supported("btree")); + assert!(check_index_type_supported("BTree")); + assert!(check_index_type_supported("ivf_pq")); + assert!(check_index_type_supported("fts")); + assert!(check_index_type_supported("inverted")); + + assert!(!check_index_type_supported("unknown")); + } + + #[test] + fn test_from_configs() { + let configs = vec![ + MemIndexConfig::BTree(BTreeIndexConfig { + name: "pk_idx".to_string(), + column: "id".to_string(), + }), + MemIndexConfig::Fts(FtsIndexConfig { + name: "search_idx".to_string(), + column: "description".to_string(), + }), + ]; + + let registry = IndexRegistry::from_configs(&configs, 100_000, 8).unwrap(); + assert_eq!(registry.len(), 2); + assert!(registry.get_btree("pk_idx").is_some()); + assert!(registry.get_fts("search_idx").is_some()); + } + + #[test] + fn test_btree_index_max_indexed_batch_position() { + let schema = create_test_schema(); + let index = BTreeMemIndex::new("id".to_string()); + + // Initially max_indexed_batch_position is 0 + assert_eq!(index.max_indexed_batch_position(), 0); + + let batch = create_test_batch(&schema, 0); + + // Insert with sequence tracking + index + .insert_with_batch_position(&batch, 0, Some(5)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 5); + + // Insert with higher sequence + index + .insert_with_batch_position(&batch, 3, Some(10)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 10); + + // Insert with lower sequence (should not decrease) + 
index + .insert_with_batch_position(&batch, 6, Some(7)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 10); + + // Insert without sequence (should not change) + index.insert(&batch, 9).unwrap(); + assert_eq!(index.max_indexed_batch_position(), 10); + } + + #[test] + fn test_fts_index_max_indexed_batch_position() { + let schema = create_test_schema(); + let index = FtsMemIndex::new("description".to_string()); + + // Initially max_indexed_batch_position is 0 + assert_eq!(index.max_indexed_batch_position(), 0); + + let batch = create_test_batch(&schema, 0); + + // Insert with sequence tracking + index + .insert_with_batch_position(&batch, 0, Some(5)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 5); + + // Insert with higher sequence + index + .insert_with_batch_position(&batch, 3, Some(10)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 10); + + // Insert with lower sequence (should not decrease) + index + .insert_with_batch_position(&batch, 6, Some(7)) + .unwrap(); + assert_eq!(index.max_indexed_batch_position(), 10); + } + + #[test] + fn test_index_registry_coverage() { + let schema = create_test_schema(); + let mut registry = IndexRegistry::new(); + + registry.add_btree("id_idx".to_string(), "id".to_string()); + registry.add_fts("desc_idx".to_string(), "description".to_string()); + + // Initial coverage should be 0 + let coverage = registry.get_coverage(); + assert_eq!(coverage.btree_coverage.get("id_idx"), Some(&0)); + assert_eq!(coverage.fts_coverage.get("desc_idx"), Some(&0)); + + // Insert with sequence tracking + let batch = create_test_batch(&schema, 0); + registry + .insert_with_batch_position(&batch, 0, Some(5)) + .unwrap(); + + let coverage = registry.get_coverage(); + assert_eq!(coverage.btree_coverage.get("id_idx"), Some(&5)); + assert_eq!(coverage.fts_coverage.get("desc_idx"), Some(&5)); + + // Insert with higher sequence + registry + .insert_with_batch_position(&batch, 3, Some(10)) + .unwrap(); + + let coverage = registry.get_coverage(); + assert_eq!(coverage.btree_coverage.get("id_idx"), Some(&10)); + assert_eq!(coverage.fts_coverage.get("desc_idx"), Some(&10)); + } + + #[test] + fn test_coverage_result() { + let mut coverage = IndexCoverageInfo::default(); + coverage.btree_coverage.insert("id_idx".to_string(), 50); + coverage.ivfpq_coverage.insert("vec_idx".to_string(), 30); + coverage.fts_coverage.insert("text_idx".to_string(), 0); + + // Full coverage: max_visible_batch_position <= max_indexed_batch_position + assert_eq!( + coverage.check_btree_coverage("id_idx", 50), + CoverageResult::Full + ); + assert_eq!( + coverage.check_btree_coverage("id_idx", 25), + CoverageResult::Full + ); + + // Partial coverage: max_visible_batch_position > max_indexed_batch_position + assert_eq!( + coverage.check_btree_coverage("id_idx", 100), + CoverageResult::Partial { + max_indexed_batch_position: 50 + } + ); + + // No coverage: max_indexed_batch_position is 0 + assert_eq!( + coverage.check_fts_coverage("text_idx", 50), + CoverageResult::None + ); + + // No coverage: index doesn't exist + assert_eq!( + coverage.check_btree_coverage("nonexistent", 50), + CoverageResult::None + ); + + // IVF-PQ coverage + assert_eq!( + coverage.check_ivfpq_coverage("vec_idx", 30), + CoverageResult::Full + ); + assert_eq!( + coverage.check_ivfpq_coverage("vec_idx", 50), + CoverageResult::Partial { + max_indexed_batch_position: 30 + } + ); + } + + #[test] + fn test_coverage_min_values() { + let mut coverage = IndexCoverageInfo::default(); + 
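+ // Illustrative reading of the numbers below: the registry-wide value is the
+ // minimum watermark across indexes of one kind, so a query that needs batches up
+ // to position P can rely on the whole group only when P <= that minimum.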
coverage.btree_coverage.insert("idx1".to_string(), 50); + coverage.btree_coverage.insert("idx2".to_string(), 30); + coverage.btree_coverage.insert("idx3".to_string(), 70); + + // Minimum should be 30 + assert_eq!(coverage.min_btree_coverage(), 30); + + // Empty should be 0 + assert_eq!(coverage.min_ivfpq_coverage(), 0); + assert_eq!(coverage.min_fts_coverage(), 0); + } +} diff --git a/rust/lance/src/dataset/mem_wal/ivfpq_store.rs b/rust/lance/src/dataset/mem_wal/ivfpq_store.rs new file mode 100644 index 00000000000..a4090828e36 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/ivfpq_store.rs @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Lock-free storage for IVF-PQ partition with pre-transposed PQ codes. +//! +//! This module provides a high-performance storage structure for IVF-PQ index +//! partitions that stores PQ codes in column-major (transposed) format for +//! zero-cost search-time access. +//! +//! # Architecture +//! +//! Each partition uses hybrid storage: +//! - **Primary**: Pre-allocated `LockFreeIvfPqPartitionStore` with transposed codes +//! - **Overflow**: `SkipMap` for when primary is full (row-major, transpose at search) +//! +//! This design ensures writes never block while optimizing the common case. +//! +//! # Safety Model +//! +//! Same as `LockFreeBatchStore`: +//! - Single writer (WalFlushHandler during WAL flush) +//! - Multiple concurrent readers +//! - Append-only until memtable flush + +use std::cell::UnsafeCell; +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use arrow_array::types::UInt8Type; +use arrow_array::UInt8Array; +use crossbeam_skiplist::SkipMap; +use lance_index::vector::pq::storage::transpose; + +/// Error when partition store is full. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct PartitionFull; + +impl std::fmt::Display for PartitionFull { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "IVF-PQ partition store is full") + } +} + +impl std::error::Error for PartitionFull {} + +/// Lock-free storage for a single IVF partition with pre-transposed PQ codes. +/// +/// Stores PQ codes in column-major (transposed) format for zero-cost +/// search-time access. Uses the same single-writer, multi-reader pattern +/// as `LockFreeBatchStore`. +/// +/// # Memory Layout +/// +/// ```text +/// codes: [subvec_0_all_vectors | subvec_1_all_vectors | ... | subvec_n_all_vectors] +/// ``` +/// +/// Each subvector section has `capacity` bytes pre-allocated. +/// +/// # Safety +/// +/// - Single writer (WalFlushHandler during WAL flush) +/// - Multiple concurrent readers +/// - Append-only until memtable flush +#[derive(Debug)] +#[allow(dead_code)] // Some methods intentionally public for monitoring/future use +pub struct LockFreeIvfPqPartitionStore { + /// Pre-allocated column-major PQ codes. + /// Layout: codes[subvec_idx * capacity + vector_idx] = code_byte + codes: UnsafeCell]>>, + + /// Row positions for result mapping. + row_positions: UnsafeCell]>>, + + /// Number of vectors committed (visible to readers). + committed_len: AtomicUsize, + + /// Maximum vectors this partition can hold. + capacity: usize, + + /// Number of sub-vectors (PQ code length). + num_sub_vectors: usize, +} + +// SAFETY: Single-writer pattern enforced by architecture. +// UnsafeCell contents are only mutated by single writer thread. 
+unsafe impl Sync for LockFreeIvfPqPartitionStore {} +unsafe impl Send for LockFreeIvfPqPartitionStore {} + +#[allow(dead_code)] // Some methods intentionally public for monitoring/future use +impl LockFreeIvfPqPartitionStore { + /// Create a new partition store with given capacity. + /// + /// # Arguments + /// + /// * `capacity` - Maximum number of vectors + /// * `num_sub_vectors` - PQ code length (number of sub-vectors) + /// + /// # Panics + /// + /// Panics if capacity or num_sub_vectors is 0. + pub fn new(capacity: usize, num_sub_vectors: usize) -> Self { + assert!(capacity > 0, "capacity must be > 0"); + assert!(num_sub_vectors > 0, "num_sub_vectors must be > 0"); + + // Allocate codes: capacity * num_sub_vectors bytes + let codes_size = capacity * num_sub_vectors; + let mut codes = Vec::with_capacity(codes_size); + for _ in 0..codes_size { + codes.push(MaybeUninit::uninit()); + } + + // Allocate row positions: capacity u64s + let mut row_positions = Vec::with_capacity(capacity); + for _ in 0..capacity { + row_positions.push(MaybeUninit::uninit()); + } + + Self { + codes: UnsafeCell::new(codes.into_boxed_slice()), + row_positions: UnsafeCell::new(row_positions.into_boxed_slice()), + committed_len: AtomicUsize::new(0), + capacity, + num_sub_vectors, + } + } + + /// Returns the capacity. + #[inline] + pub fn capacity(&self) -> usize { + self.capacity + } + + /// Returns the number of sub-vectors (code length). + #[inline] + pub fn num_sub_vectors(&self) -> usize { + self.num_sub_vectors + } + + /// Returns the number of committed vectors. + #[inline] + pub fn len(&self) -> usize { + self.committed_len.load(Ordering::Acquire) + } + + /// Returns true if empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns true if at capacity. + #[inline] + pub fn is_full(&self) -> bool { + self.committed_len.load(Ordering::Relaxed) >= self.capacity + } + + /// Returns remaining capacity. + #[inline] + pub fn remaining_capacity(&self) -> usize { + self.capacity + .saturating_sub(self.committed_len.load(Ordering::Relaxed)) + } + + /// Returns fill percentage (0.0 to 1.0). + #[inline] + pub fn fill_percentage(&self) -> f64 { + self.committed_len.load(Ordering::Relaxed) as f64 / self.capacity as f64 + } + + /// Append a batch of already-transposed PQ codes. + /// + /// # Arguments + /// + /// * `transposed_codes` - Column-major codes from `transpose()`. + /// Layout: [subvec0_all, subvec1_all, ...] where each section + /// has `num_vectors` bytes. + /// * `positions` - Row positions for each vector. + /// + /// # Returns + /// + /// * `Ok(())` - Successfully appended + /// * `Err(PartitionFull)` - Not enough capacity + /// + /// # Safety + /// + /// Must be called from single writer thread only. + pub fn append_transposed_batch( + &self, + transposed_codes: &[u8], + positions: &[u64], + ) -> Result<(), PartitionFull> { + let num_vectors = positions.len(); + if num_vectors == 0 { + return Ok(()); + } + + debug_assert_eq!( + transposed_codes.len(), + num_vectors * self.num_sub_vectors, + "transposed_codes length mismatch: expected {}, got {}", + num_vectors * self.num_sub_vectors, + transposed_codes.len() + ); + + let committed = self.committed_len.load(Ordering::Relaxed); + if committed + num_vectors > self.capacity { + return Err(PartitionFull); + } + + // SAFETY: Single writer, and we checked capacity. + let codes = unsafe { &mut *self.codes.get() }; + let row_pos = unsafe { &mut *self.row_positions.get() }; + + // Copy transposed codes column by column. 
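+ // Illustrative numbers: with num_sub_vectors = 2, capacity = 4 and committed = 1,
+ // a 2-vector batch lands in codes[1..3] (sub-vector 0) and codes[5..7]
+ // (sub-vector 1), since each sub-vector section is padded out to `capacity`.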
+ // Source layout: [sv0_v0..sv0_vN, sv1_v0..sv1_vN, ...] + // Dest layout: [sv0_v0..sv0_vCAP, sv1_v0..sv1_vCAP, ...] + for subvec_idx in 0..self.num_sub_vectors { + let src_start = subvec_idx * num_vectors; + let dst_start = subvec_idx * self.capacity + committed; + + for i in 0..num_vectors { + codes[dst_start + i].write(transposed_codes[src_start + i]); + } + } + + // Copy row positions. + for (i, &pos) in positions.iter().enumerate() { + row_pos[committed + i].write(pos); + } + + // Publish with release ordering. + self.committed_len + .store(committed + num_vectors, Ordering::Release); + + Ok(()) + } + + /// Get codes formatted for `ProductQuantizer::compute_distances()`. + /// + /// Copies committed codes to a contiguous buffer in column-major format. + /// This is the format expected by `compute_distances()`. + /// + /// # Returns + /// + /// Tuple of (contiguous_codes, row_positions). + pub fn get_codes_for_search(&self) -> (Vec, Vec) { + let len = self.committed_len.load(Ordering::Acquire); + if len == 0 { + return (Vec::new(), Vec::new()); + } + + let codes = unsafe { &*self.codes.get() }; + let row_pos = unsafe { &*self.row_positions.get() }; + + // Copy codes to contiguous buffer (remove capacity gaps). + let mut result_codes = Vec::with_capacity(len * self.num_sub_vectors); + for subvec_idx in 0..self.num_sub_vectors { + let start = subvec_idx * self.capacity; + for i in 0..len { + // SAFETY: i < len <= committed_len, data was initialized. + result_codes.push(unsafe { codes[start + i].assume_init() }); + } + } + + // Copy row positions. + let result_positions: Vec = (0..len) + .map(|i| unsafe { row_pos[i].assume_init() }) + .collect(); + + (result_codes, result_positions) + } +} + +/// A single IVF partition with primary (pre-transposed) and overflow (row-major) storage. +/// +/// This is the main interface for partition storage, handling the split between +/// fast primary storage and overflow when primary is full. +#[derive(Debug)] +#[allow(dead_code)] // Some methods intentionally public for monitoring/future use +pub struct IvfPqPartition { + /// Primary storage: pre-allocated, pre-transposed codes (fast search). + primary: LockFreeIvfPqPartitionStore, + + /// Overflow storage: SkipMap for when primary is full (slower search). + /// Key: row_position, Value: row-major PQ code. + overflow: SkipMap>, + + /// Number of vectors in overflow (cached for fast access). + overflow_count: AtomicUsize, + + /// Number of sub-vectors (code length). + num_sub_vectors: usize, +} + +#[allow(dead_code)] // Some methods intentionally public for monitoring/future use +impl IvfPqPartition { + /// Create a new partition with given capacity. + /// + /// # Arguments + /// + /// * `capacity` - Maximum vectors in primary storage + /// * `num_sub_vectors` - PQ code length + pub fn new(capacity: usize, num_sub_vectors: usize) -> Self { + Self { + primary: LockFreeIvfPqPartitionStore::new(capacity, num_sub_vectors), + overflow: SkipMap::new(), + overflow_count: AtomicUsize::new(0), + num_sub_vectors, + } + } + + /// Append a batch of vectors to this partition. + /// + /// Goes to primary if capacity available, otherwise overflow. + /// Codes should be in row-major format; this method handles transpose. 
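+ ///
+ /// While the primary store has spare capacity the whole batch is transposed and
+ /// appended there; a batch that only partially fits is split, and once the primary
+ /// store is full the remaining vectors are kept row-major in the overflow SkipMap.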
+ /// + /// # Arguments + /// + /// * `row_major_codes` - Row-major PQ codes (as returned by `pq.quantize()`) + /// * `positions` - Row positions for each vector + pub fn append_batch(&self, row_major_codes: &[u8], positions: &[u64]) { + let num_vectors = positions.len(); + if num_vectors == 0 { + return; + } + + debug_assert_eq!( + row_major_codes.len(), + num_vectors * self.num_sub_vectors, + "row_major_codes length mismatch" + ); + + let primary_remaining = self.primary.remaining_capacity(); + + if primary_remaining >= num_vectors { + // All fit in primary - transpose and append. + let codes_array = UInt8Array::from(row_major_codes.to_vec()); + let transposed = + transpose::(&codes_array, num_vectors, self.num_sub_vectors); + let _ = self + .primary + .append_transposed_batch(transposed.values(), positions); + } else if primary_remaining > 0 { + // Split: some go to primary, rest to overflow. + let primary_count = primary_remaining; + + // Primary portion - transpose and append. + let primary_codes = &row_major_codes[..primary_count * self.num_sub_vectors]; + let primary_positions = &positions[..primary_count]; + let codes_array = UInt8Array::from(primary_codes.to_vec()); + let transposed = + transpose::(&codes_array, primary_count, self.num_sub_vectors); + let _ = self + .primary + .append_transposed_batch(transposed.values(), primary_positions); + + // Overflow portion - store row-major. + let overflow_count = num_vectors - primary_count; + for i in 0..overflow_count { + let idx = primary_count + i; + let code_start = idx * self.num_sub_vectors; + let code_end = code_start + self.num_sub_vectors; + let code = row_major_codes[code_start..code_end].to_vec(); + self.overflow.insert(positions[idx], code); + } + self.overflow_count + .fetch_add(overflow_count, Ordering::Relaxed); + } else { + // Primary full - all go to overflow. + for (i, &pos) in positions.iter().enumerate() { + let code_start = i * self.num_sub_vectors; + let code_end = code_start + self.num_sub_vectors; + let code = row_major_codes[code_start..code_end].to_vec(); + self.overflow.insert(pos, code); + } + self.overflow_count + .fetch_add(num_vectors, Ordering::Relaxed); + } + } + + /// Check if this partition has overflow data. + #[inline] + pub fn has_overflow(&self) -> bool { + self.overflow_count.load(Ordering::Relaxed) > 0 + } + + /// Get number of vectors in overflow. + #[inline] + pub fn overflow_len(&self) -> usize { + self.overflow_count.load(Ordering::Relaxed) + } + + /// Get number of vectors in primary storage. + #[inline] + pub fn primary_len(&self) -> usize { + self.primary.len() + } + + /// Get overflow percentage (for metrics). + pub fn overflow_percentage(&self) -> f64 { + let primary = self.primary.len(); + let overflow = self.overflow_count.load(Ordering::Relaxed); + let total = primary + overflow; + if total == 0 { + 0.0 + } else { + overflow as f64 / total as f64 + } + } + + /// Total vectors in this partition. + #[inline] + pub fn len(&self) -> usize { + self.primary.len() + self.overflow_count.load(Ordering::Relaxed) + } + + /// Returns true if empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Get primary codes for search (pre-transposed, fast). + /// + /// Returns (codes, positions) where codes are column-major. + pub fn get_primary_codes_for_search(&self) -> (Vec, Vec) { + self.primary.get_codes_for_search() + } + + /// Get overflow codes for search. + /// + /// Returns (row_major_codes, positions). Caller must transpose before distance computation. 
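+ ///
+ /// A minimal sketch of the transpose step a caller might do (mirroring the
+ /// `transpose::<UInt8Type>` call used in `append_batch`; names are illustrative):
+ ///
+ /// ```ignore
+ /// let (codes, positions) = partition.get_overflow_codes_for_search();
+ /// if !positions.is_empty() {
+ ///     let transposed =
+ ///         transpose::<UInt8Type>(&UInt8Array::from(codes), positions.len(), num_sub_vectors);
+ ///     // `transposed.values()` is now column-major, like the primary codes.
+ /// }
+ /// ```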
+ pub fn get_overflow_codes_for_search(&self) -> (Vec, Vec) { + let overflow_count = self.overflow_count.load(Ordering::Acquire); + if overflow_count == 0 { + return (Vec::new(), Vec::new()); + } + + let mut codes = Vec::with_capacity(overflow_count * self.num_sub_vectors); + let mut positions = Vec::with_capacity(overflow_count); + + for entry in self.overflow.iter() { + positions.push(*entry.key()); + codes.extend_from_slice(entry.value()); + } + + (codes, positions) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_partition_store_basic() { + let store = LockFreeIvfPqPartitionStore::new(100, 16); + assert_eq!(store.capacity(), 100); + assert_eq!(store.num_sub_vectors(), 16); + assert_eq!(store.len(), 0); + assert!(store.is_empty()); + assert!(!store.is_full()); + assert_eq!(store.remaining_capacity(), 100); + } + + #[test] + fn test_partition_store_append_transposed() { + let store = LockFreeIvfPqPartitionStore::new(100, 4); + + // Append 3 vectors with 4 sub-vectors each. + // Transposed layout: [sv0_v0, sv0_v1, sv0_v2, sv1_v0, sv1_v1, sv1_v2, ...] + let transposed_codes = vec![ + // SubVec 0 + 10, 20, 30, // SubVec 1 + 11, 21, 31, // SubVec 2 + 12, 22, 32, // SubVec 3 + 13, 23, 33, + ]; + let positions = vec![100, 200, 300]; + + store + .append_transposed_batch(&transposed_codes, &positions) + .unwrap(); + + assert_eq!(store.len(), 3); + assert_eq!(store.remaining_capacity(), 97); + + let (codes, pos) = store.get_codes_for_search(); + assert_eq!(pos, vec![100, 200, 300]); + assert_eq!(codes, transposed_codes); + } + + #[test] + fn test_partition_store_full() { + let store = LockFreeIvfPqPartitionStore::new(2, 4); + + // First batch. + let codes1 = vec![1, 2, 3, 4, 5, 6, 7, 8]; // 2 vectors transposed + let pos1 = vec![10, 20]; + store.append_transposed_batch(&codes1, &pos1).unwrap(); + + assert!(store.is_full()); + assert_eq!(store.remaining_capacity(), 0); + + // Should fail. + let codes2 = vec![9, 10, 11, 12]; + let pos2 = vec![30]; + assert!(store.append_transposed_batch(&codes2, &pos2).is_err()); + } + + #[test] + fn test_ivfpq_partition_primary_only() { + let partition = IvfPqPartition::new(100, 4); + + // Row-major codes for 3 vectors. + let row_major = vec![ + 10, 11, 12, 13, // vec 0 + 20, 21, 22, 23, // vec 1 + 30, 31, 32, 33, // vec 2 + ]; + let positions = vec![100, 200, 300]; + + partition.append_batch(&row_major, &positions); + + assert_eq!(partition.len(), 3); + assert_eq!(partition.primary_len(), 3); + assert_eq!(partition.overflow_len(), 0); + assert!(!partition.has_overflow()); + + let (codes, pos) = partition.get_primary_codes_for_search(); + assert_eq!(pos, vec![100, 200, 300]); + // Codes should be transposed. + assert_eq!( + codes, + vec![ + 10, 20, 30, // sv0 + 11, 21, 31, // sv1 + 12, 22, 32, // sv2 + 13, 23, 33, // sv3 + ] + ); + } + + #[test] + fn test_ivfpq_partition_overflow() { + let partition = IvfPqPartition::new(2, 4); // Only 2 slots in primary. + + // Insert 4 vectors - 2 should go to primary, 2 to overflow. + let row_major = vec![ + 10, 11, 12, 13, // vec 0 -> primary + 20, 21, 22, 23, // vec 1 -> primary + 30, 31, 32, 33, // vec 2 -> overflow + 40, 41, 42, 43, // vec 3 -> overflow + ]; + let positions = vec![100, 200, 300, 400]; + + partition.append_batch(&row_major, &positions); + + assert_eq!(partition.len(), 4); + assert_eq!(partition.primary_len(), 2); + assert_eq!(partition.overflow_len(), 2); + assert!(partition.has_overflow()); + assert!((partition.overflow_percentage() - 0.5).abs() < 0.001); + + // Check primary. 
+ let (primary_codes, primary_pos) = partition.get_primary_codes_for_search(); + assert_eq!(primary_pos, vec![100, 200]); + // Transposed. + assert_eq!( + primary_codes, + vec![ + 10, 20, // sv0 + 11, 21, // sv1 + 12, 22, // sv2 + 13, 23, // sv3 + ] + ); + + // Check overflow (row-major). + let (overflow_codes, overflow_pos) = partition.get_overflow_codes_for_search(); + assert_eq!(overflow_pos.len(), 2); + assert!(overflow_pos.contains(&300)); + assert!(overflow_pos.contains(&400)); + // Row-major codes in SkipMap order (sorted by key). + assert_eq!(overflow_codes.len(), 8); + } + + #[test] + fn test_ivfpq_partition_all_overflow() { + let partition = IvfPqPartition::new(2, 4); + + // Fill primary first. + let batch1 = vec![1, 2, 3, 4, 5, 6, 7, 8]; + partition.append_batch(&batch1, &[10, 20]); + assert_eq!(partition.primary_len(), 2); + assert!(!partition.has_overflow()); + + // This batch should all go to overflow. + let batch2 = vec![11, 12, 13, 14, 21, 22, 23, 24, 31, 32, 33, 34]; + partition.append_batch(&batch2, &[30, 40, 50]); + + assert_eq!(partition.len(), 5); + assert_eq!(partition.primary_len(), 2); + assert_eq!(partition.overflow_len(), 3); + assert!(partition.has_overflow()); + } +} diff --git a/rust/lance/src/dataset/mem_wal/manifest.rs b/rust/lance/src/dataset/mem_wal/manifest.rs new file mode 100644 index 00000000000..35e87ad4832 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/manifest.rs @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Region manifest storage with bit-reversed versioned naming. +//! +//! Region manifests are stored as versioned protobuf files using bit-reversed +//! naming scheme to distribute files across object store keyspace. +//! +//! ## File Layout +//! +//! ```text +//! _mem_wal/{region_id}/manifest/ +//! ├── {bit_reversed_version}.binpb # Versioned manifest files +//! └── version_hint.json # Best-effort version hint +//! ``` +//! +//! ## Write Protocol +//! +//! 1. Compute next version number +//! 2. Write manifest to `{bit_reversed_version}.binpb` using PUT-IF-NOT-EXISTS +//! 3. Best-effort update `version_hint.json` (failure is acceptable) +//! +//! ## Read Protocol +//! +//! 1. Read `version_hint.json` for starting version (default: 1 if not found) +//! 2. Use HEAD requests to check existence of subsequent versions +//! 3. Continue until a version is not found +//! 4. Return the last found version + +use std::sync::Arc; + +use bytes::Bytes; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use lance_core::{Error, Result}; +use lance_index::mem_wal::RegionManifest; +use lance_io::object_store::ObjectStore; +use lance_table::format::pb; +use object_store::path::Path; +use object_store::PutMode; +use object_store::PutOptions; +use prost::Message; +use serde::{Deserialize, Serialize}; +use snafu::location; +use tracing::{debug, warn}; +use uuid::Uuid; + +use super::util::{manifest_filename, parse_bit_reversed_filename, region_manifest_path}; + +/// Version hint file structure. +#[derive(Debug, Serialize, Deserialize)] +struct VersionHint { + version: u64, +} + +/// Store for reading and writing region manifests. +/// +/// Handles versioned manifest files with bit-reversed naming scheme +/// and PUT-IF-NOT-EXISTS atomicity. +#[derive(Debug)] +pub struct RegionManifestStore { + object_store: Arc, + region_id: Uuid, + manifest_dir: Path, + manifest_scan_batch_size: usize, +} + +impl RegionManifestStore { + /// Create a new manifest store for the given region. 
+ /// + /// # Arguments + /// + /// * `object_store` - Object store for reading/writing manifests + /// * `base_path` - Base path within the object store (from ObjectStore::from_uri) + /// * `region_id` - Region UUID + /// * `manifest_scan_batch_size` - Batch size for parallel HEAD requests when scanning versions + pub fn new( + object_store: Arc, + base_path: &Path, + region_id: Uuid, + manifest_scan_batch_size: usize, + ) -> Self { + let manifest_dir = region_manifest_path(base_path, ®ion_id); + Self { + object_store, + region_id, + manifest_dir, + manifest_scan_batch_size, + } + } + + /// Read the latest manifest version. + /// + /// Returns `None` if no manifest exists (new region). + pub async fn read_latest(&self) -> Result> { + let version = self.find_latest_version().await?; + if version == 0 { + return Ok(None); + } + + self.read_version(version).await.map(Some) + } + + /// Read a specific manifest version. + pub async fn read_version(&self, version: u64) -> Result { + let filename = manifest_filename(version); + let path = self.manifest_dir.child(filename.as_str()); + + let data = self.object_store.inner.get(&path).await.map_err(|e| { + Error::io( + format!( + "Failed to read manifest version {} for region {}: {}", + version, self.region_id, e + ), + location!(), + ) + })?; + + let bytes = data + .bytes() + .await + .map_err(|e| Error::io(format!("Failed to read manifest bytes: {}", e), location!()))?; + + let pb_manifest = pb::RegionManifest::decode(bytes).map_err(|e| { + Error::io( + format!("Failed to decode manifest protobuf: {}", e), + location!(), + ) + })?; + + RegionManifest::try_from(pb_manifest) + } + + /// Write a new manifest version atomically. + /// + /// Uses storage-appropriate strategy: + /// - Local: Write to temp file + atomic rename for fencing + /// - Cloud: PUT-IF-NOT-EXISTS (S3 conditional write) + /// + /// Returns the version that was written. + /// + /// # Errors + /// + /// Returns `Error::AlreadyExists` if another writer already wrote this version. + pub async fn write(&self, manifest: &RegionManifest) -> Result { + let version = manifest.version; + let filename = manifest_filename(version); + let path = self.manifest_dir.child(filename.as_str()); + + let pb_manifest = pb::RegionManifest::from(manifest); + let bytes = pb_manifest.encode_to_vec(); + + if self.object_store.is_local() { + // Local storage: Use temp file + atomic rename for fencing + let temp_filename = format!("{}.tmp.{}", filename, uuid::Uuid::new_v4()); + let temp_path = self.manifest_dir.child(temp_filename.as_str()); + + // Write to temp file + self.object_store + .inner + .put(&temp_path, Bytes::from(bytes).into()) + .await + .map_err(|e| { + Error::io(format!("Failed to write temp manifest: {}", e), location!()) + })?; + + // Atomically rename to final path + match self + .object_store + .inner + .rename_if_not_exists(&temp_path, &path) + .await + { + Ok(()) => {} + Err(object_store::Error::AlreadyExists { .. 
}) => { + // Clean up temp file + let _ = self.object_store.delete(&temp_path).await; + return Err(Error::io( + format!( + "Manifest version {} already exists for region {}", + version, self.region_id + ), + location!(), + )); + } + Err(e) => { + // Clean up temp file + let _ = self.object_store.delete(&temp_path).await; + return Err(Error::io( + format!( + "Failed to write manifest version {} for region {}: {}", + version, self.region_id, e + ), + location!(), + )); + } + } + } else { + // Cloud storage: Use PUT-IF-NOT-EXISTS + let put_opts = PutOptions { + mode: PutMode::Create, + ..Default::default() + }; + + self.object_store + .inner + .put_opts(&path, Bytes::from(bytes).into(), put_opts) + .await + .map_err(|e| { + if matches!(e, object_store::Error::AlreadyExists { .. }) { + Error::io( + format!( + "Manifest version {} already exists for region {}", + version, self.region_id + ), + location!(), + ) + } else { + Error::io( + format!( + "Failed to write manifest version {} for region {}: {}", + version, self.region_id, e + ), + location!(), + ) + } + })?; + } + + // Best-effort update version hint (failures are logged as warnings) + self.write_version_hint(version).await; + + debug!( + "Wrote manifest version {} for region {}", + version, self.region_id + ); + + Ok(version) + } + + /// Find the latest manifest version. + /// + /// Uses HEAD requests starting from version hint, scanning forward + /// until a version is not found. + async fn find_latest_version(&self) -> Result { + // Start from version hint or 1 + let hint = self.read_version_hint().await.unwrap_or(1); + + // Scan forward from hint using HEAD requests + let mut latest_found = 0u64; + + // First, check if hint version exists + if hint > 0 && self.version_exists(hint).await? { + latest_found = hint; + } else if hint > 1 { + // Hint might be stale, scan from beginning + if self.version_exists(1).await? { + latest_found = 1; + } + } + + // Parallel scan forward with batches of HEAD requests + let batch_size = self.manifest_scan_batch_size; + loop { + let mut futures = FuturesUnordered::new(); + for offset in 0..batch_size { + let version = latest_found + 1 + offset as u64; + futures.push(async move { (version, self.version_exists(version).await) }); + } + + let mut found_any = false; + while let Some((version, result)) = futures.next().await { + if let Ok(true) = result { + if version > latest_found { + latest_found = version; + found_any = true; + } + } + } + + if !found_any { + break; + } + } + + Ok(latest_found) + } + + /// Check if a manifest version exists using HEAD request. + async fn version_exists(&self, version: u64) -> Result { + let filename = manifest_filename(version); + let path = self.manifest_dir.child(filename.as_str()); + + match self.object_store.inner.head(&path).await { + Ok(_) => Ok(true), + Err(object_store::Error::NotFound { .. }) => Ok(false), + Err(e) => Err(Error::io( + format!("HEAD request failed for version {}: {}", version, e), + location!(), + )), + } + } + + /// Read the version hint file. + async fn read_version_hint(&self) -> Option { + let path = self.manifest_dir.child("version_hint.json"); + + let data = self.object_store.inner.get(&path).await.ok()?; + let bytes = data.bytes().await.ok()?; + let hint: VersionHint = serde_json::from_slice(&bytes).ok()?; + + Some(hint.version) + } + + /// Write the version hint file (best-effort, failures logged but ignored). 
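+ ///
+ /// The hint is a tiny JSON document such as `{"version": 7}` (value shown for
+ /// illustration only); readers only use it as a starting point and rescan, so a
+ /// stale or missing hint is harmless.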
+ async fn write_version_hint(&self, version: u64) { + let path = self.manifest_dir.child("version_hint.json"); + let hint = VersionHint { version }; + + match serde_json::to_vec(&hint) { + Ok(bytes) => { + if let Err(e) = self + .object_store + .inner + .put(&path, Bytes::from(bytes).into()) + .await + { + warn!( + "Failed to write version hint for region {}: {}", + self.region_id, e + ); + } + } + Err(e) => { + warn!("Failed to serialize version hint: {}", e); + } + } + } + + /// List all manifest versions (for garbage collection or debugging). + pub async fn list_versions(&self) -> Result> { + let mut versions = Vec::new(); + + let list_result = self + .object_store + .inner + .list(Some(&self.manifest_dir)) + .collect::>() + .await; + + for item in list_result { + match item { + Ok(meta) => { + if let Some(filename) = meta.location.filename() { + if filename.ends_with(".binpb") { + if let Some(version) = parse_bit_reversed_filename(filename) { + versions.push(version); + } + } + } + } + Err(e) => { + warn!("Error listing manifest directory: {}", e); + } + } + } + + versions.sort_unstable(); + Ok(versions) + } + + /// Get the region ID. + pub fn region_id(&self) -> Uuid { + self.region_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + async fn create_local_store() -> (Arc, Path, TempDir) { + let temp_dir = tempfile::tempdir().unwrap(); + let uri = format!("file://{}", temp_dir.path().display()); + let (store, path) = ObjectStore::from_uri(&uri).await.unwrap(); + (store, path, temp_dir) + } + + fn create_test_manifest(region_id: Uuid, version: u64, epoch: u64) -> RegionManifest { + RegionManifest { + region_id, + version, + region_spec_id: 0, + writer_epoch: epoch, + replay_after_wal_id: 0, + wal_id_last_seen: 0, + current_generation: 1, + flushed_generations: vec![], + } + } + + #[tokio::test] + async fn test_read_latest_empty() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = RegionManifestStore::new(store, &base_path, region_id, 2); + + let result = manifest_store.read_latest().await.unwrap(); + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_write_and_read_manifest() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = RegionManifestStore::new(store, &base_path, region_id, 2); + + let manifest = create_test_manifest(region_id, 1, 1); + manifest_store.write(&manifest).await.unwrap(); + + let loaded = manifest_store.read_latest().await.unwrap().unwrap(); + assert_eq!(loaded.version, 1); + assert_eq!(loaded.writer_epoch, 1); + assert_eq!(loaded.region_id, region_id); + } + + #[tokio::test] + async fn test_multiple_versions() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = RegionManifestStore::new(store, &base_path, region_id, 2); + + // Write multiple versions + for version in 1..=5 { + let manifest = create_test_manifest(region_id, version, version); + manifest_store.write(&manifest).await.unwrap(); + } + + // Should find latest + let loaded = manifest_store.read_latest().await.unwrap().unwrap(); + assert_eq!(loaded.version, 5); + assert_eq!(loaded.writer_epoch, 5); + + // List should return all versions + let versions = manifest_store.list_versions().await.unwrap(); + assert_eq!(versions, vec![1, 2, 3, 4, 5]); + } + + #[tokio::test] + async fn test_read_specific_version() { + let (store, base_path, _temp_dir) = 
create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = RegionManifestStore::new(store, &base_path, region_id, 2); + + for version in 1..=3 { + let manifest = create_test_manifest(region_id, version, version * 10); + manifest_store.write(&manifest).await.unwrap(); + } + + let v2 = manifest_store.read_version(2).await.unwrap(); + assert_eq!(v2.version, 2); + assert_eq!(v2.writer_epoch, 20); + } + + #[tokio::test] + async fn test_put_if_not_exists() { + let (store, base_path, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = RegionManifestStore::new(store, &base_path, region_id, 2); + + let manifest1 = create_test_manifest(region_id, 1, 1); + manifest_store.write(&manifest1).await.unwrap(); + + // Second write to same version should fail + let manifest2 = create_test_manifest(region_id, 1, 2); + let result = manifest_store.write(&manifest2).await; + assert!(result.is_err()); + } +} diff --git a/rust/lance/src/dataset/mem_wal/memtable.rs b/rust/lance/src/dataset/mem_wal/memtable.rs new file mode 100644 index 00000000000..8a57d6caa1e --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/memtable.rs @@ -0,0 +1,1122 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! In-memory MemTable for buffering writes. + +#[cfg(test)] +mod perf_test; + +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use arrow_array::{Array, RecordBatch, RecordBatchIterator}; +use arrow_schema::Schema as ArrowSchema; +use lance_core::datatypes::Schema; +use lance_core::{Error, Result}; +use lance_index::scalar::bloomfilter::sbbf::Sbbf; +use snafu::location; +use tokio::sync::RwLock; +use tracing::debug; +use uuid::Uuid; + +use super::batch_store::LockFreeBatchStore; +use super::indexes::IndexRegistry; +use super::watchable_cell::{WatchableOnceCell, WatchableOnceCellReader}; +use super::write::{DurabilityResult, WalFlushResult}; +use crate::Dataset; + +/// Default batch store capacity when not specified. +const DEFAULT_BATCH_CAPACITY: usize = 1024; + +/// Configuration for the reader cache. +#[derive(Debug, Clone)] +pub struct CacheConfig { + /// Time-to-live for cached Dataset. Default: 60 seconds. + pub ttl: Duration, + /// Whether to always return fresh data (bypass cache). Default: false. + pub always_fresh: bool, +} + +impl Default for CacheConfig { + fn default() -> Self { + Self { + ttl: Duration::from_secs(60), + always_fresh: false, + } + } +} + +/// In-memory table for buffering writes. +/// +/// Stores Arrow RecordBatches in a lock-free append-only structure for O(1) operations. +/// Dataset is constructed on-demand for reading with configurable caching. +/// +/// # Thread Safety +/// +/// - **Writer**: Only one thread should call `insert_with_seq()` at a time. +/// This is enforced by the WriteBatchHandler architecture. +/// - **Readers**: Multiple threads can safely call read methods concurrently. +pub struct MemTable { + /// Schema for this MemTable. + schema: Arc, + /// Lance schema (for index operations). + lance_schema: Schema, + + /// Lock-free batch storage (replaces RwLock>). + /// Wrapped in Arc for sharing with scanners. + batch_store: Arc, + + /// Unique URI for on-demand Dataset construction. + dataset_uri: String, + + /// Cache configuration for reading. + cache_config: CacheConfig, + /// Cached Dataset for reading (with eventual consistency). + cached_dataset: RwLock>, + + /// Generation number (incremented on flush). 
+ generation: u64, + + /// WAL batch mapping: batch_position -> (wal_entry_position, position within WAL entry). + wal_batch_mapping: HashMap, + /// Last WAL entry position that has been flushed. + last_flushed_wal_entry_position: u64, + /// Set of batch IDs that have been flushed to WAL. + flushed_batch_positions: HashSet, + + /// Primary key bloom filter for staleness detection. + pk_bloom_filter: Sbbf, + /// Primary key field IDs (for bloom filter updates). + pk_field_ids: Vec, + + /// Index registry (optional, for indexed writes). + /// Wrapped in Arc for sharing with async index handler. + indexes: Option>, + + /// WAL entry position when this memtable was frozen. + /// Used for WAL replay starting point during recovery. + /// None means the memtable is still active (not frozen). + frozen_at_wal_entry_position: Option, + + /// Reader for WAL flush completion notification. + /// Set when the memtable is frozen and a WAL flush request is sent. + /// The reader can be awaited to know when WAL flush is complete. + /// Uses Mutex for interior mutability since the MemTable is wrapped in Arc when frozen. + /// Uses Result since lance_core::Error doesn't implement Clone. + wal_flush_completion: std::sync::Mutex< + Option>>, + >, + + /// Cell for memtable flush completion notification. + /// Created when the memtable is frozen and set with a value when the flush completes. + /// Used by backpressure to wait for oldest memtable flush completion. + memtable_flush_completion: std::sync::Mutex>>, +} + +/// Cached Dataset with timestamp for eventual consistency. +struct CachedDataset { + dataset: Dataset, + created_at: Instant, + batch_count: usize, +} + +/// Default expected items for primary key bloom filter. +/// Consistent with lance-index scalar bloomfilter defaults. +const PK_BLOOM_FILTER_EXPECTED_ITEMS: u64 = 8192; + +/// Default false positive probability for primary key bloom filter. +/// Consistent with lance-index scalar bloomfilter defaults (≈ 1 in 1754). +const PK_BLOOM_FILTER_FPP: f64 = 0.00057; + +impl MemTable { + /// Create a new MemTable with default capacity. + /// + /// # Arguments + /// + /// * `schema` - Arrow schema for the data + /// * `generation` - Initial generation number (typically 1 for new, or from recovery) + /// * `pk_field_ids` - Field IDs that form the primary key (for bloom filter) + pub fn new(schema: Arc, generation: u64, pk_field_ids: Vec) -> Result { + Self::with_capacity( + schema, + generation, + pk_field_ids, + CacheConfig::default(), + DEFAULT_BATCH_CAPACITY, + ) + } + + /// Create a new MemTable with custom cache configuration. + /// + /// # Arguments + /// + /// * `schema` - Arrow schema for the data + /// * `generation` - Initial generation number (typically 1 for new, or from recovery) + /// * `pk_field_ids` - Field IDs that form the primary key (for bloom filter) + /// * `cache_config` - Configuration for reader cache (TTL, freshness) + pub fn with_cache_config( + schema: Arc, + generation: u64, + pk_field_ids: Vec, + cache_config: CacheConfig, + ) -> Result { + Self::with_capacity( + schema, + generation, + pk_field_ids, + cache_config, + DEFAULT_BATCH_CAPACITY, + ) + } + + /// Create a new MemTable with custom capacity. 
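+ ///
+ /// This is the most general constructor; `new` and `with_cache_config` both
+ /// forward to it with `DEFAULT_BATCH_CAPACITY` (and, for `new`, `CacheConfig::default()`).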
+ /// + /// # Arguments + /// + /// * `schema` - Arrow schema for the data + /// * `generation` - Initial generation number (typically 1 for new, or from recovery) + /// * `pk_field_ids` - Field IDs that form the primary key (for bloom filter) + /// * `cache_config` - Configuration for reader cache (TTL, freshness) + /// * `batch_capacity` - Maximum number of batches before flush is required + pub fn with_capacity( + schema: Arc, + generation: u64, + pk_field_ids: Vec, + cache_config: CacheConfig, + batch_capacity: usize, + ) -> Result { + let lance_schema = Schema::try_from(schema.as_ref())?; + + // Initialize bloom filter for primary key staleness detection. + let pk_bloom_filter = + Sbbf::with_ndv_fpp(PK_BLOOM_FILTER_EXPECTED_ITEMS, PK_BLOOM_FILTER_FPP).map_err( + |e| { + Error::io( + format!("Failed to create bloom filter for primary key: {}", e), + location!(), + ) + }, + )?; + + // Generate unique URI for on-demand Dataset construction + let dataset_uri = format!("memory://{}", Uuid::new_v4()); + + // Create lock-free batch store + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(batch_capacity)); + + // Create memtable_flush_completion cell immediately so backpressure can + // wait on it even before the memtable is frozen. Every memtable will + // eventually be frozen and flushed. + let memtable_flush_cell = super::watchable_cell::WatchableOnceCell::new(); + + Ok(Self { + schema, + lance_schema, + batch_store, + dataset_uri, + cache_config, + cached_dataset: RwLock::new(None), + generation, + wal_batch_mapping: HashMap::new(), + last_flushed_wal_entry_position: 0, + flushed_batch_positions: HashSet::new(), + pk_bloom_filter, + pk_field_ids, + indexes: None, + frozen_at_wal_entry_position: None, + wal_flush_completion: std::sync::Mutex::new(None), + memtable_flush_completion: std::sync::Mutex::new(Some(memtable_flush_cell)), + }) + } + + /// Set the index registry for indexed writes. + pub fn set_indexes(&mut self, indexes: IndexRegistry) { + self.indexes = Some(Arc::new(indexes)); + } + + /// Set the index registry with an Arc (for sharing with async handler). + pub fn set_indexes_arc(&mut self, indexes: Arc) { + self.indexes = Some(indexes); + } + + /// Mark this memtable as frozen with the given WAL entry position. + /// + /// Once frozen, no new writes should be added. The memtable will be + /// added to the immutable queue for flushing to Lance storage. + /// + /// # Arguments + /// + /// * `wal_entry_position` - The last WAL entry position when this memtable was frozen + pub fn freeze(&mut self, wal_entry_position: u64) { + self.frozen_at_wal_entry_position = Some(wal_entry_position); + } + + /// Set the WAL flush completion reader. + /// + /// Called when a WAL flush request is sent at freeze time. + /// The reader can be awaited by flush_oldest_immutable to know when + /// the WAL flush is complete. + pub fn set_wal_flush_completion( + &self, + reader: WatchableOnceCellReader>, + ) { + *self.wal_flush_completion.lock().unwrap() = Some(reader); + } + + /// Take the WAL flush completion reader. + /// + /// Returns the reader if set, consuming it. Used by flush_oldest_immutable + /// to await WAL flush completion before proceeding with memtable flush. + /// Thread-safe via interior mutability. + pub fn take_wal_flush_completion( + &self, + ) -> Option>> { + self.wal_flush_completion.lock().unwrap().take() + } + + /// Check if this memtable has a pending WAL flush completion to await. 
+ pub fn has_pending_wal_flush(&self) -> bool { + self.wal_flush_completion.lock().unwrap().is_some() + } + + /// Get a reader for the memtable flush completion. + /// + /// The cell is created at memtable construction time, so this always + /// returns a reader. This allows backpressure to wait on the active + /// memtable's flush completion, not just frozen memtables. + /// + /// # Panics + /// + /// Panics if called after `signal_memtable_flush_complete()` has consumed the cell. + pub fn create_memtable_flush_completion(&self) -> WatchableOnceCellReader { + self.memtable_flush_completion + .lock() + .unwrap() + .as_ref() + .expect("memtable_flush_completion cell should exist (created at construction)") + .reader() + } + + /// Get a reader for the memtable flush completion. + /// + /// Returns a reader if the completion cell exists, without consuming it. + /// Multiple readers can be obtained from the same cell. + pub fn get_memtable_flush_watcher(&self) -> Option> { + self.memtable_flush_completion + .lock() + .unwrap() + .as_ref() + .map(|cell| cell.reader()) + } + + /// Signal that the memtable flush is complete. + /// + /// Called after the memtable has been flushed to Lance storage. + pub fn signal_memtable_flush_complete(&self) { + if let Some(cell) = self.memtable_flush_completion.lock().unwrap().take() { + cell.write(DurabilityResult::ok()); + } + } + + /// Get the WAL entry position when this memtable was frozen. + /// + /// Returns `None` if the memtable is still active (not frozen). + pub fn frozen_at_wal_entry_position(&self) -> Option { + self.frozen_at_wal_entry_position + } + + /// Check if this memtable has been frozen. + pub fn is_frozen(&self) -> bool { + self.frozen_at_wal_entry_position.is_some() + } + + /// Insert a record batch into the MemTable. + /// + /// O(1) append. + /// + /// # Returns + /// + /// The batch position (0-indexed) for the inserted batch. + /// + /// # Single Writer Requirement + /// + /// This method MUST only be called from the single writer task. + pub async fn insert(&mut self, batch: RecordBatch) -> Result { + // Validate schema compatibility + if batch.schema() != self.schema { + return Err(Error::invalid_input( + "Batch schema doesn't match MemTable schema", + location!(), + )); + } + + let num_rows = batch.num_rows(); + if num_rows == 0 { + return Err(Error::invalid_input( + "Cannot insert empty batch", + location!(), + )); + } + + // Row offset is the current row count (before adding this batch) + let row_offset = self.batch_store.total_rows() as u64; + + // Update bloom filter with primary keys + self.update_bloom_filter(&batch)?; + + // Get batch position before appending (for index coverage tracking) + let batch_position = self.batch_store.len(); + + // Update indexes with batch position for coverage tracking + if let Some(ref indexes) = self.indexes { + indexes.insert_with_batch_position(&batch, row_offset, Some(batch_position))?; + } + + // Append to batch store (returns batch_position, row_offset, estimated_size) + let (batch_position, _row_offset, _estimated_size) = + self.batch_store.append(batch).map_err(|_| { + Error::invalid_input( + "MemTable batch store is full - should have been flushed", + location!(), + ) + })?; + + debug!( + "Inserted {} rows into MemTable (batch_position={}, row_offset={}, total_rows={})", + num_rows, + batch_position, + row_offset, + self.batch_store.total_rows() + ); + + Ok(batch_position) + } + + /// Insert a batch without updating indexes. 
+ /// + /// Index updates are performed during WAL flush by `WalFlushHandler`. + /// + /// Returns `(batch_position, row_offset, estimated_size)` so the caller can queue the index update. + /// + /// # Single Writer Requirement + /// + /// This method MUST only be called from the single writer task. + pub async fn insert_batch_only(&mut self, batch: RecordBatch) -> Result<(usize, u64, usize)> { + // Validate schema compatibility + if batch.schema() != self.schema { + return Err(Error::invalid_input( + "Batch schema doesn't match MemTable schema", + location!(), + )); + } + + let num_rows = batch.num_rows(); + if num_rows == 0 { + return Err(Error::invalid_input( + "Cannot insert empty batch", + location!(), + )); + } + + // Update bloom filter with primary keys + self.update_bloom_filter(&batch)?; + + // NOTE: Index update is skipped - caller will queue async update + + // Append to batch store (returns batch_position, row_offset, estimated_size) + let (batch_position, row_offset, estimated_size) = + self.batch_store.append(batch).map_err(|_| { + Error::invalid_input( + "MemTable batch store is full - should have been flushed", + location!(), + ) + })?; + + debug!( + "Inserted {} rows into MemTable (batch_only, batch_position={}, row_offset={}, total_rows={})", + num_rows, + batch_position, + row_offset, + self.batch_store.total_rows() + ); + + Ok((batch_position, row_offset, estimated_size)) + } + + /// Check if the MemTable should be flushed. + /// + /// Returns true if the batch store is full or estimated size exceeds threshold. + pub fn should_flush(&self, max_bytes: usize) -> bool { + self.batch_store.is_full() || self.batch_store.estimated_bytes() >= max_bytes + } + + /// Get batches visible up to a specific batch position (inclusive). + /// + /// A batch at position `i` is visible if `i <= max_visible_batch_position`. + /// + /// # Arguments + /// + /// * `max_visible_batch_position` - The maximum batch position to include (inclusive) + /// + /// # Returns + /// + /// Vector of visible batches. + pub async fn get_visible_batches(&self, max_visible_batch_position: usize) -> Vec { + self.batch_store + .visible_record_batches(max_visible_batch_position) + } + + /// Get batch positions visible up to a specific batch position (inclusive). + /// + /// This is useful for filtering index results by visibility. + pub async fn get_max_visible_batch_positions( + &self, + max_visible_batch_position: usize, + ) -> Vec { + self.batch_store + .max_visible_batch_positions(max_visible_batch_position) + } + + /// Check if a specific batch is visible at a given visibility position. + /// + /// Returns true if the batch is visible, false if not visible or doesn't exist. + pub async fn is_batch_visible( + &self, + batch_position: usize, + max_visible_batch_position: usize, + ) -> bool { + self.batch_store + .is_batch_visible(batch_position, max_visible_batch_position) + } + + /// Scan batches visible up to a specific batch position. + /// + /// This combines `get_visible_batches` with the scan interface. + pub async fn scan_batches_at_position( + &self, + max_visible_batch_position: usize, + ) -> Result> { + Ok(self.get_visible_batches(max_visible_batch_position).await) + } + + /// Update the bloom filter with primary keys from a batch. 
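+ ///
+ /// Each row's primary-key columns are hashed together (see `compute_row_hash`) and
+ /// added to the SBBF; with the default sizing (8192 expected keys, fpp ≈ 0.057%) a
+ /// negative `might_contain_pk` answer is definitive, while a positive one may be a
+ /// false positive.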
+ fn update_bloom_filter(&mut self, batch: &RecordBatch) -> Result<()> { + let bloom = &mut self.pk_bloom_filter; + + // Get primary key columns + let pk_columns: Vec<_> = self + .pk_field_ids + .iter() + .filter_map(|&field_id| { + // Find column by field ID + self.lance_schema + .fields + .iter() + .position(|f| f.id == field_id) + .and_then(|idx| batch.column(idx).clone().into()) + }) + .collect(); + + if pk_columns.len() != self.pk_field_ids.len() { + return Err(Error::invalid_input( + "Batch is missing primary key columns", + location!(), + )); + } + + // Insert each row's primary key hash + for row_idx in 0..batch.num_rows() { + let hash = compute_row_hash(&pk_columns, row_idx); + bloom.insert_hash(hash); + } + + Ok(()) + } + + /// Mark batches as flushed to WAL. + /// + /// Updates the WAL batch mapping for use during MemTable flush. + /// Also updates the batch_store's watermark to the highest flushed batch_position. + pub fn mark_wal_flushed( + &mut self, + batch_positions: &[usize], + wal_entry_position: u64, + positions: &[usize], + ) { + for (idx, &batch_position) in batch_positions.iter().enumerate() { + self.wal_batch_mapping + .insert(batch_position, (wal_entry_position, positions[idx])); + self.flushed_batch_positions.insert(batch_position); + } + self.last_flushed_wal_entry_position = wal_entry_position; + + // Update batch_store watermark to the highest batch_position flushed (inclusive) + if let Some(&max_batch_position) = batch_positions.iter().max() { + self.batch_store + .set_max_flushed_batch_position(max_batch_position); + } + } + + /// Get or create a Dataset for reading. + /// + /// Uses caching based on the configured eventual consistency strategy: + /// - If `always_fresh` is true, always constructs a new Dataset + /// - Otherwise, returns cached Dataset if within TTL and has same batch count + /// + /// Returns None if there's no data to read. + pub async fn get_or_create_dataset(&self) -> Result> { + let current_batch_count = self.batch_count(); + if current_batch_count == 0 { + return Ok(None); + } + + // Check if we can use cached dataset + if !self.cache_config.always_fresh { + let cached = self.cached_dataset.read().await; + if let Some(ref cached_ds) = *cached { + // Check if cache is still valid (within TTL and same batch count) + if cached_ds.batch_count == current_batch_count + && cached_ds.created_at.elapsed() < self.cache_config.ttl + { + debug!( + "Using cached Dataset (batch_count={}, age={:?})", + current_batch_count, + cached_ds.created_at.elapsed() + ); + return Ok(Some(cached_ds.dataset.clone())); + } + } + } + + // Need to construct a new Dataset + debug!( + "Constructing new Dataset from batches (batch_count={})", + current_batch_count + ); + + let dataset = self.construct_dataset().await?; + + // Cache the new dataset (unless always_fresh) + if !self.cache_config.always_fresh { + let mut cached = self.cached_dataset.write().await; + *cached = Some(CachedDataset { + dataset: dataset.clone(), + created_at: Instant::now(), + batch_count: current_batch_count, + }); + } + + Ok(Some(dataset)) + } + + /// Construct a fresh Dataset from stored batches. 
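+ ///
+ /// Copies every committed batch out of the batch store and writes them to this
+ /// memtable's private `memory://` URI via `Dataset::write`, so the cost grows with
+ /// batch count; `get_or_create_dataset` amortizes this behind the TTL cache.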
+ async fn construct_dataset(&self) -> Result { + if self.batch_store.is_empty() { + return Err(Error::invalid_input( + "Cannot construct Dataset: no batches", + location!(), + )); + } + + // Get batches + let batches = self.batch_store.to_vec(); + + // Create a new Dataset with all the batches + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), self.schema.clone()); + let dataset = Dataset::write(reader, &self.dataset_uri, None).await?; + + Ok(dataset) + } + + /// Scan all data from the MemTable. + /// + /// Returns all batches for flushing to persistent storage. + pub async fn scan_batches(&self) -> Result> { + Ok(self.batch_store.to_vec()) + } + + /// Scan specific batches by their batch_positions. + pub async fn scan_batches_by_ids(&self, batch_positions: &[usize]) -> Result> { + let mut results = Vec::with_capacity(batch_positions.len()); + for &batch_position in batch_positions { + let batch = self.batch_store.get_batch(batch_position).ok_or_else(|| { + Error::invalid_input(format!("Batch {} not found", batch_position), location!()) + })?; + results.push(batch.clone()); + } + Ok(results) + } + + /// Get batches for WAL flush. + pub async fn get_batches_for_wal(&self, batch_positions: &[usize]) -> Result> { + self.scan_batches_by_ids(batch_positions).await + } + + /// Check if a primary key might exist in this MemTable. + /// + /// Uses bloom filter for fast negative lookups. + /// Returns true if the key might exist, false if definitely not present. + pub fn might_contain_pk(&self, pk_hash: u64) -> bool { + self.pk_bloom_filter.check_hash(pk_hash) + } + + /// Get the schema. + pub fn schema(&self) -> &Arc { + &self.schema + } + + /// Get the Lance schema. + pub fn lance_schema(&self) -> &Schema { + &self.lance_schema + } + + /// Get the generation number. + pub fn generation(&self) -> u64 { + self.generation + } + + /// Get total row count. + pub fn row_count(&self) -> usize { + self.batch_store.total_rows() + } + + /// Get batch count. + /// + /// Note: This is now synchronous (no longer requires async). + pub fn batch_count(&self) -> usize { + self.batch_store.len() + } + + /// Get batch count (async version for API compatibility). + #[allow(clippy::unused_async)] + pub async fn batch_count_async(&self) -> usize { + self.batch_count() + } + + /// Get estimated size in bytes. + pub fn estimated_size(&self) -> usize { + self.batch_store.estimated_bytes() + self.pk_bloom_filter.estimated_memory_size() + } + + /// Get the WAL batch mapping. + pub fn wal_batch_mapping(&self) -> &HashMap { + &self.wal_batch_mapping + } + + /// Get the last flushed WAL entry position. + pub fn last_flushed_wal_entry_position(&self) -> u64 { + self.last_flushed_wal_entry_position + } + + /// Get the bloom filter for serialization. + pub fn bloom_filter(&self) -> &Sbbf { + &self.pk_bloom_filter + } + + /// Get reference to indexes. + pub fn indexes(&self) -> Option<&IndexRegistry> { + self.indexes.as_ref().map(|arc| arc.as_ref()) + } + + /// Get the Arc-wrapped indexes (for sharing with async handler). + pub fn indexes_arc(&self) -> Option> { + self.indexes.clone() + } + + /// Take the index registry (for flushing). + /// Returns the Arc, which may be shared with async handler. + pub fn take_indexes(&mut self) -> Option> { + self.indexes.take() + } + + /// Check if all batches have been flushed to WAL. + /// + /// Uses the batch_store's watermark tracking instead of maintaining + /// a separate HashSet. 
+ pub fn all_flushed_to_wal(&self) -> bool { + self.batch_store.pending_wal_flush_count() == 0 + } + + /// Get unflushed batch IDs. + pub fn unflushed_batch_positions(&self) -> Vec { + let batch_count = self.batch_count(); + (0..batch_count) + .filter(|id| !self.flushed_batch_positions.contains(id)) + .collect() + } + + /// Get cache configuration. + pub fn cache_config(&self) -> &CacheConfig { + &self.cache_config + } + + /// Get the batch store capacity. + pub fn batch_capacity(&self) -> usize { + self.batch_store.capacity() + } + + /// Get remaining batch capacity. + pub fn remaining_batch_capacity(&self) -> usize { + self.batch_store.remaining_capacity() + } + + /// Check if batch store is full. + pub fn is_batch_store_full(&self) -> bool { + self.batch_store.is_full() + } + + /// Create a scanner for querying this MemTable. + /// + /// # Arguments + /// + /// * `max_visible_batch_position` - Maximum batch position visible (inclusive) + /// + /// # Example + /// + /// ```ignore + /// let scanner = memtable.scan(max_visible_batch_position); + /// let results = scanner + /// .project(&["id", "name"]) + /// .filter("id > 10")? + /// .try_into_batch() + /// .await?; + /// ``` + pub fn scan( + &self, + max_visible_batch_position: usize, + ) -> crate::dataset::mem_wal::scanner::MemTableScanner { + crate::dataset::mem_wal::scanner::MemTableScanner::new( + self.batch_store.clone(), + self.indexes.clone(), + self.schema.clone(), + max_visible_batch_position, + ) + } + + /// Get a clone of the batch store Arc for external use. + pub fn batch_store(&self) -> Arc { + self.batch_store.clone() + } +} + +/// Compute a hash for a row's primary key values. +fn compute_row_hash(columns: &[Arc], row_idx: usize) -> u64 { + use std::hash::{Hash, Hasher}; + + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + + for col in columns { + // Hash the scalar value at this row + let is_null = col.is_null(row_idx); + is_null.hash(&mut hasher); + + if !is_null { + // Hash based on data type + if let Some(arr) = col.as_any().downcast_ref::() { + arr.value(row_idx).hash(&mut hasher); + } else if let Some(arr) = col.as_any().downcast_ref::() { + arr.value(row_idx).hash(&mut hasher); + } else if let Some(arr) = col.as_any().downcast_ref::() { + arr.value(row_idx).hash(&mut hasher); + } else if let Some(arr) = col.as_any().downcast_ref::() { + arr.value(row_idx).hash(&mut hasher); + } + // Add more types as needed + } + } + + hasher.finish() +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field}; + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &ArrowSchema, num_rows: usize) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from_iter_values(0..num_rows as i32)), + Arc::new(StringArray::from_iter_values( + (0..num_rows).map(|i| format!("name_{}", i)), + )), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_memtable_insert() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + let batch = create_test_batch(&schema, 10); + let batch_position = memtable.insert(batch).await.unwrap(); + + assert_eq!(batch_position, 0); + assert_eq!(memtable.row_count(), 10); + assert_eq!(memtable.batch_count(), 1); + // Dataset is constructed on-demand + 
assert!(memtable.get_or_create_dataset().await.unwrap().is_some()); + } + + #[tokio::test] + async fn test_memtable_multiple_inserts() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + for i in 0..3 { + let batch = create_test_batch(&schema, 10); + let batch_position = memtable.insert(batch).await.unwrap(); + assert_eq!(batch_position, i); + } + + assert_eq!(memtable.row_count(), 30); + assert_eq!(memtable.batch_count(), 3); + } + + #[tokio::test] + async fn test_memtable_scan() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); + + let batches = memtable.scan_batches().await.unwrap(); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 15); + } + + #[tokio::test] + async fn test_memtable_wal_mapping() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + let batch_position = memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + assert!(!memtable.all_flushed_to_wal()); + + memtable.mark_wal_flushed(&[batch_position], 5, &[0]); + + assert!(memtable.all_flushed_to_wal()); + assert_eq!( + memtable.wal_batch_mapping().get(&batch_position), + Some(&(5, 0)) + ); + assert_eq!(memtable.last_flushed_wal_entry_position(), 5); + } + + #[tokio::test] + async fn test_memtable_unflushed_batches() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + let batch1 = memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + let batch2 = memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); + + assert_eq!(memtable.unflushed_batch_positions(), vec![batch1, batch2]); + + memtable.mark_wal_flushed(&[batch1], 1, &[0]); + + assert_eq!(memtable.unflushed_batch_positions(), vec![batch2]); + } + + #[tokio::test] + async fn test_memtable_visibility_tracking() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + // Insert batches at positions 0, 1, 2 + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 3)) + .await + .unwrap(); + + // max_visible_batch_position=1 means positions 0 and 1 are visible + let visible = memtable.get_visible_batches(1).await; + assert_eq!(visible.len(), 2); + let total_rows: usize = visible.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 15); // 10 + 5 + + // max_visible_batch_position=2 means all batches are visible + let visible = memtable.get_visible_batches(2).await; + assert_eq!(visible.len(), 3); + + // max_visible_batch_position=0 means only position 0 is visible + let visible = memtable.get_visible_batches(0).await; + assert_eq!(visible.len(), 1); + } + + #[tokio::test] + async fn test_memtable_get_max_visible_batch_positions() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + // Insert batches at positions 0, 1, 2 + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 3)) + 
.await + .unwrap(); + + // max_visible_batch_position=1 means positions 0 and 1 visible + let visible_ids = memtable.get_max_visible_batch_positions(1).await; + assert_eq!(visible_ids, vec![0, 1]); + + // max_visible_batch_position=2 means all positions visible + let visible_ids = memtable.get_max_visible_batch_positions(2).await; + assert_eq!(visible_ids, vec![0, 1, 2]); + + // max_visible_batch_position=0 means only position 0 visible + let visible_ids = memtable.get_max_visible_batch_positions(0).await; + assert_eq!(visible_ids, vec![0]); + } + + #[tokio::test] + async fn test_memtable_is_batch_visible() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); // position 0 + memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); // position 1 + memtable + .insert(create_test_batch(&schema, 3)) + .await + .unwrap(); // position 2 + + // batch_position 0 is visible when max_visible_batch_position >= 0 + assert!(memtable.is_batch_visible(0, 0).await); + assert!(memtable.is_batch_visible(0, 1).await); + assert!(memtable.is_batch_visible(0, 2).await); + + // batch_position 2 is only visible when max_visible_batch_position >= 2 + assert!(!memtable.is_batch_visible(2, 1).await); + assert!(memtable.is_batch_visible(2, 2).await); + assert!(memtable.is_batch_visible(2, 3).await); + + // Non-existent batch + assert!(!memtable.is_batch_visible(999, 100).await); + } + + #[tokio::test] + async fn test_memtable_scan_batches_at_position() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); // position 0 + memtable + .insert(create_test_batch(&schema, 5)) + .await + .unwrap(); // position 1 + + let batches = memtable.scan_batches_at_position(0).await.unwrap(); + assert_eq!(batches.len(), 1); + assert_eq!(batches[0].num_rows(), 10); + + let batches = memtable.scan_batches_at_position(1).await.unwrap(); + assert_eq!(batches.len(), 2); + } + + #[tokio::test] + async fn test_memtable_capacity() { + let schema = create_test_schema(); + let mut memtable = + MemTable::with_capacity(schema.clone(), 1, vec![], CacheConfig::default(), 3).unwrap(); + + assert_eq!(memtable.batch_capacity(), 3); + assert_eq!(memtable.remaining_batch_capacity(), 3); + assert!(!memtable.is_batch_store_full()); + + // Fill up the store + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + + assert!(memtable.is_batch_store_full()); + assert_eq!(memtable.remaining_batch_capacity(), 0); + + // Next insert should fail + let result = memtable.insert(create_test_batch(&schema, 10)).await; + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_memtable_should_flush() { + let schema = create_test_schema(); + let mut memtable = + MemTable::with_capacity(schema.clone(), 1, vec![], CacheConfig::default(), 2).unwrap(); + + // Not full yet + assert!(!memtable.should_flush(1024 * 1024)); + + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + + // Now full + assert!(memtable.should_flush(1024 * 1024)); + } +} diff --git a/rust/lance/src/dataset/mem_wal/memtable/perf_test.rs 
b/rust/lance/src/dataset/mem_wal/memtable/perf_test.rs new file mode 100644 index 00000000000..5696b9083f2 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/memtable/perf_test.rs @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Performance tests for MemTable to identify bottlenecks with large batch counts. + +#[cfg(test)] +#[allow(clippy::print_stdout)] +mod tests { + use std::sync::Arc; + use std::time::Instant; + + use arrow_array::{Int32Array, RecordBatch, StringArray}; + use arrow_schema::{DataType, Field, Schema as ArrowSchema}; + + use crate::dataset::mem_wal::memtable::{CacheConfig, MemTable}; + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &ArrowSchema, start_id: i32, num_rows: usize) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from_iter_values( + start_id..start_id + num_rows as i32, + )), + Arc::new(StringArray::from_iter_values( + (0..num_rows).map(|i| format!("name_{}", start_id as usize + i)), + )), + ], + ) + .unwrap() + } + + /// Test that inserting many batches is O(1) per insert. + #[tokio::test] + async fn test_memtable_insert_many_batches_performance() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + let num_batches = 1000; + let batch_size = 20; + + let start = Instant::now(); + for i in 0..num_batches { + let batch = create_test_batch(&schema, (i * batch_size) as i32, batch_size); + memtable.insert(batch).await.unwrap(); + } + let insert_duration = start.elapsed(); + + println!( + "Inserted {} batches ({} rows) in {:?} ({:.2} batches/sec)", + num_batches, + num_batches * batch_size, + insert_duration, + num_batches as f64 / insert_duration.as_secs_f64() + ); + + assert_eq!(memtable.row_count(), num_batches * batch_size); + assert_eq!(memtable.batch_count(), num_batches); + + // Insert should be fast - less than 100ms per 1000 batches + assert!( + insert_duration.as_millis() < 5000, + "Insert took too long: {:?}", + insert_duration + ); + } + + /// Test that get_or_create_dataset with many fragments is the bottleneck. + /// This test demonstrates the O(n^2) issue when constructing a Dataset + /// from many batches. 
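The O(n^2) behaviour this test probes comes from the manifest listing every prior fragment: if appending the i-th single-batch fragment costs roughly i units of manifest work, the total over n appends is the triangular number n(n+1)/2. A small std-only cost model; the units are illustrative, not a measurement.

fn main() {
    // If appending fragment i must rewrite a manifest that already lists i entries,
    // n single-batch fragments cost ~n(n+1)/2 manifest-entry writes in total.
    let total_cost = |n: u64| (1..=n).sum::<u64>();
    for n in [100u64, 1_000, 10_000] {
        println!("{:>6} fragments -> ~{} manifest-entry writes", n, total_cost(n));
    }
    // About 5 * 10^7 manifest-entry writes for 10k single-batch fragments,
    // versus a single fragment entry if the rows are written once as one fragment.
    assert_eq!(total_cost(10_000), 10_000 * 10_001 / 2);
}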
+ #[tokio::test] + async fn test_memtable_construct_dataset_bottleneck() { + let schema = create_test_schema(); + // Use larger capacity for this test + let mut memtable = + MemTable::with_capacity(schema.clone(), 1, vec![], CacheConfig::default(), 2500) + .unwrap(); + + // Insert batches in stages and measure construct_dataset time + let stages = vec![100, 500, 1000, 2000]; + + for &target in &stages { + // Insert more batches to reach target + let current = memtable.batch_count(); + for i in current..target { + let batch = create_test_batch(&schema, (i * 20) as i32, 20); + memtable.insert(batch).await.unwrap(); + } + + // Measure get_or_create_dataset time + // Note: We need to invalidate cache to force reconstruction + let start = Instant::now(); + let dataset = memtable.get_or_create_dataset().await.unwrap(); + let construct_duration = start.elapsed(); + + let row_count = if let Some(ds) = &dataset { + ds.count_rows(None).await.unwrap_or(0) + } else { + 0 + }; + println!( + "construct_dataset with {} fragments: {:?} ({} rows)", + target, construct_duration, row_count + ); + } + } + + /// Test scanning batches directly from FragmentStore (should be fast). + #[tokio::test] + async fn test_fragment_store_get_all_batches_performance() { + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + let num_batches = 1000; + let batch_size = 20; + + // Insert batches + for i in 0..num_batches { + let batch = create_test_batch(&schema, (i * batch_size) as i32, batch_size); + memtable.insert(batch).await.unwrap(); + } + + // Measure scan_batches time + let start = Instant::now(); + let batches = memtable.scan_batches().await.unwrap(); + let duration = start.elapsed(); + + println!("scan_batches with {} batches: {:?}", num_batches, duration); + assert_eq!(batches.len(), num_batches); + + // get_all_batches should be very fast (just cloning references) + assert!( + duration.as_millis() < 100, + "get_all_batches took too long: {:?}", + duration + ); + } + + /// Test that the issue is Dataset::write with many batches creating + /// many fragments (O(n) manifest growth per fragment = O(n^2) total). + #[tokio::test] + async fn test_dataset_write_many_batches_is_slow() { + use crate::Dataset; + use arrow_array::RecordBatchIterator; + + let schema = create_test_schema(); + + // Create batches + let num_batches = 500; // Use fewer batches to avoid timeout + let batch_size = 20; + let batches: Vec = (0..num_batches) + .map(|i| create_test_batch(&schema, (i * batch_size) as i32, batch_size)) + .collect(); + + // Measure Dataset::write time with many batches + let uri = format!("memory://test_many_batches_{}", uuid::Uuid::new_v4()); + let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone()); + + let start = Instant::now(); + let dataset = Dataset::write(reader, &uri, None).await.unwrap(); + let write_duration = start.elapsed(); + + println!( + "Dataset::write with {} batches: {:?} ({} fragments)", + num_batches, + write_duration, + dataset.get_fragments().len() + ); + + // Note: This creates num_batches fragments! + // Each fragment addition has O(n) manifest overhead. + // With n batches, total is O(n^2). + } + + /// Test with 10000 batches to reproduce the benchmark hang. 
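The remedy measured at the end of this file is to merge the small in-memory batches into a single RecordBatch before writing, so only one fragment is created. That merge step can be exercised standalone with arrow's `concat_batches`; the sketch below uses arrow only (no Lance types) and mirrors the batch shapes of these perf tests.

use std::sync::Arc;
use arrow::compute::concat_batches;
use arrow_array::{Int32Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

    // 500 tiny batches of 20 rows each, mimicking the perf test's shape.
    let batches: Vec<RecordBatch> = (0..500)
        .map(|i| {
            RecordBatch::try_new(
                schema.clone(),
                vec![Arc::new(Int32Array::from_iter_values(i * 20..(i + 1) * 20))],
            )
        })
        .collect::<Result<_, _>>()?;

    // One concat => one batch => a single fragment when the result is written out.
    let combined = concat_batches(&schema, batches.iter())?;
    assert_eq!(combined.num_rows(), 500 * 20);
    Ok(())
}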
+ #[tokio::test] + async fn test_memtable_10000_batches() { + let schema = create_test_schema(); + // Use larger capacity for this test (10000 batches needed) + let mut memtable = + MemTable::with_capacity(schema.clone(), 1, vec![], CacheConfig::default(), 10500) + .unwrap(); + + let num_batches = 10000; + let batch_size = 20; + + println!("Starting insert of {} batches...", num_batches); + let start = Instant::now(); + for i in 0..num_batches { + let batch = create_test_batch(&schema, (i * batch_size) as i32, batch_size); + memtable.insert(batch).await.unwrap(); + if i % 1000 == 0 && i > 0 { + println!(" Inserted {} batches in {:?}", i, start.elapsed()); + } + } + let insert_duration = start.elapsed(); + println!( + "Inserted {} batches ({} rows) in {:?}", + num_batches, + num_batches * batch_size, + insert_duration + ); + + // Now try to construct dataset - this might hang + println!("Constructing dataset..."); + let start = Instant::now(); + let dataset = memtable.get_or_create_dataset().await.unwrap(); + let construct_duration = start.elapsed(); + let row_count = if let Some(ds) = &dataset { + ds.count_rows(None).await.unwrap_or(0) + } else { + 0 + }; + println!( + "Constructed dataset in {:?} ({} rows)", + construct_duration, row_count + ); + } + + /// Test the proposed fix: concat all batches before writing. + #[tokio::test] + async fn test_concat_batches_before_write_is_fast() { + use crate::Dataset; + use arrow::compute::concat_batches; + use arrow_array::RecordBatchIterator; + + let schema = create_test_schema(); + + // Create batches + let num_batches = 500; + let batch_size = 20; + let batches: Vec = (0..num_batches) + .map(|i| create_test_batch(&schema, (i * batch_size) as i32, batch_size)) + .collect(); + + // Concat all batches into one + let start = Instant::now(); + let combined = concat_batches(&schema, batches.iter()).unwrap(); + let concat_duration = start.elapsed(); + println!( + "concat_batches: {:?} ({} rows)", + concat_duration, + combined.num_rows() + ); + + // Write single batch + let uri = format!("memory://test_single_batch_{}", uuid::Uuid::new_v4()); + let reader = RecordBatchIterator::new([Ok(combined)], schema.clone()); + + let start = Instant::now(); + let dataset = Dataset::write(reader, &uri, None).await.unwrap(); + let write_duration = start.elapsed(); + + println!( + "Dataset::write with 1 combined batch: {:?} ({} fragments)", + write_duration, + dataset.get_fragments().len() + ); + + // This should be MUCH faster because we only create 1 fragment + assert_eq!(dataset.get_fragments().len(), 1); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner.rs b/rust/lance/src/dataset/mem_wal/scanner.rs new file mode 100644 index 00000000000..29ec96f6b25 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner.rs @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Read path for MemTable. +//! +//! This module provides query execution over MemTable data using DataFusion. +//! +//! ## Architecture +//! +//! ```text +//! MemTableScanner (Builder) +//! | +//! create_plan() +//! | +//! +------------+------------+ +//! | | | +//! Full Scan Index Query Split Plan +//! | | | +//! v v v +//! MemTableScanExec IndexExec SplitPlanExec +//! | | / \ +//! +------------+--------- \ +//! | IndexExec + RangeScanExec +//! DataFusion Execution | +//! | UnionExec +//! v +//! SendableRecordBatchStream +//! ``` +//! +//! ## Key Features +//! +//! 
- **MVCC Visibility**: All scans respect visibility sequence numbers +//! - **Index Support**: BTree, IVF-PQ vector, and FTS indexes +//! - **Split Plans**: Handle partial index coverage gracefully +//! - **DataFusion Integration**: Full ExecutionPlan compatibility + +mod builder; +mod exec; + +pub use builder::MemTableScanner; +pub use exec::{ + BTreeIndexExec, FtsIndexExec, MemTableRangeScanExec, MemTableScanExec, SplitPlanExec, + VectorIndexExec, +}; diff --git a/rust/lance/src/dataset/mem_wal/scanner/builder.rs b/rust/lance/src/dataset/mem_wal/scanner/builder.rs new file mode 100644 index 00000000000..af27e48f309 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/builder.rs @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! MemTableScanner builder for creating query execution plans. + +use std::sync::Arc; + +use arrow_array::{Array, RecordBatch}; +use arrow_schema::SchemaRef; +use datafusion::common::{ScalarValue, ToDFSchema}; +use datafusion::physical_plan::limit::GlobalLimitExec; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ExecutionPlan, SendableRecordBatchStream}; +use datafusion::prelude::{Expr, SessionContext}; +use futures::stream::StreamExt; +use futures::TryStreamExt; +use lance_core::{Error, Result}; +use lance_datafusion::planner::Planner; +use lance_linalg::distance::DistanceType; +use snafu::location; + +use super::exec::{ + BTreeIndexExec, FtsIndexExec, MemTableRangeScanExec, MemTableScanExec, SplitPlanExec, + VectorIndexExec, +}; +use crate::dataset::mem_wal::write::{CoverageResult, IndexRegistry, LockFreeBatchStore}; + +/// A simple filter execution plan that applies Arrow filter kernels to avoid +/// DataFusion's FilterExec analysis bugs. 
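`FilteredScanExec`, defined next, evaluates the predicate itself and applies arrow's filter kernel to each batch instead of relying on DataFusion's `FilterExec`. The kernel call it builds on can be tried in isolation like this; the boolean mask is a hand-built stand-in for the evaluated predicate, not output of the planner.

use std::sync::Arc;
use arrow_array::{BooleanArray, Int32Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};
use arrow_select::filter::filter_record_batch;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        schema,
        vec![Arc::new(Int32Array::from(vec![1, 5, 12, 20]))],
    )?;

    // Stand-in for `predicate.evaluate(&batch)`: keep rows where id > 10.
    let mask = BooleanArray::from(vec![false, false, true, true]);
    let filtered = filter_record_batch(&batch, &mask)?;
    assert_eq!(filtered.num_rows(), 2);
    Ok(())
}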
+struct FilteredScanExec { + input: Arc, + predicate: datafusion_physical_expr::PhysicalExprRef, + filter_expr: Expr, + schema: SchemaRef, + properties: datafusion::physical_plan::PlanProperties, +} + +impl std::fmt::Debug for FilteredScanExec { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("FilteredScanExec") + .field("filter_expr", &self.filter_expr) + .finish() + } +} + +impl FilteredScanExec { + fn new(input: Arc, filter_expr: Expr, schema: SchemaRef) -> Result { + let planner = Planner::new(schema.clone()); + let predicate = planner.create_physical_expr(&filter_expr)?; + let properties = input.properties().clone(); + + Ok(Self { + input, + predicate, + filter_expr, + schema, + properties, + }) + } +} + +impl datafusion::physical_plan::DisplayAs for FilteredScanExec { + fn fmt_as( + &self, + _t: datafusion::physical_plan::DisplayFormatType, + f: &mut std::fmt::Formatter<'_>, + ) -> std::fmt::Result { + write!(f, "FilteredScanExec: expr={:?}", self.filter_expr) + } +} + +impl ExecutionPlan for FilteredScanExec { + fn name(&self) -> &str { + "FilteredScanExec" + } + + fn as_any(&self) -> &dyn std::any::Any { + self + } + + fn schema(&self) -> SchemaRef { + self.schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![&self.input] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> datafusion::error::Result> { + if children.len() != 1 { + return Err(datafusion::error::DataFusionError::Internal( + "FilteredScanExec requires exactly one child".to_string(), + )); + } + Ok(Arc::new(Self { + input: children[0].clone(), + predicate: self.predicate.clone(), + filter_expr: self.filter_expr.clone(), + schema: self.schema.clone(), + properties: self.properties.clone(), + })) + } + + fn properties(&self) -> &datafusion::physical_plan::PlanProperties { + &self.properties + } + + fn execute( + &self, + partition: usize, + context: Arc, + ) -> datafusion::error::Result { + let input_stream = self.input.execute(partition, context)?; + let predicate = self.predicate.clone(); + let schema = self.schema.clone(); + + let filtered_stream = input_stream.map(move |batch_result| { + let batch = batch_result?; + if batch.num_rows() == 0 { + return Ok(batch); + } + + // Evaluate the predicate to get a boolean array + let filter_array = predicate.evaluate(&batch)?.into_array(batch.num_rows())?; + + let filter_array = filter_array + .as_any() + .downcast_ref::() + .ok_or_else(|| { + datafusion::error::DataFusionError::Internal( + "Filter predicate did not evaluate to boolean".to_string(), + ) + })?; + + // Apply the filter + let filtered_batch = arrow_select::filter::filter_record_batch(&batch, filter_array)?; + + Ok(filtered_batch) + }); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + schema, + filtered_stream.boxed(), + ))) + } + + fn partition_statistics( + &self, + _partition: Option, + ) -> datafusion::error::Result { + Ok(datafusion::common::stats::Statistics { + num_rows: datafusion::common::stats::Precision::Absent, + total_byte_size: datafusion::common::stats::Precision::Absent, + column_statistics: vec![], + }) + } +} + +/// Vector search query parameters. +#[derive(Debug, Clone)] +pub struct VectorQuery { + /// Column name containing vectors. + pub column: String, + /// Query vector. + pub query_vector: Arc, + /// Number of results to return. + pub k: usize, + /// Number of probes for IVF search. + pub nprobes: usize, + /// Distance type for search. + pub distance_type: DistanceType, +} + +/// Full-text search query parameters. 
+#[derive(Debug, Clone)] +pub struct FtsQuery { + /// Column name to search. + pub column: String, + /// Search term. + pub query: String, +} + +/// Scalar predicate for BTree index queries. +#[derive(Debug, Clone)] +pub enum ScalarPredicate { + /// Exact match: column = value. + Eq { column: String, value: ScalarValue }, + /// Range query: column in [lower, upper). + Range { + column: String, + lower: Option, + upper: Option, + }, + /// IN query: column in (values...). + In { + column: String, + values: Vec, + }, +} + +impl ScalarPredicate { + /// Get the column name for this predicate. + pub fn column(&self) -> &str { + match self { + Self::Eq { column, .. } => column, + Self::Range { column, .. } => column, + Self::In { column, .. } => column, + } + } +} + +/// Scanner builder for querying MemTable data. +/// +/// Provides a builder pattern similar to Lance's Scanner interface +/// for constructing DataFusion execution plans over in-memory data. +/// +/// # Example +/// +/// ```ignore +/// let scanner = MemTableScanner::new(batch_store, indexes, schema, max_visible_batch_position) +/// .project(&["id", "name"])? +/// .filter("id > 10")? +/// .limit(100, None)?; +/// +/// let stream = scanner.try_into_stream().await?; +/// ``` +pub struct MemTableScanner { + batch_store: Arc, + indexes: Option>, + schema: SchemaRef, + max_visible_batch_position: usize, + projection: Option>, + filter: Option, + limit: Option, + offset: Option, + nearest: Option, + full_text_query: Option, + use_index: bool, + batch_size: Option, +} + +impl MemTableScanner { + /// Create a new scanner. + /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing the data + /// * `indexes` - Optional index registry for indexed queries + /// * `schema` - Schema of the data + /// * `max_visible_batch_position` - MVCC visibility sequence number + pub fn new( + batch_store: Arc, + indexes: Option>, + schema: SchemaRef, + max_visible_batch_position: usize, + ) -> Self { + Self { + batch_store, + indexes, + schema, + max_visible_batch_position, + projection: None, + filter: None, + limit: None, + offset: None, + nearest: None, + full_text_query: None, + use_index: true, + batch_size: None, + } + } + + /// Project only the specified columns. + pub fn project(&mut self, columns: &[&str]) -> &mut Self { + self.projection = Some(columns.iter().map(|s| s.to_string()).collect()); + self + } + + /// Set a filter expression using SQL-like syntax. + pub fn filter(&mut self, filter_expr: &str) -> Result<&mut Self> { + let ctx = SessionContext::new(); + let df_schema = self.schema.clone().to_dfschema().map_err(|e| { + Error::invalid_input(format!("Failed to create DFSchema: {}", e), location!()) + })?; + let expr = ctx.parse_sql_expr(filter_expr, &df_schema).map_err(|e| { + Error::invalid_input( + format!("Failed to parse filter expression: {}", e), + location!(), + ) + })?; + self.filter = Some(expr); + Ok(self) + } + + /// Set a filter expression directly. + pub fn filter_expr(&mut self, expr: Expr) -> &mut Self { + self.filter = Some(expr); + self + } + + /// Limit the number of results. + pub fn limit(&mut self, limit: usize, offset: Option) -> &mut Self { + self.limit = Some(limit); + self.offset = offset; + self + } + + /// Set up a vector similarity search. 
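The builder's `filter()` method above turns a SQL string into a DataFusion `Expr` in two steps: convert the Arrow schema to a `DFSchema`, then call `SessionContext::parse_sql_expr`. A standalone sketch of those two calls; exact method signatures depend on the DataFusion version in use.

use std::sync::Arc;
use arrow_schema::{DataType, Field, Schema};
use datafusion::common::ToDFSchema;
use datafusion::prelude::SessionContext;

fn main() -> datafusion::error::Result<()> {
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let ctx = SessionContext::new();

    // Same two steps as filter(): Arrow schema -> DFSchema, SQL string -> logical Expr.
    let df_schema = schema.to_dfschema()?;
    let expr = ctx.parse_sql_expr("id > 10", &df_schema)?;
    println!("parsed filter: {expr:?}");
    Ok(())
}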
+ pub fn nearest( + &mut self, + column: &str, + query: Arc, + k: usize, + nprobes: Option, + distance_type: Option, + ) -> &mut Self { + self.nearest = Some(VectorQuery { + column: column.to_string(), + query_vector: query, + k, + nprobes: nprobes.unwrap_or(20), + distance_type: distance_type.unwrap_or(DistanceType::L2), + }); + self + } + + /// Set up a full-text search. + pub fn full_text_search(&mut self, column: &str, query: &str) -> &mut Self { + self.full_text_query = Some(FtsQuery { + column: column.to_string(), + query: query.to_string(), + }); + self + } + + /// Enable or disable index usage. + pub fn use_index(&mut self, use_index: bool) -> &mut Self { + self.use_index = use_index; + self + } + + /// Set the batch size for output. + pub fn batch_size(&mut self, size: usize) -> &mut Self { + self.batch_size = Some(size); + self + } + + /// Execute the scan and return a stream of record batches. + pub async fn try_into_stream(&self) -> Result { + let plan = self.create_plan().await?; + let ctx = SessionContext::new(); + let task_ctx = ctx.task_ctx(); + plan.execute(0, task_ctx) + .map_err(|e| Error::io(format!("Failed to execute plan: {}", e), location!())) + } + + /// Execute the scan and collect all results into a single RecordBatch. + pub async fn try_into_batch(&self) -> Result { + let stream = self.try_into_stream().await?; + let batches: Vec = stream + .try_collect() + .await + .map_err(|e| Error::io(format!("Failed to collect batches: {}", e), location!()))?; + + if batches.is_empty() { + return Ok(RecordBatch::new_empty(self.output_schema())); + } + + arrow_select::concat::concat_batches(&self.output_schema(), &batches) + .map_err(|e| Error::io(format!("Failed to concatenate batches: {}", e), location!())) + } + + /// Count the number of rows that match the query. + pub async fn count_rows(&self) -> Result { + let stream = self.try_into_stream().await?; + let batches: Vec = stream + .try_collect() + .await + .map_err(|e| Error::io(format!("Failed to count rows: {}", e), location!()))?; + + Ok(batches.iter().map(|b| b.num_rows() as u64).sum()) + } + + /// Get the output schema after projection. + pub fn output_schema(&self) -> SchemaRef { + if let Some(ref projection) = self.projection { + let fields: Vec<_> = projection + .iter() + .filter_map(|name| self.schema.field_with_name(name).ok().cloned()) + .collect(); + Arc::new(arrow_schema::Schema::new(fields)) + } else { + self.schema.clone() + } + } + + /// Create the execution plan based on the query configuration. + pub async fn create_plan(&self) -> Result> { + // Determine which type of plan to create + if let Some(ref vector_query) = self.nearest { + return self.plan_vector_search(vector_query).await; + } + + if let Some(ref fts_query) = self.full_text_query { + return self.plan_fts_search(fts_query).await; + } + + // Check if we can use a BTree index for the filter + if self.use_index { + if let Some(predicate) = self.extract_btree_predicate() { + if self.has_btree_index(predicate.column()) { + return self.plan_btree_query(&predicate).await; + } + } + } + + // Fall back to full scan + self.plan_full_scan().await + } + + /// Plan a full table scan. 
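`output_schema()` above derives the projected schema by looking fields up by name and keeping them in projection order. A standalone sketch of that derivation; the `project_schema` helper is an illustrative name, not part of the scanner API.

use std::sync::Arc;
use arrow_schema::{DataType, Field, Schema, SchemaRef};

// Keep only the projected fields, in projection order; unknown names are skipped.
fn project_schema(schema: &SchemaRef, projection: &[&str]) -> SchemaRef {
    let fields: Vec<Field> = projection
        .iter()
        .filter_map(|name| schema.field_with_name(name).ok().cloned())
        .collect();
    Arc::new(Schema::new(fields))
}

fn main() {
    let schema: SchemaRef = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("name", DataType::Utf8, true),
        Field::new("score", DataType::Float32, false),
    ]));
    let projected = project_schema(&schema, &["name", "id"]);
    assert_eq!(projected.fields().len(), 2);
    assert_eq!(projected.field(0).name(), "name"); // projection order is preserved
}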
+ async fn plan_full_scan(&self) -> Result> { + let projection_indices = self.compute_projection_indices()?; + + let scan = MemTableScanExec::new( + self.batch_store.clone(), + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + ); + + let mut plan: Arc = Arc::new(scan); + + // Apply filter if present - use inline filtering to avoid DataFusion bugs + if let Some(ref filter) = self.filter { + let filter_expr = filter.clone(); + let schema = plan.schema(); + plan = Arc::new(FilteredScanExec::new(plan, filter_expr, schema)?); + } + + // Apply limit if present + if let Some(limit) = self.limit { + plan = Arc::new(GlobalLimitExec::new( + plan, + self.offset.unwrap_or(0), + Some(limit), + )); + } + + Ok(plan) + } + + /// Plan a BTree index query with potential split plan for partial coverage. + async fn plan_btree_query( + &self, + predicate: &ScalarPredicate, + ) -> Result> { + let coverage = self.get_btree_coverage(predicate.column()); + let projection_indices = self.compute_projection_indices()?; + + match coverage { + CoverageResult::Full => { + // Index covers all visible data + let index_exec = BTreeIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + predicate.clone(), + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + )?; + self.apply_post_index_ops(Arc::new(index_exec)).await + } + CoverageResult::Partial { + max_indexed_batch_position, + } => { + // Create split plan: index for covered portion, scan for uncovered + let indexed_plan = BTreeIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + predicate.clone(), + max_indexed_batch_position, // Only query up to indexed portion + projection_indices.clone(), + self.output_schema(), + )?; + + let unindexed_plan = MemTableRangeScanExec::new( + self.batch_store.clone(), + max_indexed_batch_position, // > this + self.max_visible_batch_position, // <= this + projection_indices, + self.output_schema(), + self.filter.clone(), + ); + + let split = SplitPlanExec::new(Arc::new(indexed_plan), Arc::new(unindexed_plan)); + self.apply_post_index_ops(Arc::new(split)).await + } + CoverageResult::None => { + // Fall back to full scan with filter + self.plan_full_scan().await + } + } + } + + /// Plan a vector similarity search. 
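`plan_btree_query` above splits the visible range at the index's high-water mark: positions up to `max_indexed_batch_position` are answered by the index, the remainder by a range scan. A std-only model of that split decision; the `Coverage` enum and `split_ranges` helper are illustrative, not the crate's `CoverageResult`.

// Illustrative model of the split-plan decision; not the crate's types.
enum Coverage {
    Full,
    Partial { max_indexed_batch_position: usize },
    None,
}

// Returns (indexed_range, unindexed_range) of batch positions to read, both inclusive.
fn split_ranges(
    coverage: Coverage,
    max_visible: usize,
) -> (Option<(usize, usize)>, Option<(usize, usize)>) {
    match coverage {
        Coverage::Full => (Some((0, max_visible)), None),
        Coverage::Partial { max_indexed_batch_position: idx } if idx < max_visible => {
            // Index answers positions 0..=idx; a raw scan covers (idx, max_visible].
            (Some((0, idx)), Some((idx + 1, max_visible)))
        }
        Coverage::Partial { .. } => (Some((0, max_visible)), None),
        Coverage::None => (None, Some((0, max_visible))),
    }
}

fn main() {
    assert_eq!(
        split_ranges(Coverage::Partial { max_indexed_batch_position: 3 }, 7),
        (Some((0, 3)), Some((4, 7)))
    );
    assert_eq!(split_ranges(Coverage::Full, 7), (Some((0, 7)), None));
    assert_eq!(split_ranges(Coverage::None, 7), (None, Some((0, 7))));
}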
+ async fn plan_vector_search(&self, query: &VectorQuery) -> Result> { + let coverage = self.get_vector_coverage(&query.column); + let projection_indices = self.compute_projection_indices()?; + + match coverage { + CoverageResult::Full => { + // Index covers all visible data + let index_exec = VectorIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + query.clone(), + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + )?; + self.apply_post_index_ops(Arc::new(index_exec)).await + } + CoverageResult::Partial { + max_indexed_batch_position, + } => { + // Create split plan: index for covered, flat scan for uncovered + let indexed_plan = VectorIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + VectorQuery { + k: query.k * 2, // Over-fetch from index + ..query.clone() + }, + max_indexed_batch_position, + projection_indices.clone(), + self.output_schema(), + )?; + + // For unindexed portion, we need a flat vector scan + // For now, fall back to range scan (flat search would be added later) + let unindexed_plan = MemTableRangeScanExec::new( + self.batch_store.clone(), + max_indexed_batch_position, + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + None, // No filter for vector search - handled by distance calculation + ); + + let split = SplitPlanExec::new(Arc::new(indexed_plan), Arc::new(unindexed_plan)); + self.apply_post_index_ops(Arc::new(split)).await + } + CoverageResult::None => { + // No index available - fall back to full scan + // Ideally would do flat vector search, but for now just scan + self.plan_full_scan().await + } + } + } + + /// Plan a full-text search. + async fn plan_fts_search(&self, query: &FtsQuery) -> Result> { + let coverage = self.get_fts_coverage(&query.column); + let projection_indices = self.compute_projection_indices()?; + + match coverage { + CoverageResult::Full => { + let index_exec = FtsIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + query.clone(), + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + )?; + self.apply_post_index_ops(Arc::new(index_exec)).await + } + CoverageResult::Partial { + max_indexed_batch_position, + } => { + let indexed_plan = FtsIndexExec::new( + self.batch_store.clone(), + self.indexes.clone().unwrap(), + query.clone(), + max_indexed_batch_position, + projection_indices.clone(), + self.output_schema(), + )?; + + let unindexed_plan = MemTableRangeScanExec::new( + self.batch_store.clone(), + max_indexed_batch_position, + self.max_visible_batch_position, + projection_indices, + self.output_schema(), + None, // FTS filter would need text search logic + ); + + let split = SplitPlanExec::new(Arc::new(indexed_plan), Arc::new(unindexed_plan)); + self.apply_post_index_ops(Arc::new(split)).await + } + CoverageResult::None => self.plan_full_scan().await, + } + } + + /// Apply limit and other post-processing operations. + async fn apply_post_index_ops( + &self, + plan: Arc, + ) -> Result> { + let mut result = plan; + + if let Some(limit) = self.limit { + result = Arc::new(GlobalLimitExec::new( + result, + self.offset.unwrap_or(0), + Some(limit), + )); + } + + Ok(result) + } + + /// Compute column indices for projection. 
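Under partial coverage, the vector path above over-fetches (`k * 2`) from the indexed arm before the split plan combines it with the unindexed scan. One plausible follow-up, once a flat-search arm exists, is to merge both candidate sets and keep the global top-k; the sketch below is a hypothetical model of that re-ranking step, not what `SplitPlanExec` currently does.

// Merge indexed and flat-scanned candidates (row id, distance) and keep the k nearest.
fn merge_top_k(
    mut indexed: Vec<(u64, f32)>,
    mut scanned: Vec<(u64, f32)>,
    k: usize,
) -> Vec<(u64, f32)> {
    indexed.append(&mut scanned);
    indexed.sort_by(|a, b| a.1.total_cmp(&b.1)); // ascending distance
    indexed.truncate(k);
    indexed
}

fn main() {
    let from_index = vec![(10, 0.3), (4, 0.7), (8, 0.9), (2, 1.4)]; // over-fetched 2*k
    let from_scan = vec![(21, 0.1), (25, 1.0)];                     // unindexed tail
    let top = merge_top_k(from_index, from_scan, 2);
    assert_eq!(top, vec![(21, 0.1), (10, 0.3)]);
}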
+ fn compute_projection_indices(&self) -> Result>> { + if let Some(ref columns) = self.projection { + let indices: Result> = columns + .iter() + .map(|name| { + self.schema + .column_with_name(name) + .map(|(idx, _)| idx) + .ok_or_else(|| { + Error::invalid_input( + format!("Column '{}' not found in schema", name), + location!(), + ) + }) + }) + .collect(); + Ok(Some(indices?)) + } else { + Ok(None) + } + } + + /// Extract a BTree-compatible predicate from the filter. + fn extract_btree_predicate(&self) -> Option { + let filter = self.filter.as_ref()?; + + // Simple pattern matching for common predicates + match filter { + Expr::BinaryExpr(binary) => { + if let (Expr::Column(col), Expr::Literal(lit, _)) = + (binary.left.as_ref(), binary.right.as_ref()) + { + match binary.op { + datafusion::logical_expr::Operator::Eq => { + return Some(ScalarPredicate::Eq { + column: col.name.clone(), + value: lit.clone(), + }); + } + datafusion::logical_expr::Operator::Lt + | datafusion::logical_expr::Operator::LtEq => { + return Some(ScalarPredicate::Range { + column: col.name.clone(), + lower: None, + upper: Some(lit.clone()), + }); + } + datafusion::logical_expr::Operator::Gt + | datafusion::logical_expr::Operator::GtEq => { + return Some(ScalarPredicate::Range { + column: col.name.clone(), + lower: Some(lit.clone()), + upper: None, + }); + } + _ => {} + } + } + } + Expr::InList(in_list) => { + if let Expr::Column(col) = in_list.expr.as_ref() { + let values: Vec = in_list + .list + .iter() + .filter_map(|e| { + if let Expr::Literal(lit, _) = e { + Some(lit.clone()) + } else { + None + } + }) + .collect(); + + if values.len() == in_list.list.len() { + return Some(ScalarPredicate::In { + column: col.name.clone(), + values, + }); + } + } + } + _ => {} + } + + None + } + + /// Check if a BTree index exists for a column. + fn has_btree_index(&self, column: &str) -> bool { + self.indexes + .as_ref() + .and_then(|idx| { + // Try to find an index that covers this column + // The index registry doesn't expose a direct lookup by column, + // but we can check coverage which indicates index existence + let coverage = idx.get_coverage(); + coverage + .btree_coverage + .iter() + .find(|(_, &max_indexed_batch_position)| max_indexed_batch_position > 0) + .and_then(|(name, _)| idx.get_btree(name)) + .filter(|btree| btree.column_name() == column) + }) + .is_some() + } + + /// Get BTree index coverage for a column. + fn get_btree_coverage(&self, column: &str) -> CoverageResult { + self.indexes + .as_ref() + .and_then(|idx| { + let coverage = idx.get_coverage(); + // Find an index that covers this column + for name in coverage.btree_coverage.keys() { + if let Some(btree) = idx.get_btree(name) { + if btree.column_name() == column { + return Some( + coverage + .check_btree_coverage(name, self.max_visible_batch_position), + ); + } + } + } + None + }) + .unwrap_or(CoverageResult::None) + } + + /// Get vector index coverage for a column. + fn get_vector_coverage(&self, column: &str) -> CoverageResult { + self.indexes + .as_ref() + .and_then(|idx| { + let coverage = idx.get_coverage(); + for name in coverage.ivfpq_coverage.keys() { + if let Some(ivfpq) = idx.get_ivf_pq(name) { + if ivfpq.column_name() == column { + return Some( + coverage + .check_ivfpq_coverage(name, self.max_visible_batch_position), + ); + } + } + } + None + }) + .unwrap_or(CoverageResult::None) + } + + /// Get FTS index coverage for a column. 
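`extract_btree_predicate` above only recognizes `column <op> literal` and `column IN (literals)` shapes; anything else falls back to a filtered scan. The same pattern can be shown on a toy expression AST; this `Expr`/`Predicate` pair is a simplified stand-in for DataFusion's types, not the real ones.

#[allow(dead_code)]
enum Op { Eq, Lt, Gt }

enum Expr {
    Column(String),
    Literal(i64),
    Binary { left: Box<Expr>, op: Op, right: Box<Expr> },
}

enum Predicate {
    Eq { column: String, value: i64 },
    Range { column: String, lower: Option<i64>, upper: Option<i64> },
}

// Only `col <op> literal` is index-eligible; everything else returns None.
fn extract(expr: &Expr) -> Option<Predicate> {
    if let Expr::Binary { left, op, right } = expr {
        if let (Expr::Column(col), Expr::Literal(v)) = (left.as_ref(), right.as_ref()) {
            return Some(match op {
                Op::Eq => Predicate::Eq { column: col.clone(), value: *v },
                Op::Lt => Predicate::Range { column: col.clone(), lower: None, upper: Some(*v) },
                Op::Gt => Predicate::Range { column: col.clone(), lower: Some(*v), upper: None },
            });
        }
    }
    None // falls back to a filtered full scan
}

fn main() {
    let filter = Expr::Binary {
        left: Box::new(Expr::Column("id".into())),
        op: Op::Gt,
        right: Box::new(Expr::Literal(10)),
    };
    assert!(matches!(extract(&filter), Some(Predicate::Range { lower: Some(10), .. })));
}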
+ fn get_fts_coverage(&self, column: &str) -> CoverageResult { + self.indexes + .as_ref() + .and_then(|idx| { + let coverage = idx.get_coverage(); + for name in coverage.fts_coverage.keys() { + if let Some(fts) = idx.get_fts(name) { + if fts.column_name() == column { + return Some( + coverage.check_fts_coverage(name, self.max_visible_batch_position), + ); + } + } + } + None + }) + .unwrap_or(CoverageResult::None) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field, Schema}; + + fn create_test_schema() -> SchemaRef { + Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &Schema, start_id: i32, count: usize) -> RecordBatch { + let ids: Vec = (start_id..start_id + count as i32).collect(); + let names: Vec = ids.iter().map(|id| format!("name_{}", id)).collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(ids)), + Arc::new(StringArray::from(names)), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_scanner_basic_scan() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Insert test data + let batch = create_test_batch(&schema, 0, 10); + batch_store.append(batch).unwrap(); + + let scanner = MemTableScanner::new(batch_store, None, schema.clone(), 0); + + let result = scanner.try_into_batch().await.unwrap(); + assert_eq!(result.num_rows(), 10); + } + + #[tokio::test] + async fn test_scanner_visibility_filtering() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Insert 3 batches at positions 0, 1, 2 + let batch1 = create_test_batch(&schema, 0, 10); + batch_store.append(batch1).unwrap(); + + let batch2 = create_test_batch(&schema, 10, 10); + batch_store.append(batch2).unwrap(); + + let batch3 = create_test_batch(&schema, 20, 10); + batch_store.append(batch3).unwrap(); + + // Scanner with max_visible=1 should see positions 0 and 1 (2 batches) + let scanner = MemTableScanner::new(batch_store.clone(), None, schema.clone(), 1); + let result = scanner.try_into_batch().await.unwrap(); + assert_eq!(result.num_rows(), 20); + + // Scanner with max_visible=0 should see only position 0 (1 batch) + let scanner = MemTableScanner::new(batch_store, None, schema, 0); + let result = scanner.try_into_batch().await.unwrap(); + assert_eq!(result.num_rows(), 10); + } + + #[tokio::test] + async fn test_scanner_projection() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let batch = create_test_batch(&schema, 0, 10); + batch_store.append(batch).unwrap(); + + let mut scanner = MemTableScanner::new(batch_store, None, schema, 0); + scanner.project(&["id"]); + + let result = scanner.try_into_batch().await.unwrap(); + assert_eq!(result.num_columns(), 1); + assert_eq!(result.schema().field(0).name(), "id"); + } + + #[tokio::test] + async fn test_scanner_limit() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let batch = create_test_batch(&schema, 0, 100); + batch_store.append(batch).unwrap(); + + let mut scanner = MemTableScanner::new(batch_store, None, schema, 0); + scanner.limit(10, None); + + let result = scanner.try_into_batch().await.unwrap(); + assert_eq!(result.num_rows(), 10); + } + + #[tokio::test] + async fn 
test_scanner_count_rows() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let batch = create_test_batch(&schema, 0, 50); + batch_store.append(batch).unwrap(); + + let scanner = MemTableScanner::new(batch_store, None, schema, 0); + let count = scanner.count_rows().await.unwrap(); + assert_eq!(count, 50); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec.rs b/rust/lance/src/dataset/mem_wal/scanner/exec.rs new file mode 100644 index 00000000000..3419fb12ca2 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec.rs @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! DataFusion ExecutionPlan implementations for MemWAL read path. +//! +//! This module contains execution nodes for: +//! - `MemTableScanExec` - Full table scan with MVCC visibility +//! - `MemTableRangeScanExec` - Scan specific sequence range +//! - `BTreeIndexExec` - BTree index queries +//! - `VectorIndexExec` - IVF-PQ vector search +//! - `FtsIndexExec` - Full-text search +//! - `SplitPlanExec` - Combine indexed and unindexed portions + +mod btree; +mod fts; +mod range_scan; +mod scan; +mod split; +mod vector; + +pub use btree::BTreeIndexExec; +pub use fts::FtsIndexExec; +pub use range_scan::MemTableRangeScanExec; +pub use scan::MemTableScanExec; +pub use split::SplitPlanExec; +pub use vector::VectorIndexExec; diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/btree.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/btree.rs new file mode 100644 index 00000000000..09e287014ea --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/btree.rs @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! BTreeIndexExec - BTree index queries with MVCC visibility. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_array::RecordBatch; +use arrow_schema::SchemaRef; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::{self, StreamExt}; +use lance_core::{Error, Result}; +use snafu::location; + +use super::super::builder::ScalarPredicate; +use crate::dataset::mem_wal::write::{IndexRegistry, LockFreeBatchStore}; + +/// ExecutionPlan node that queries BTree index with visibility filtering. +pub struct BTreeIndexExec { + batch_store: Arc, + indexes: Arc, + predicate: ScalarPredicate, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, + /// Name of the index to use (determined during construction). 
+ index_name: String, +} + +impl Debug for BTreeIndexExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BTreeIndexExec") + .field("predicate", &self.predicate) + .field( + "max_visible_batch_position", + &self.max_visible_batch_position, + ) + .field("index_name", &self.index_name) + .finish() + } +} + +impl BTreeIndexExec { + /// Create a new BTreeIndexExec. + /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing data + /// * `indexes` - Index registry with BTree indexes + /// * `predicate` - Scalar predicate to apply + /// * `max_visible_batch_position` - MVCC visibility sequence number + /// * `projection` - Optional column indices to project + /// * `output_schema` - Schema after projection + pub fn new( + batch_store: Arc, + indexes: Arc, + predicate: ScalarPredicate, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + ) -> Result { + // Find the index for this column + let column = predicate.column(); + let coverage = indexes.get_coverage(); + let index_name = coverage + .btree_coverage + .iter() + .find_map(|(name, _)| { + indexes + .get_btree(name) + .filter(|idx| idx.column_name() == column) + .map(|_| name.clone()) + }) + .ok_or_else(|| { + Error::invalid_input( + format!("No BTree index found for column '{}'", column), + location!(), + ) + })?; + + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + Ok(Self { + batch_store, + indexes, + predicate, + max_visible_batch_position, + projection, + output_schema, + properties, + metrics: ExecutionPlanMetricsSet::new(), + index_name, + }) + } + + /// Compute the maximum visible row position based on max_visible_batch_position. + /// Returns None if no batches are visible. + fn compute_max_visible_row(&self) -> Option { + let mut max_visible_row_exclusive: u64 = 0; + let mut current_row: u64 = 0; + + for (batch_position, stored_batch) in self.batch_store.iter().enumerate() { + let batch_end = current_row + stored_batch.num_rows as u64; + if batch_position <= self.max_visible_batch_position { + max_visible_row_exclusive = batch_end; + } + current_row = batch_end; + } + + if max_visible_row_exclusive > 0 { + Some(max_visible_row_exclusive - 1) + } else { + None + } + } + + /// Query the index and return matching row positions filtered by visibility. + fn query_index(&self) -> Vec { + let Some(index) = self.indexes.get_btree(&self.index_name) else { + return vec![]; + }; + + let Some(max_visible_row) = self.compute_max_visible_row() else { + return vec![]; + }; + + let positions = match &self.predicate { + ScalarPredicate::Eq { value, .. } => index.get(value), + ScalarPredicate::Range { lower, upper, .. } => { + // For range queries, use a range scan approach + // This is simplified - in production we'd need proper range iteration + let mut results = Vec::new(); + let snapshot = index.snapshot(); + + for (key, positions) in snapshot { + let in_range = match (lower, upper) { + (Some(l), Some(u)) => &key.0 >= l && &key.0 < u, + (Some(l), None) => &key.0 >= l, + (None, Some(u)) => &key.0 < u, + (None, None) => true, + }; + + if in_range { + results.extend(positions); + } + } + results + } + ScalarPredicate::In { values, .. 
} => { + let mut results = Vec::new(); + for value in values { + results.extend(index.get(value)); + } + results + } + }; + + // Filter by visibility + positions + .into_iter() + .filter(|&pos| pos <= max_visible_row) + .collect() + } + + /// Convert row positions to batch_id and row_within_batch pairs. + fn positions_to_batch_rows(&self, positions: &[u64]) -> Vec<(usize, usize)> { + // Build a map of batch_id -> (start_row, end_row) + let mut batch_ranges = Vec::new(); + let mut current_row = 0usize; + + for stored_batch in self.batch_store.iter() { + let batch_start = current_row; + let batch_end = current_row + stored_batch.num_rows; + batch_ranges.push((batch_start, batch_end)); + current_row = batch_end; + } + + // Convert positions to (batch_id, row_in_batch) pairs + let mut result = Vec::new(); + for &pos in positions { + let pos = pos as usize; + for (batch_id, &(start, end)) in batch_ranges.iter().enumerate() { + if pos >= start && pos < end { + result.push((batch_id, pos - start)); + break; + } + } + } + result + } + + /// Materialize rows from batch store. + fn materialize_rows( + &self, + batch_rows: &[(usize, usize)], + ) -> DataFusionResult> { + if batch_rows.is_empty() { + return Ok(vec![]); + } + + // Group rows by batch + let mut batches_to_rows: std::collections::HashMap> = + std::collections::HashMap::new(); + for &(batch_id, row_in_batch) in batch_rows { + batches_to_rows + .entry(batch_id) + .or_default() + .push(row_in_batch); + } + + let mut results = Vec::new(); + for (batch_id, rows) in batches_to_rows { + if let Some(stored) = self.batch_store.get(batch_id) { + // Use take to select specific rows + let indices = arrow_array::UInt32Array::from( + rows.iter().map(|&r| r as u32).collect::>(), + ); + + let columns: std::result::Result, datafusion::error::DataFusionError> = + stored + .data + .columns() + .iter() + .map(|col| { + arrow_select::take::take(col.as_ref(), &indices, None).map_err(|e| { + datafusion::error::DataFusionError::ArrowError(Box::new(e), None) + }) + }) + .collect(); + + let columns = columns?; + + // Apply projection + let final_columns = if let Some(ref proj_indices) = self.projection { + proj_indices.iter().map(|&i| columns[i].clone()).collect() + } else { + columns + }; + + let batch = RecordBatch::try_new(self.output_schema.clone(), final_columns)?; + results.push(batch); + } + } + + Ok(results) + } +} + +impl DisplayAs for BTreeIndexExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "BTreeIndexExec: index={}, predicate={:?}, max_visible_batch_position={}", + self.index_name, self.predicate, self.max_visible_batch_position + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "BTreeIndexExec\nindex={}\npredicate={:?}\nmax_visible_batch_position={}", + self.index_name, self.predicate, self.max_visible_batch_position + ) + } + } + } +} + +impl ExecutionPlan for BTreeIndexExec { + fn name(&self) -> &str { + "BTreeIndexExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if !children.is_empty() { + return Err(datafusion::error::DataFusionError::Internal( + "BTreeIndexExec does not have children".to_string(), + )); + } + Ok(self) + } + + fn execute( + &self, + _partition: usize, + _context: Arc, + ) -> 
DataFusionResult { + // Query the index + let positions = self.query_index(); + + // Convert positions to batch/row pairs with visibility filtering + let batch_rows = self.positions_to_batch_rows(&positions); + + // Materialize the rows + let batches = self.materialize_rows(&batch_rows)?; + + let stream = stream::iter(batches.into_iter().map(Ok)).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + self.output_schema.clone(), + stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + // We can't know the exact count without querying the index + Ok(Statistics { + num_rows: Precision::Absent, + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field, Schema}; + use datafusion::common::ScalarValue; + use futures::TryStreamExt; + + fn create_test_schema() -> Arc { + Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &Schema, start_id: i32, count: usize) -> RecordBatch { + let ids: Vec = (start_id..start_id + count as i32).collect(); + let names: Vec = ids.iter().map(|id| format!("name_{}", id)).collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(ids)), + Arc::new(StringArray::from(names)), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_btree_index_eq_query() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Create index registry with btree index on "id" + let mut registry = IndexRegistry::new(); + registry.add_btree("id_idx".to_string(), "id".to_string()); + + // Insert test data and update index + let batch = create_test_batch(&schema, 0, 10); + registry.insert(&batch, 0).unwrap(); + batch_store.append(batch).unwrap(); + + let indexes = Arc::new(registry); + + let predicate = ScalarPredicate::Eq { + column: "id".to_string(), + value: ScalarValue::Int32(Some(5)), + }; + + let exec = BTreeIndexExec::new( + batch_store, + indexes, + predicate, + 0, // max_visible_batch_position (batch at position 0) + None, + schema, + ) + .unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should find one row with id=5 + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 1); + } + + #[tokio::test] + async fn test_btree_index_in_query() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let mut registry = IndexRegistry::new(); + registry.add_btree("id_idx".to_string(), "id".to_string()); + + let batch = create_test_batch(&schema, 0, 10); + registry.insert(&batch, 0).unwrap(); + batch_store.append(batch).unwrap(); + + let indexes = Arc::new(registry); + + let predicate = ScalarPredicate::In { + column: "id".to_string(), + values: vec![ + ScalarValue::Int32(Some(2)), + ScalarValue::Int32(Some(5)), + ScalarValue::Int32(Some(8)), + ], + }; + + let exec = BTreeIndexExec::new(batch_store, indexes, predicate, 0, None, schema).unwrap(); + + let ctx = Arc::new(TaskContext::default()); 
+ let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should find 3 rows + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 3); + } + + #[tokio::test] + async fn test_btree_index_visibility() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let mut registry = IndexRegistry::new(); + registry.add_btree("id_idx".to_string(), "id".to_string()); + + // Insert two batches at positions 0 and 1 + let batch1 = create_test_batch(&schema, 0, 10); + let batch2 = create_test_batch(&schema, 10, 10); + registry.insert(&batch1, 0).unwrap(); + registry.insert(&batch2, 10).unwrap(); + batch_store.append(batch1).unwrap(); + batch_store.append(batch2).unwrap(); + + let indexes = Arc::new(registry); + + let predicate = ScalarPredicate::Eq { + column: "id".to_string(), + value: ScalarValue::Int32(Some(15)), + }; + + // Query with max_visible=0 should not see batch at position 1 + let exec = BTreeIndexExec::new( + batch_store.clone(), + indexes.clone(), + predicate.clone(), + 0, + None, + schema.clone(), + ) + .unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 0); + + // Query with max_visible=1 should see both batches + let exec = BTreeIndexExec::new(batch_store, indexes, predicate, 1, None, schema).unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 1); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/fts.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/fts.rs new file mode 100644 index 00000000000..aea4bb56fd9 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/fts.rs @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! FtsIndexExec - Full-text search with MVCC visibility. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_array::{Float32Array, RecordBatch, UInt32Array}; +use arrow_schema::{DataType, Field, Schema, SchemaRef}; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::{self, StreamExt}; +use lance_core::{Error, Result}; +use snafu::location; + +use super::super::builder::FtsQuery; +use crate::dataset::mem_wal::write::{IndexRegistry, LockFreeBatchStore}; + +/// Score column name in output. +pub const SCORE_COLUMN: &str = "_score"; + +/// Batch range info for efficient row position lookup. +#[derive(Debug, Clone)] +struct BatchRange { + start: usize, + end: usize, + batch_id: usize, +} + +/// ExecutionPlan node that queries FTS index with MVCC visibility. 
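+///
+/// Execution happens in three steps: the FTS index is searched for matching row
+/// positions, positions beyond the last row visible at
+/// `max_visible_batch_position` are dropped, and the surviving rows are
+/// materialized from the batch store with a trailing `_score` column.
+///
+/// # Example
+///
+/// A usage sketch mirroring the unit tests below (the batch store and index
+/// registry are assumed to be populated already):
+///
+/// ```ignore
+/// let query = FtsQuery {
+///     column: "text".to_string(),
+///     query: "hello".to_string(),
+/// };
+/// let exec = FtsIndexExec::new(batch_store, indexes, query, 0, None, schema)?;
+/// let stream = exec.execute(0, Arc::new(TaskContext::default()))?;
+/// let batches: Vec<RecordBatch> = stream.try_collect().await?;
+/// ```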
+pub struct FtsIndexExec { + batch_store: Arc, + indexes: Arc, + query: FtsQuery, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, + index_name: String, + /// Pre-computed batch ranges for O(log n) lookup. + batch_ranges: Vec, + /// Maximum visible row position based on max_visible_batch_position (None if nothing visible). + max_visible_row: Option, +} + +impl Debug for FtsIndexExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("FtsIndexExec") + .field("column", &self.query.column) + .field("query", &self.query.query) + .field( + "max_visible_batch_position", + &self.max_visible_batch_position, + ) + .field("index_name", &self.index_name) + .finish() + } +} + +impl FtsIndexExec { + /// Create a new FtsIndexExec. + /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing data + /// * `indexes` - Index registry with FTS indexes + /// * `query` - FTS query parameters + /// * `max_visible_batch_position` - MVCC visibility sequence number + /// * `projection` - Optional column indices to project + /// * `base_schema` - Schema before adding score column + pub fn new( + batch_store: Arc, + indexes: Arc, + query: FtsQuery, + max_visible_batch_position: usize, + projection: Option>, + base_schema: SchemaRef, + ) -> Result { + // Find the index for this column + let column = &query.column; + let coverage = indexes.get_coverage(); + let index_name = coverage + .fts_coverage + .iter() + .find_map(|(name, _)| { + indexes + .get_fts(name) + .filter(|idx| idx.column_name() == column) + .map(|_| name.clone()) + }) + .ok_or_else(|| { + Error::invalid_input( + format!("No FTS index found for column '{}'", column), + location!(), + ) + })?; + + // Add _score column to output schema + let mut fields: Vec = base_schema + .fields() + .iter() + .map(|f| f.as_ref().clone()) + .collect(); + fields.push(Field::new(SCORE_COLUMN, DataType::Float32, false)); + let output_schema = Arc::new(Schema::new(fields)); + + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + // Pre-compute batch ranges for O(log n) lookup and max visible row + let mut batch_ranges = Vec::new(); + let mut current_row = 0usize; + let mut max_visible_row_exclusive: u64 = 0; + + for (batch_id, stored_batch) in batch_store.iter().enumerate() { + let batch_start = current_row; + let batch_end = current_row + stored_batch.num_rows; + batch_ranges.push(BatchRange { + start: batch_start, + end: batch_end, + batch_id, + }); + if batch_id <= max_visible_batch_position { + max_visible_row_exclusive = batch_end as u64; + } + current_row = batch_end; + } + + // Convert exclusive end to inclusive last position, or None if nothing visible + let max_visible_row = if max_visible_row_exclusive > 0 { + Some(max_visible_row_exclusive - 1) + } else { + None + }; + + Ok(Self { + batch_store, + indexes, + query, + max_visible_batch_position, + projection, + output_schema, + properties, + metrics: ExecutionPlanMetricsSet::new(), + index_name, + batch_ranges, + max_visible_row, + }) + } + + /// Find batch for a row position using binary search. O(log n). 
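+ ///
+ /// `batch_ranges` is sorted and non-overlapping, so `partition_point` over
+ /// `end <= row_pos` lands on the first range that could contain the position;
+ /// the final filter rejects positions past the last batch.
+ ///
+ /// The same lookup on plain `(start, end)` tuples, as a self-contained sketch:
+ ///
+ /// ```ignore
+ /// let ranges = [(0usize, 3usize), (3, 6)];
+ /// let row_pos = 4;
+ /// let idx = ranges.partition_point(|&(_, end)| end <= row_pos);
+ /// assert_eq!(idx, 1); // row 4 falls in the second range, [3, 6)
+ /// ```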
+ #[inline] + fn find_batch(&self, row_pos: usize) -> Option<&BatchRange> { + // Binary search: find the batch where start <= row_pos < end + let idx = self.batch_ranges.partition_point(|b| b.end <= row_pos); + self.batch_ranges + .get(idx) + .filter(|b| row_pos >= b.start && row_pos < b.end) + } + + /// Query the index and return matching rows with scores. + fn query_index(&self) -> Vec<(u64, f32)> { + let Some(index) = self.indexes.get_fts(&self.index_name) else { + return vec![]; + }; + + // Search the index + let entries = index.search(&self.query.query); + + // Convert to (row_position, score) pairs + // Score is based on term frequency (simple TF scoring) + entries + .into_iter() + .map(|entry| (entry.row_position, entry.frequency as f32)) + .collect() + } + + /// Filter results by MVCC visibility using max_row_position. O(n). + fn filter_by_visibility(&self, results: Vec<(u64, f32)>) -> Vec<(u64, f32)> { + let Some(max_visible) = self.max_visible_row else { + return vec![]; + }; + results + .into_iter() + .filter(|&(pos, _)| pos <= max_visible) + .collect() + } + + /// Materialize rows from batch store with score column. + fn materialize_rows(&self, results: &[(u64, f32)]) -> DataFusionResult> { + if results.is_empty() { + return Ok(vec![]); + } + + // Group rows by batch using binary search on pre-computed ranges + let mut batches_data: std::collections::HashMap> = + std::collections::HashMap::new(); + + for &(pos, score) in results { + if let Some(batch) = self.find_batch(pos as usize) { + batches_data + .entry(batch.batch_id) + .or_default() + .push((pos as usize - batch.start, score)); + } + } + + let mut all_batches = Vec::new(); + + for (batch_id, rows_with_score) in batches_data { + if let Some(stored) = self.batch_store.get(batch_id) { + let rows: Vec = rows_with_score.iter().map(|&(r, _)| r as u32).collect(); + let scores: Vec = rows_with_score.iter().map(|&(_, s)| s).collect(); + + let indices = UInt32Array::from(rows); + + let mut columns: Vec> = stored + .data + .columns() + .iter() + .map(|col| arrow_select::take::take(col.as_ref(), &indices, None).unwrap()) + .collect(); + + // Add score column + columns.push(Arc::new(Float32Array::from(scores))); + + // Apply projection if needed (excluding score column which is always included) + let final_columns = if let Some(ref proj_indices) = self.projection { + let mut projected: Vec<_> = + proj_indices.iter().map(|&i| columns[i].clone()).collect(); + // Always include score as last column + projected.push(columns.last().unwrap().clone()); + projected + } else { + columns + }; + + let batch = RecordBatch::try_new(self.output_schema.clone(), final_columns)?; + all_batches.push(batch); + } + } + + Ok(all_batches) + } +} + +impl DisplayAs for FtsIndexExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "FtsIndexExec: index={}, column={}, query={:?}, max_visible_batch_position={}", + self.index_name, + self.query.column, + self.query.query, + self.max_visible_batch_position + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "FtsIndexExec\nindex={}\ncolumn={}\nquery={:?}\nmax_visible_batch_position={}", + self.index_name, + self.query.column, + self.query.query, + self.max_visible_batch_position + ) + } + } + } +} + +impl ExecutionPlan for FtsIndexExec { + fn name(&self) -> &str { + "FtsIndexExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + 
self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if !children.is_empty() { + return Err(datafusion::error::DataFusionError::Internal( + "FtsIndexExec does not have children".to_string(), + )); + } + Ok(self) + } + + fn execute( + &self, + _partition: usize, + _context: Arc, + ) -> DataFusionResult { + // Query the index + let results = self.query_index(); + + // Filter by visibility + let visible_results = self.filter_by_visibility(results); + + // Materialize the rows + let batches = self.materialize_rows(&visible_results)?; + + let stream = stream::iter(batches.into_iter().map(Ok)).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + self.output_schema.clone(), + stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + Ok(Statistics { + num_rows: Precision::Absent, + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field, Schema}; + use futures::TryStreamExt; + + fn create_test_schema() -> Arc { + Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("text", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &Schema, start_id: i32) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(vec![start_id, start_id + 1, start_id + 2])), + Arc::new(StringArray::from(vec![ + "hello world", + "goodbye world", + "hello again", + ])), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_fts_index_search() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Create index registry with FTS index on "text" + let mut registry = IndexRegistry::new(); + registry.add_fts("text_idx".to_string(), "text".to_string()); + + // Insert test data and update index + let batch = create_test_batch(&schema, 0); + registry.insert(&batch, 0).unwrap(); + batch_store.append(batch).unwrap(); + + let indexes = Arc::new(registry); + + let query = FtsQuery { + column: "text".to_string(), + query: "hello".to_string(), + }; + + let exec = FtsIndexExec::new(batch_store, indexes, query, 0, None, schema).unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // "hello" appears in docs 0 and 2 + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 2); + + // Check that _score column exists + let result_schema = batches[0].schema(); + assert!(result_schema.field_with_name(SCORE_COLUMN).is_ok()); + } + + #[tokio::test] + async fn test_fts_index_visibility() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let mut registry = IndexRegistry::new(); + registry.add_fts("text_idx".to_string(), "text".to_string()); + + // Insert two batches at positions 0 and 1 + // Each batch has 3 rows, so batch1 has rows 0-2, batch2 has rows 3-5 + let batch1 = create_test_batch(&schema, 0); + let batch2 = create_test_batch(&schema, 5); + registry.insert(&batch1, 0).unwrap(); 
+ registry.insert(&batch2, 3).unwrap(); // start_row_id=3 since batch1 has 3 rows + batch_store.append(batch1).unwrap(); + batch_store.append(batch2).unwrap(); + + let indexes = Arc::new(registry); + + let query = FtsQuery { + column: "text".to_string(), + query: "hello".to_string(), + }; + + // Query with max_visible=0 should only see first batch + let exec = FtsIndexExec::new( + batch_store.clone(), + indexes.clone(), + query.clone(), + 0, + None, + schema.clone(), + ) + .unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 2); // "hello" in batch1 docs 0 and 2 + + // Query with max_visible=1 should see both batches + let exec = FtsIndexExec::new(batch_store, indexes, query, 1, None, schema).unwrap(); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 4); // "hello" in both batches + } + + #[test] + fn test_score_column_name() { + assert_eq!(SCORE_COLUMN, "_score"); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/range_scan.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/range_scan.rs new file mode 100644 index 00000000000..efe374cf520 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/range_scan.rs @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! MemTableRangeScanExec - Scan batches within a specific batch position range. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_array::RecordBatch; +use arrow_schema::SchemaRef; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion::prelude::Expr; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::{self, StreamExt}; + +use crate::dataset::mem_wal::write::LockFreeBatchStore; + +/// ExecutionPlan node that scans batches within a batch position range. +/// +/// This is used in split plans to scan the unindexed portion of data, +/// where `min_batch_position_exclusive < batch_position <= max_batch_position_inclusive`. 
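+///
+/// # Example
+///
+/// A usage sketch mirroring the unit tests below (the batch store and schema
+/// are assumed to exist already):
+///
+/// ```ignore
+/// // Bounds (1, 4] select the batches at positions 2, 3 and 4.
+/// let exec = MemTableRangeScanExec::new(batch_store, 1, 4, None, schema, None);
+/// let stream = exec.execute(0, Arc::new(TaskContext::default()))?;
+/// let batches: Vec<RecordBatch> = stream.try_collect().await?;
+/// ```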
+pub struct MemTableRangeScanExec { + batch_store: Arc, + min_batch_position_exclusive: usize, + max_batch_position_inclusive: usize, + projection: Option>, + output_schema: SchemaRef, + filter: Option, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, +} + +impl Debug for MemTableRangeScanExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MemTableRangeScanExec") + .field( + "min_batch_position_exclusive", + &self.min_batch_position_exclusive, + ) + .field( + "max_batch_position_inclusive", + &self.max_batch_position_inclusive, + ) + .field("projection", &self.projection) + .field("has_filter", &self.filter.is_some()) + .finish() + } +} + +impl MemTableRangeScanExec { + /// Create a new MemTableRangeScanExec. + /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing data + /// * `min_batch_position_exclusive` - Minimum batch position (exclusive): batches with position > this + /// * `max_batch_position_inclusive` - Maximum batch position (inclusive): batches with position <= this + /// * `projection` - Optional column indices to project + /// * `output_schema` - Schema after projection + /// * `filter` - Optional filter expression to apply + pub fn new( + batch_store: Arc, + min_batch_position_exclusive: usize, + max_batch_position_inclusive: usize, + projection: Option>, + output_schema: SchemaRef, + filter: Option, + ) -> Self { + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + Self { + batch_store, + min_batch_position_exclusive, + max_batch_position_inclusive, + projection, + output_schema, + filter, + properties, + metrics: ExecutionPlanMetricsSet::new(), + } + } + + /// Check if a batch position is in the range. 
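+ ///
+ /// The bounds form the half-open interval
+ /// `(min_batch_position_exclusive, max_batch_position_inclusive]`. The same
+ /// predicate on plain numbers, as a sketch:
+ ///
+ /// ```ignore
+ /// // With bounds (1, 4]:
+ /// let in_range = |p: usize| p > 1 && p <= 4;
+ /// assert!(!in_range(1) && in_range(2) && in_range(4) && !in_range(5));
+ /// ```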
+ fn is_in_range(&self, batch_position: usize) -> bool { + batch_position > self.min_batch_position_exclusive + && batch_position <= self.max_batch_position_inclusive + } +} + +impl DisplayAs for MemTableRangeScanExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "MemTableRangeScanExec: batch_position_range=({}, {}], projection={:?}", + self.min_batch_position_exclusive, + self.max_batch_position_inclusive, + self.projection + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "MemTableRangeScanExec\nbatch_position_range=({}, {}]\nprojection={:?}", + self.min_batch_position_exclusive, + self.max_batch_position_inclusive, + self.projection + ) + } + } + } +} + +impl ExecutionPlan for MemTableRangeScanExec { + fn name(&self) -> &str { + "MemTableRangeScanExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if !children.is_empty() { + return Err(datafusion::error::DataFusionError::Internal( + "MemTableRangeScanExec does not have children".to_string(), + )); + } + Ok(self) + } + + fn execute( + &self, + _partition: usize, + _context: Arc, + ) -> DataFusionResult { + // Filter batches by batch position range + let batches: Vec = self + .batch_store + .iter() + .enumerate() + .filter(|(batch_position, _)| self.is_in_range(*batch_position)) + .map(|(_, stored)| stored.data.clone()) + .collect(); + + // Apply projection to each batch + let projection = self.projection.clone(); + let schema = self.output_schema.clone(); + + let projected_batches: Vec> = batches + .into_iter() + .map(|batch| { + if let Some(ref indices) = projection { + let columns: Vec<_> = + indices.iter().map(|&i| batch.column(i).clone()).collect(); + RecordBatch::try_new(schema.clone(), columns).map_err(|e| e.into()) + } else { + Ok(batch) + } + }) + .collect(); + + let stream = stream::iter(projected_batches).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + self.output_schema.clone(), + stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + // Count batches in range + let count: usize = self + .batch_store + .iter() + .enumerate() + .filter(|(batch_position, _)| self.is_in_range(*batch_position)) + .map(|(_, stored)| stored.num_rows) + .sum(); + + Ok(Statistics { + num_rows: Precision::Exact(count), + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field, Schema}; + use futures::TryStreamExt; + + fn create_test_schema() -> Arc { + Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &Schema, start_id: i32, count: usize) -> RecordBatch { + let ids: Vec = (start_id..start_id + count as i32).collect(); + let names: Vec = ids.iter().map(|id| format!("name_{}", id)).collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(ids)), + 
Arc::new(StringArray::from(names)), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_range_scan_basic() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Insert 5 batches + for i in 0..5 { + batch_store + .append(create_test_batch(&schema, i * 10, 10)) + .unwrap(); + } + + // Range scan positions 2-4 (exclusive-inclusive: > 1, <= 4) + let exec = + MemTableRangeScanExec::new(batch_store.clone(), 1, 4, None, schema.clone(), None); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should get batches at positions 2, 3, 4 (3 batches) + assert_eq!(batches.len(), 3); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 30); + } + + #[tokio::test] + async fn test_range_scan_empty_range() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Insert 3 batches + for i in 0..3 { + batch_store + .append(create_test_batch(&schema, i * 10, 10)) + .unwrap(); + } + + // Empty range (> 5, <= 10 with only 3 batches) + let exec = MemTableRangeScanExec::new(batch_store.clone(), 5, 10, None, schema, None); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + assert!(batches.is_empty()); + } + + #[tokio::test] + async fn test_range_scan_with_projection() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + batch_store + .append(create_test_batch(&schema, 0, 10)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 10, 10)) + .unwrap(); + + // Project only "id" column + let projected_schema = + Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)])); + let exec = MemTableRangeScanExec::new( + batch_store.clone(), + 0, + 1, + Some(vec![0]), + projected_schema, + None, + ); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should get batch at position 1 (> 0, <= 1) + assert_eq!(batches.len(), 1); + assert_eq!(batches[0].num_columns(), 1); + assert_eq!(batches[0].schema().field(0).name(), "id"); + } + + #[tokio::test] + async fn test_range_scan_statistics() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + batch_store + .append(create_test_batch(&schema, 0, 10)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 10, 20)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 30, 15)) + .unwrap(); + + // Range positions 0-1 inclusive (> -1, but we use usize so 0, <= 1) + // This means batches at position 0 and 1 if we use min_exclusive = max of usize... 
+ // Actually let's use a proper range: > 0, <= 2 means positions 1 and 2 + let exec = MemTableRangeScanExec::new(batch_store, 0, 2, None, schema, None); + + let stats = exec.partition_statistics(None).unwrap(); + // Positions 1 and 2 have 20 + 15 = 35 rows + assert_eq!(stats.num_rows, Precision::Exact(35)); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/scan.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/scan.rs new file mode 100644 index 00000000000..7e1fe8d67c5 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/scan.rs @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! MemTableScanExec - Full table scan with MVCC visibility filtering. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_array::RecordBatch; +use arrow_schema::SchemaRef; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::{self, StreamExt}; + +use crate::dataset::mem_wal::write::LockFreeBatchStore; + +/// ExecutionPlan node that scans all visible batches from a MemTable. +/// +/// This node implements visibility filtering, returning only batches +/// where `batch_position <= max_visible_batch_position`. +pub struct MemTableScanExec { + batch_store: Arc, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, +} + +impl Debug for MemTableScanExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MemTableScanExec") + .field( + "max_visible_batch_position", + &self.max_visible_batch_position, + ) + .field("projection", &self.projection) + .finish() + } +} + +impl MemTableScanExec { + /// Create a new MemTableScanExec. 
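+ ///
+ /// Only batches at positions `<= max_visible_batch_position` are returned,
+ /// which is how snapshot visibility is enforced for full scans. A usage
+ /// sketch mirroring the unit tests below:
+ ///
+ /// ```ignore
+ /// // Positions 0 and 1 are visible; a batch appended at position 2 is not.
+ /// let exec = MemTableScanExec::new(batch_store, 1, None, schema);
+ /// let stream = exec.execute(0, Arc::new(TaskContext::default()))?;
+ /// ```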
+ /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing data + /// * `max_visible_batch_position` - Maximum batch position visible (inclusive) + /// * `projection` - Optional column indices to project + /// * `output_schema` - Schema after projection + pub fn new( + batch_store: Arc, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + ) -> Self { + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + Self { + batch_store, + max_visible_batch_position, + projection, + output_schema, + properties, + metrics: ExecutionPlanMetricsSet::new(), + } + } +} + +impl DisplayAs for MemTableScanExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "MemTableScanExec: max_visible_batch_position={}, projection={:?}", + self.max_visible_batch_position, self.projection + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "MemTableScanExec\nmax_visible_batch_position={}\nprojection={:?}", + self.max_visible_batch_position, self.projection + ) + } + } + } +} + +impl ExecutionPlan for MemTableScanExec { + fn name(&self) -> &str { + "MemTableScanExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if !children.is_empty() { + return Err(datafusion::error::DataFusionError::Internal( + "MemTableScanExec does not have children".to_string(), + )); + } + Ok(self) + } + + fn execute( + &self, + _partition: usize, + _context: Arc, + ) -> DataFusionResult { + // Get all visible batches + let batches = self + .batch_store + .visible_record_batches(self.max_visible_batch_position); + + // Apply projection to each batch + let projection = self.projection.clone(); + let schema = self.output_schema.clone(); + + let projected_batches: Vec> = batches + .into_iter() + .map(|batch| { + if let Some(ref indices) = projection { + let columns: Vec<_> = + indices.iter().map(|&i| batch.column(i).clone()).collect(); + RecordBatch::try_new(schema.clone(), columns).map_err(|e| e.into()) + } else { + Ok(batch) + } + }) + .collect(); + + let stream = stream::iter(projected_batches).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + self.output_schema.clone(), + stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + // Report statistics as Absent to avoid DataFusion analysis bugs + // with selectivity calculation on in-memory tables. 
+ Ok(Statistics { + num_rows: Precision::Absent, + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field, Schema}; + use futures::TryStreamExt; + + fn create_test_schema() -> Arc { + Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &Schema, start_id: i32, count: usize) -> RecordBatch { + let ids: Vec = (start_id..start_id + count as i32).collect(); + let names: Vec = ids.iter().map(|id| format!("name_{}", id)).collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from(ids)), + Arc::new(StringArray::from(names)), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_scan_exec_basic() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let batch = create_test_batch(&schema, 0, 10); + batch_store.append(batch).unwrap(); + + // Batch is at position 0, max_visible=0 means position 0 is visible + let exec = MemTableScanExec::new(batch_store, 0, None, schema); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + assert_eq!(batches.len(), 1); + assert_eq!(batches[0].num_rows(), 10); + } + + #[tokio::test] + async fn test_scan_exec_visibility() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Insert 3 batches at positions 0, 1, 2 + batch_store + .append(create_test_batch(&schema, 0, 10)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 10, 10)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 20, 10)) + .unwrap(); + + // max_visible_batch_position=1 means positions 0 and 1 are visible (2 batches) + let exec = MemTableScanExec::new(batch_store.clone(), 1, None, schema.clone()); + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + assert_eq!(batches.len(), 2); + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 20); + } + + #[tokio::test] + async fn test_scan_exec_projection() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + let batch = create_test_batch(&schema, 0, 10); + batch_store.append(batch).unwrap(); + + // Project only "id" column (index 0) + let projected_schema = + Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)])); + let exec = MemTableScanExec::new(batch_store, 0, Some(vec![0]), projected_schema); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + assert_eq!(batches.len(), 1); + assert_eq!(batches[0].num_columns(), 1); + assert_eq!(batches[0].schema().field(0).name(), "id"); + } + + #[tokio::test] + async fn test_scan_exec_empty() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + // Empty store with max_visible=0 should return no batches + let 
exec = MemTableScanExec::new(batch_store, 0, None, schema); + + let ctx = Arc::new(TaskContext::default()); + let stream = exec.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + assert!(batches.is_empty()); + } + + #[tokio::test] + async fn test_scan_exec_statistics() { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + + batch_store + .append(create_test_batch(&schema, 0, 10)) + .unwrap(); + batch_store + .append(create_test_batch(&schema, 10, 20)) + .unwrap(); + + // max_visible=1 means positions 0 and 1 are visible + let exec = MemTableScanExec::new(batch_store, 1, None, schema); + + let stats = exec.partition_statistics(None).unwrap(); + // Statistics are Absent to avoid DataFusion analysis bugs + assert_eq!(stats.num_rows, Precision::Absent); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/split.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/split.rs new file mode 100644 index 00000000000..8614079d184 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/split.rs @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! SplitPlanExec - Combine indexed and unindexed portions for partial coverage. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_schema::SchemaRef; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::StreamExt; + +/// ExecutionPlan node that combines indexed and unindexed portions. +/// +/// This is used when an index only covers part of the visible data. +/// It executes both child plans and chains/unions the result streams. +pub struct SplitPlanExec { + indexed_plan: Arc, + unindexed_plan: Arc, + output_schema: SchemaRef, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, +} + +impl Debug for SplitPlanExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SplitPlanExec") + .field("indexed_plan", &self.indexed_plan.name()) + .field("unindexed_plan", &self.unindexed_plan.name()) + .finish() + } +} + +impl SplitPlanExec { + /// Create a new SplitPlanExec. 
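+ ///
+ /// The output schema is taken from `indexed_plan`, and at execution time the
+ /// indexed stream is emitted first followed by the unindexed stream, so both
+ /// children are expected to produce the same schema. A usage sketch mirroring
+ /// the unit tests below:
+ ///
+ /// ```ignore
+ /// let split = SplitPlanExec::new(indexed_plan, unindexed_plan);
+ /// let stream = split.execute(0, Arc::new(TaskContext::default()))?;
+ /// ```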
+ /// + /// # Arguments + /// + /// * `indexed_plan` - Plan for the indexed portion of data + /// * `unindexed_plan` - Plan for the unindexed portion of data + pub fn new( + indexed_plan: Arc, + unindexed_plan: Arc, + ) -> Self { + let output_schema = indexed_plan.schema(); + + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + Self { + indexed_plan, + unindexed_plan, + output_schema, + properties, + metrics: ExecutionPlanMetricsSet::new(), + } + } +} + +impl DisplayAs for SplitPlanExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "SplitPlanExec: indexed={}, unindexed={}", + self.indexed_plan.name(), + self.unindexed_plan.name() + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "SplitPlanExec\nindexed={}\nunindexed={}", + self.indexed_plan.name(), + self.unindexed_plan.name() + ) + } + } + } +} + +impl ExecutionPlan for SplitPlanExec { + fn name(&self) -> &str { + "SplitPlanExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![&self.indexed_plan, &self.unindexed_plan] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if children.len() != 2 { + return Err(datafusion::error::DataFusionError::Internal( + "SplitPlanExec requires exactly two children".to_string(), + )); + } + + Ok(Arc::new(Self::new( + children[0].clone(), + children[1].clone(), + ))) + } + + fn execute( + &self, + partition: usize, + context: Arc, + ) -> DataFusionResult { + // Execute both child plans + let indexed_stream = self.indexed_plan.execute(partition, context.clone())?; + let unindexed_stream = self.unindexed_plan.execute(partition, context)?; + + // Chain the streams: first indexed results, then unindexed results + let combined_stream = indexed_stream.chain(unindexed_stream).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + self.output_schema.clone(), + combined_stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + // Statistics are unknown since we're combining two sources + Ok(Statistics { + num_rows: Precision::Absent, + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + false // Can't push limit into split plan easily + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, RecordBatch}; + use arrow_schema::{DataType, Field, Schema}; + use futures::TryStreamExt; + + use crate::dataset::mem_wal::write::LockFreeBatchStore; + + use super::super::scan::MemTableScanExec; + + fn create_test_schema() -> Arc { + Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)])) + } + + fn create_test_batch(start: i32, count: usize) -> RecordBatch { + let schema = create_test_schema(); + let ids: Vec = (start..start + count as i32).collect(); + + RecordBatch::try_new(schema, vec![Arc::new(Int32Array::from(ids))]).unwrap() + } + + fn create_memory_plan(batches: Vec) -> Arc { + let schema = create_test_schema(); + let batch_store = Arc::new(LockFreeBatchStore::with_capacity(100)); + for batch in 
batches { + batch_store.append(batch).unwrap(); + } + let max_batch_pos = batch_store.max_buffered_batch_position().unwrap_or(0); + Arc::new(MemTableScanExec::new( + batch_store, + max_batch_pos, + None, + schema, + )) + } + + #[tokio::test] + async fn test_split_plan_combines_results() { + // Create indexed plan with 10 rows + let indexed_batches = vec![create_test_batch(0, 10)]; + let indexed_plan = create_memory_plan(indexed_batches); + + // Create unindexed plan with 5 rows + let unindexed_batches = vec![create_test_batch(10, 5)]; + let unindexed_plan = create_memory_plan(unindexed_batches); + + let split = SplitPlanExec::new(indexed_plan, unindexed_plan); + + let ctx = Arc::new(TaskContext::default()); + let stream = split.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should have results from both plans + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 15); + } + + #[tokio::test] + async fn test_split_plan_statistics() { + let indexed_batches = vec![create_test_batch(0, 10)]; + let indexed_plan = create_memory_plan(indexed_batches); + + let unindexed_batches = vec![create_test_batch(10, 5)]; + let unindexed_plan = create_memory_plan(unindexed_batches); + + let split = SplitPlanExec::new(indexed_plan, unindexed_plan); + + // Statistics are unknown since we're combining two sources + let stats = split.partition_statistics(None).unwrap(); + assert_eq!(stats.num_rows, Precision::Absent); + } + + #[tokio::test] + async fn test_split_plan_empty_indexed() { + // Create empty indexed plan + let indexed_plan = create_memory_plan(vec![]); + + // Create unindexed plan with 5 rows + let unindexed_batches = vec![create_test_batch(0, 5)]; + let unindexed_plan = create_memory_plan(unindexed_batches); + + let split = SplitPlanExec::new(indexed_plan, unindexed_plan); + + let ctx = Arc::new(TaskContext::default()); + let stream = split.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should have results from unindexed plan only + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 5); + } + + #[tokio::test] + async fn test_split_plan_empty_unindexed() { + // Create indexed plan with 10 rows + let indexed_batches = vec![create_test_batch(0, 10)]; + let indexed_plan = create_memory_plan(indexed_batches); + + // Create empty unindexed plan + let unindexed_plan = create_memory_plan(vec![]); + + let split = SplitPlanExec::new(indexed_plan, unindexed_plan); + + let ctx = Arc::new(TaskContext::default()); + let stream = split.execute(0, ctx).unwrap(); + let batches: Vec = stream.try_collect().await.unwrap(); + + // Should have results from indexed plan only + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + assert_eq!(total_rows, 10); + } + + #[test] + fn test_split_plan_children() { + let indexed_batches = vec![create_test_batch(0, 10)]; + let indexed_plan = create_memory_plan(indexed_batches); + + let unindexed_batches = vec![create_test_batch(10, 5)]; + let unindexed_plan = create_memory_plan(unindexed_batches); + + let split = SplitPlanExec::new(indexed_plan, unindexed_plan); + + // Should have two children + assert_eq!(split.children().len(), 2); + } + + #[test] + fn test_split_plan_with_new_children() { + let indexed_batches = vec![create_test_batch(0, 10)]; + let indexed_plan = create_memory_plan(indexed_batches); + + let unindexed_batches = vec![create_test_batch(10, 5)]; + let unindexed_plan = 
create_memory_plan(unindexed_batches); + + let split = Arc::new(SplitPlanExec::new( + indexed_plan.clone(), + unindexed_plan.clone(), + )); + + // Clone with new children should work + let new_children = vec![indexed_plan, unindexed_plan]; + let new_split = split.clone().with_new_children(new_children).unwrap(); + assert_eq!(new_split.children().len(), 2); + + // Wrong number of children should fail + let invalid_children = vec![create_memory_plan(vec![])]; + assert!(split.with_new_children(invalid_children).is_err()); + } +} diff --git a/rust/lance/src/dataset/mem_wal/scanner/exec/vector.rs b/rust/lance/src/dataset/mem_wal/scanner/exec/vector.rs new file mode 100644 index 00000000000..770eca8b79d --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/scanner/exec/vector.rs @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! VectorIndexExec - IVF-PQ vector search with MVCC visibility. + +use std::any::Any; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use arrow_array::{cast::AsArray, FixedSizeListArray, Float32Array, RecordBatch}; +use arrow_schema::{DataType, Field, Schema, SchemaRef}; +use datafusion::common::stats::Precision; +use datafusion::error::Result as DataFusionResult; +use datafusion::execution::TaskContext; +use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; +use datafusion::physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet}; +use datafusion::physical_plan::stream::RecordBatchStreamAdapter; +use datafusion::physical_plan::{ + DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties, + SendableRecordBatchStream, Statistics, +}; +use datafusion_physical_expr::EquivalenceProperties; +use futures::stream::{self, StreamExt}; +use lance_core::{Error, Result}; +use snafu::location; + +use super::super::builder::VectorQuery; +use crate::dataset::mem_wal::write::{IndexRegistry, LockFreeBatchStore}; + +/// Distance column name in output. +pub const DISTANCE_COLUMN: &str = "_distance"; + +/// ExecutionPlan node that queries IVF-PQ vector index with MVCC visibility. +pub struct VectorIndexExec { + batch_store: Arc, + indexes: Arc, + query: VectorQuery, + max_visible_batch_position: usize, + projection: Option>, + output_schema: SchemaRef, + properties: PlanProperties, + metrics: ExecutionPlanMetricsSet, + index_name: String, +} + +impl Debug for VectorIndexExec { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VectorIndexExec") + .field("column", &self.query.column) + .field("k", &self.query.k) + .field("nprobes", &self.query.nprobes) + .field( + "max_visible_batch_position", + &self.max_visible_batch_position, + ) + .field("index_name", &self.index_name) + .finish() + } +} + +impl VectorIndexExec { + /// Create a new VectorIndexExec. 
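+ ///
+ /// A usage sketch (assumes `query` is a `VectorQuery` targeting a column that
+ /// has an IVF-PQ index registered in `indexes`):
+ ///
+ /// ```ignore
+ /// let exec = VectorIndexExec::new(batch_store, indexes, query, 0, None, schema)?;
+ /// let stream = exec.execute(0, Arc::new(TaskContext::default()))?;
+ /// let batches: Vec<RecordBatch> = stream.try_collect().await?;
+ /// ```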
+ /// + /// # Arguments + /// + /// * `batch_store` - Lock-free batch store containing data + /// * `indexes` - Index registry with IVF-PQ indexes + /// * `query` - Vector query parameters + /// * `max_visible_batch_position` - MVCC visibility sequence number + /// * `projection` - Optional column indices to project + /// * `output_schema` - Schema after projection (will add _distance column) + pub fn new( + batch_store: Arc, + indexes: Arc, + query: VectorQuery, + max_visible_batch_position: usize, + projection: Option>, + base_schema: SchemaRef, + ) -> Result { + // Find the index for this column + let column = &query.column; + let coverage = indexes.get_coverage(); + let index_name = coverage + .ivfpq_coverage + .iter() + .find_map(|(name, _)| { + indexes + .get_ivf_pq(name) + .filter(|idx| idx.column_name() == column) + .map(|_| name.clone()) + }) + .ok_or_else(|| { + Error::invalid_input( + format!("No IVF-PQ index found for column '{}'", column), + location!(), + ) + })?; + + // Add _distance column to output schema + let mut fields: Vec = base_schema + .fields() + .iter() + .map(|f| f.as_ref().clone()) + .collect(); + fields.push(Field::new(DISTANCE_COLUMN, DataType::Float32, false)); + let output_schema = Arc::new(Schema::new(fields)); + + let properties = PlanProperties::new( + EquivalenceProperties::new(output_schema.clone()), + Partitioning::UnknownPartitioning(1), + EmissionType::Incremental, + Boundedness::Bounded, + ); + + Ok(Self { + batch_store, + indexes, + query, + max_visible_batch_position, + projection, + output_schema, + properties, + metrics: ExecutionPlanMetricsSet::new(), + index_name, + }) + } + + /// Compute the maximum visible row position based on max_visible_batch_position. + /// + /// Returns the last row position that is visible at the given max_visible_batch_position, + /// or None if no batches are visible. + fn compute_max_visible_row(&self) -> Option { + let mut max_visible_row_exclusive: u64 = 0; + let mut current_row: u64 = 0; + + for (batch_position, stored_batch) in self.batch_store.iter().enumerate() { + let batch_end = current_row + stored_batch.num_rows as u64; + if batch_position <= self.max_visible_batch_position { + max_visible_row_exclusive = batch_end; + } + current_row = batch_end; + } + + if max_visible_row_exclusive > 0 { + Some(max_visible_row_exclusive - 1) + } else { + None + } + } + + /// Query the index and return matching rows with distances. + fn query_index(&self) -> Vec<(f32, u64)> { + let Some(index) = self.indexes.get_ivf_pq(&self.index_name) else { + return vec![]; + }; + + // Compute max visible row for MVCC filtering + let Some(max_visible_row) = self.compute_max_visible_row() else { + return vec![]; + }; + + // Convert query vector to FixedSizeListArray + let query_array = self.query.query_vector.as_ref(); + + // Try to interpret as FixedSizeList + let fsl = if let Some(fsl) = query_array.as_fixed_size_list_opt() { + fsl.clone() + } else { + // If it's a primitive array, wrap it in a FixedSizeList (single row) + let values = self.query.query_vector.clone(); + let dim = values.len() as i32; + let field = Arc::new(Field::new("item", values.data_type().clone(), true)); + match FixedSizeListArray::try_new(field, dim, values, None) { + Ok(arr) => arr, + Err(_) => return vec![], + } + }; + + // Search the index with visibility filtering + index + .search(&fsl, self.query.k, self.query.nprobes, max_visible_row) + .unwrap_or_default() + } + + /// Materialize rows from batch store with distance column. 
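+ ///
+ /// Row positions are grouped by the batch that owns them, the selected rows
+ /// are gathered with Arrow's `take` kernel, and the distances are appended as
+ /// the trailing `_distance` column. The gather step in isolation, as a sketch:
+ ///
+ /// ```ignore
+ /// // Pick rows 2 and 0 (in that order) from a single column.
+ /// let indices = UInt32Array::from(vec![2u32, 0]);
+ /// let picked = arrow_select::take::take(column.as_ref(), &indices, None)?;
+ /// ```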
+ fn materialize_rows(&self, results: &[(f32, u64)]) -> DataFusionResult> { + if results.is_empty() { + return Ok(vec![]); + } + + // Build batch ranges + let mut batch_ranges = Vec::new(); + let mut current_row = 0usize; + + for stored_batch in self.batch_store.iter() { + let batch_start = current_row; + let batch_end = current_row + stored_batch.num_rows; + batch_ranges.push((batch_start, batch_end)); + current_row = batch_end; + } + + // Group rows by batch + let mut batches_data: std::collections::HashMap> = + std::collections::HashMap::new(); + + for &(distance, pos) in results { + let pos = pos as usize; + for (batch_id, &(start, end)) in batch_ranges.iter().enumerate() { + if pos >= start && pos < end { + batches_data + .entry(batch_id) + .or_default() + .push((pos - start, distance)); + break; + } + } + } + + let mut all_batches = Vec::new(); + + for (batch_id, rows_with_dist) in batches_data { + if let Some(stored) = self.batch_store.get(batch_id) { + let rows: Vec = rows_with_dist.iter().map(|&(r, _)| r as u32).collect(); + let distances: Vec = rows_with_dist.iter().map(|&(_, d)| d).collect(); + + let indices = arrow_array::UInt32Array::from(rows); + + let mut columns: Vec> = stored + .data + .columns() + .iter() + .map(|col| arrow_select::take::take(col.as_ref(), &indices, None).unwrap()) + .collect(); + + // Add distance column + columns.push(Arc::new(Float32Array::from(distances))); + + // Apply projection if needed (excluding distance column which is always included) + let final_columns = if let Some(ref proj_indices) = self.projection { + let mut projected: Vec<_> = + proj_indices.iter().map(|&i| columns[i].clone()).collect(); + // Always include distance as last column + projected.push(columns.last().unwrap().clone()); + projected + } else { + columns + }; + + let batch = RecordBatch::try_new(self.output_schema.clone(), final_columns)?; + all_batches.push(batch); + } + } + + Ok(all_batches) + } +} + +impl DisplayAs for VectorIndexExec { + fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter<'_>) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "VectorIndexExec: index={}, column={}, k={}, nprobes={}, max_visible_batch_position={}", + self.index_name, + self.query.column, + self.query.k, + self.query.nprobes, + self.max_visible_batch_position + ) + } + DisplayFormatType::TreeRender => { + write!( + f, + "VectorIndexExec\nindex={}\ncolumn={}\nk={}\nnprobes={}\nmax_visible_batch_position={}", + self.index_name, + self.query.column, + self.query.k, + self.query.nprobes, + self.max_visible_batch_position + ) + } + } + } +} + +impl ExecutionPlan for VectorIndexExec { + fn name(&self) -> &str { + "VectorIndexExec" + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn schema(&self) -> SchemaRef { + self.output_schema.clone() + } + + fn children(&self) -> Vec<&Arc> { + vec![] + } + + fn with_new_children( + self: Arc, + children: Vec>, + ) -> DataFusionResult> { + if !children.is_empty() { + return Err(datafusion::error::DataFusionError::Internal( + "VectorIndexExec does not have children".to_string(), + )); + } + Ok(self) + } + + fn execute( + &self, + _partition: usize, + _context: Arc, + ) -> DataFusionResult { + // Query the index (visibility filtering happens inside search) + let results = self.query_index(); + + // Materialize the rows + let batches = self.materialize_rows(&results)?; + + let stream = stream::iter(batches.into_iter().map(Ok)).boxed(); + + Ok(Box::pin(RecordBatchStreamAdapter::new( + 
self.output_schema.clone(), + stream, + ))) + } + + fn partition_statistics(&self, _partition: Option) -> DataFusionResult { + Ok(Statistics { + num_rows: Precision::Exact(self.query.k), + total_byte_size: Precision::Absent, + column_statistics: vec![], + }) + } + + fn metrics(&self) -> Option { + Some(self.metrics.clone_inner()) + } + + fn properties(&self) -> &PlanProperties { + &self.properties + } + + fn supports_limit_pushdown(&self) -> bool { + true // Vector search naturally supports limit + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // Note: Full tests for VectorIndexExec require setting up IVF-PQ index + // with trained centroids and codebook, which is complex. + // Basic structure tests are included here. + + #[test] + fn test_distance_column_name() { + assert_eq!(DISTANCE_COLUMN, "_distance"); + } +} diff --git a/rust/lance/src/dataset/mem_wal/util.rs b/rust/lance/src/dataset/mem_wal/util.rs new file mode 100644 index 00000000000..28e0a8ed5d4 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/util.rs @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Utility functions for MemWAL operations. + +use object_store::path::Path; +use uuid::Uuid; + +/// Bit-reverse a 64-bit integer. +/// +/// Used for file naming to distribute files evenly across object store keyspace, +/// optimizing S3 throughput by spreading sequential writes across internal partitions. +/// +/// # Example +/// ```ignore +/// // 5 in binary: 000...101 +/// // Reversed: 101...000 +/// assert_eq!(bit_reverse_u64(5), 0xa000000000000000); +/// ``` +pub fn bit_reverse_u64(n: u64) -> u64 { + n.reverse_bits() +} + +/// Generate a bit-reversed filename for a given ID. +/// +/// # Arguments +/// * `id` - The sequential ID to convert +/// * `ext` - File extension (e.g., "binpb", "lance") +/// +/// # Returns +/// A string like "1010000000000000000000000000000000000000000000000000000000000000.binpb" +/// for id=5, ext="binpb" +pub fn bit_reversed_filename(id: u64, ext: &str) -> String { + format!("{:064b}.{}", bit_reverse_u64(id), ext) +} + +/// Parse a bit-reversed filename back to the original ID. +/// +/// # Arguments +/// * `filename` - The filename without path (e.g., "1010...0000.binpb") +/// +/// # Returns +/// The original ID, or None if parsing fails +pub fn parse_bit_reversed_filename(filename: &str) -> Option { + let stem = filename.split('.').next()?; + if stem.len() != 64 || !stem.chars().all(|c| c == '0' || c == '1') { + return None; + } + let reversed = u64::from_str_radix(stem, 2).ok()?; + Some(bit_reverse_u64(reversed)) +} + +/// Base path for a region within the MemWAL directory. +/// +/// Returns: `{base_path}/_mem_wal/{region_id}/` +pub fn region_base_path(base_path: &Path, region_id: &Uuid) -> Path { + base_path + .child("_mem_wal") + .child(region_id.as_hyphenated().to_string()) +} + +/// Path to the WAL directory for a region. +/// +/// Returns: `{base_path}/_mem_wal/{region_id}/wal/` +pub fn region_wal_path(base_path: &Path, region_id: &Uuid) -> Path { + region_base_path(base_path, region_id).child("wal") +} + +/// Path to the manifest directory for a region. +/// +/// Returns: `{base_path}/_mem_wal/{region_id}/manifest/` +pub fn region_manifest_path(base_path: &Path, region_id: &Uuid) -> Path { + region_base_path(base_path, region_id).child("manifest") +} + +/// Path to a flushed MemTable directory. 
+/// +/// Returns: `{base_path}/_mem_wal/{region_id}/{random_hash}_gen_{generation}/` +pub fn flushed_memtable_path( + base_path: &Path, + region_id: &Uuid, + random_hash: &str, + generation: u64, +) -> Path { + region_base_path(base_path, region_id).child(format!("{}_gen_{}", random_hash, generation)) +} + +/// Generate an 8-character random hex string for flushed MemTable directories. +pub fn generate_random_hash() -> String { + let bytes: [u8; 4] = rand::random(); + format!( + "{:02x}{:02x}{:02x}{:02x}", + bytes[0], bytes[1], bytes[2], bytes[3] + ) +} + +/// WAL entry filename. +/// +/// Returns bit-reversed filename with .lance extension (Lance format). +pub fn wal_entry_filename(wal_entry_position: u64) -> String { + bit_reversed_filename(wal_entry_position, "lance") +} + +/// Region manifest filename. +/// +/// Returns bit-reversed filename with .binpb extension. +pub fn manifest_filename(version: u64) -> String { + bit_reversed_filename(version, "binpb") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_bit_reverse_u64() { + // 0 should remain 0 + assert_eq!(bit_reverse_u64(0), 0); + + // 1 (least significant bit) becomes most significant + assert_eq!(bit_reverse_u64(1), 0x8000000000000000); + + // 5 = 101 in binary, reversed = 101 followed by 61 zeros + assert_eq!(bit_reverse_u64(5), 0xa000000000000000); + + // Double reversal should give original + for i in [0u64, 1, 2, 5, 100, 1000, u64::MAX / 2, u64::MAX] { + assert_eq!(bit_reverse_u64(bit_reverse_u64(i)), i); + } + } + + #[test] + fn test_bit_reversed_filename() { + let filename = bit_reversed_filename(1, "binpb"); + assert_eq!( + filename, + "1000000000000000000000000000000000000000000000000000000000000000.binpb" + ); + + let filename = bit_reversed_filename(5, "lance"); + assert_eq!( + filename, + "1010000000000000000000000000000000000000000000000000000000000000.lance" + ); + } + + #[test] + fn test_parse_bit_reversed_filename() { + // Round-trip test + for id in [1u64, 5, 100, 1000, u64::MAX / 2] { + let filename = bit_reversed_filename(id, "binpb"); + let parsed = parse_bit_reversed_filename(&filename); + assert_eq!(parsed, Some(id), "Failed round-trip for id={}", id); + } + + // Invalid inputs + assert_eq!(parse_bit_reversed_filename("invalid"), None); + assert_eq!(parse_bit_reversed_filename("123.binpb"), None); + assert_eq!( + parse_bit_reversed_filename( + "10100000000000000000000000000000000000000000000000000000000000002.binpb" + ), + None + ); + } + + #[test] + fn test_region_paths() { + let base_path = Path::from("my/dataset"); + let region_id = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap(); + + assert_eq!( + region_base_path(&base_path, ®ion_id).as_ref(), + "my/dataset/_mem_wal/550e8400-e29b-41d4-a716-446655440000" + ); + + assert_eq!( + region_wal_path(&base_path, ®ion_id).as_ref(), + "my/dataset/_mem_wal/550e8400-e29b-41d4-a716-446655440000/wal" + ); + + assert_eq!( + region_manifest_path(&base_path, ®ion_id).as_ref(), + "my/dataset/_mem_wal/550e8400-e29b-41d4-a716-446655440000/manifest" + ); + + assert_eq!( + flushed_memtable_path(&base_path, ®ion_id, "a1b2c3d4", 5).as_ref(), + "my/dataset/_mem_wal/550e8400-e29b-41d4-a716-446655440000/a1b2c3d4_gen_5" + ); + + // Test with empty base path + let empty_base = Path::from(""); + assert_eq!( + region_wal_path(&empty_base, ®ion_id).as_ref(), + "_mem_wal/550e8400-e29b-41d4-a716-446655440000/wal" + ); + } + + #[test] + fn test_generate_random_hash() { + let hash = generate_random_hash(); + assert_eq!(hash.len(), 8); + 
assert!(hash.chars().all(|c| c.is_ascii_hexdigit())); + + // Should generate different values (with very high probability) + let hash2 = generate_random_hash(); + assert_ne!(hash, hash2); + } +} diff --git a/rust/lance/src/dataset/mem_wal/watchable_cell.rs b/rust/lance/src/dataset/mem_wal/watchable_cell.rs new file mode 100644 index 00000000000..7e132240a5d --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/watchable_cell.rs @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Watchable cell and monotonic sequence utilities. + +use std::sync::atomic::{AtomicU64, Ordering}; + +use lance_core::{Error, Result}; + +/// A cell that can be written to once and read by multiple readers. +/// +/// Used for durability notifications where multiple callers may need to await the same result. +#[derive(Clone, Debug)] +pub struct WatchableOnceCell { + rx: tokio::sync::watch::Receiver>, + tx: tokio::sync::watch::Sender>, +} + +/// Reader handle for a WatchableOnceCell. +/// +/// Can be cloned and shared across tasks to await the same value. +#[derive(Clone, Debug)] +pub struct WatchableOnceCellReader { + rx: tokio::sync::watch::Receiver>, +} + +impl WatchableOnceCell { + /// Create a new empty cell. + pub fn new() -> Self { + let (tx, rx) = tokio::sync::watch::channel(None); + Self { rx, tx } + } + + /// Write a value to the cell. + /// + /// Only the first write takes effect; subsequent writes are ignored. + pub fn write(&self, val: T) { + self.tx.send_if_modified(|v| { + if v.is_some() { + return false; + } + v.replace(val); + true + }); + } + + /// Get a reader handle for this cell. + pub fn reader(&self) -> WatchableOnceCellReader { + WatchableOnceCellReader { + rx: self.rx.clone(), + } + } +} + +impl Default for WatchableOnceCell { + fn default() -> Self { + Self::new() + } +} + +impl WatchableOnceCellReader { + /// Read the current value without waiting. + /// + /// Returns `None` if no value has been written yet. + pub fn read(&self) -> Option { + self.rx.borrow().clone() + } + + /// Wait for a value to be written. + /// + /// Returns immediately if a value is already present. + pub async fn await_value(&mut self) -> T { + self.rx + .wait_for(|v| v.is_some()) + .await + .expect("watch channel closed") + .clone() + .expect("no value found") + } +} + +/// A monotonically increasing atomic sequence counter. +/// +/// Used for generating sequence numbers in MVCC implementations. +/// The sequence only ever increases, never decreases. +#[derive(Debug)] +pub struct MonotonicSeq { + val: AtomicU64, +} + +impl MonotonicSeq { + /// Create a new sequence starting at the given value. + pub fn new(initial_value: u64) -> Self { + Self { + val: AtomicU64::new(initial_value), + } + } + + /// Get the next sequence number, incrementing the counter. + /// + /// Returns the new value after incrementing. + pub fn next(&self) -> u64 { + self.val.fetch_add(1, Ordering::SeqCst) + 1 + } + + /// Store a specific value. + pub fn store(&self, value: u64) { + self.val.store(value, Ordering::SeqCst); + } + + /// Load the current value. + pub fn load(&self) -> u64 { + self.val.load(Ordering::SeqCst) + } + + /// Store a value only if it's greater than the current value. + /// + /// This is useful for updating the last persisted sequence + /// without going backwards. 
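+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch mirroring the `test_monotonic_seq_store_if_greater`
+    /// unit test below:
+    ///
+    /// ```ignore
+    /// let seq = MonotonicSeq::new(10);
+    /// seq.store_if_greater(15); // 15 > 10, value becomes 15
+    /// seq.store_if_greater(12); // 12 < 15, value stays 15
+    /// assert_eq!(seq.load(), 15);
+    /// ```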
+ pub fn store_if_greater(&self, value: u64) { + self.val.fetch_max(value, Ordering::SeqCst); + } +} + +impl Default for MonotonicSeq { + fn default() -> Self { + Self::new(0) + } +} + +/// Closed status for channels. +/// +/// Used to track whether a channel was closed normally or due to an error. +#[derive(Clone, Debug)] +pub enum ClosedStatus { + /// Channel closed normally (shutdown) + Normal, + /// Channel closed due to an error + Error(String), +} + +impl ClosedStatus { + /// Create a new error status with the given message. + pub fn error(msg: impl Into) -> Self { + Self::Error(msg.into()) + } +} + +/// Extension trait for safely sending to unbounded channels. +/// +/// When a channel is closed, this trait helps determine if it was +/// due to a normal shutdown or an error condition. +pub trait SendSafely { + /// Attempts to send a message to the channel. + /// + /// If the channel is closed, returns an error based on the closed_result. + fn send_safely( + &self, + closed_result_reader: WatchableOnceCellReader, + message: T, + ) -> Result<()>; +} + +impl SendSafely for tokio::sync::mpsc::UnboundedSender { + fn send_safely( + &self, + closed_result_reader: WatchableOnceCellReader, + message: T, + ) -> Result<()> { + match self.send(message) { + Ok(_) => Ok(()), + Err(_) => { + if let Some(status) = closed_result_reader.read() { + match status { + ClosedStatus::Normal => { + Err(Error::io("Channel closed", snafu::location!())) + } + ClosedStatus::Error(msg) => Err(Error::io( + format!("Channel closed due to error: {}", msg), + snafu::location!(), + )), + } + } else { + Err(Error::io( + "Failed to send message to channel", + snafu::location!(), + )) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_watchable_once_cell_write_once() { + let cell = WatchableOnceCell::new(); + let reader = cell.reader(); + + assert_eq!(reader.read(), None); + + cell.write(42); + assert_eq!(reader.read(), Some(42)); + + // Second write is ignored + cell.write(100); + assert_eq!(reader.read(), Some(42)); + } + + #[tokio::test] + async fn test_watchable_once_cell_await() { + let cell = WatchableOnceCell::new(); + let mut reader = cell.reader(); + + let handle = tokio::spawn(async move { reader.await_value().await }); + + // Brief delay to ensure the task is waiting + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + + cell.write(123); + + let result = handle.await.unwrap(); + assert_eq!(result, 123); + } + + #[tokio::test] + async fn test_watchable_once_cell_multiple_readers() { + let cell = WatchableOnceCell::new(); + let mut reader1 = cell.reader(); + let mut reader2 = cell.reader(); + + let h1 = tokio::spawn(async move { reader1.await_value().await }); + let h2 = tokio::spawn(async move { reader2.await_value().await }); + + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + + cell.write(456); + + assert_eq!(h1.await.unwrap(), 456); + assert_eq!(h2.await.unwrap(), 456); + } + + #[test] + fn test_monotonic_seq_next() { + let seq = MonotonicSeq::new(0); + + assert_eq!(seq.next(), 1); + assert_eq!(seq.next(), 2); + assert_eq!(seq.next(), 3); + assert_eq!(seq.load(), 3); + } + + #[test] + fn test_monotonic_seq_store_if_greater() { + let seq = MonotonicSeq::new(10); + + // Should update - 15 > 10 + seq.store_if_greater(15); + assert_eq!(seq.load(), 15); + + // Should not update - 12 < 15 + seq.store_if_greater(12); + assert_eq!(seq.load(), 15); + + // Should update - 20 > 15 + seq.store_if_greater(20); + assert_eq!(seq.load(), 20); + 
} + + #[test] + fn test_monotonic_seq_concurrent() { + use std::sync::Arc; + use std::thread; + + let seq = Arc::new(MonotonicSeq::new(0)); + let mut handles = vec![]; + + for _ in 0..10 { + let seq = seq.clone(); + handles.push(thread::spawn(move || { + for _ in 0..100 { + seq.next(); + } + })); + } + + for h in handles { + h.join().unwrap(); + } + + // 10 threads * 100 increments = 1000 + assert_eq!(seq.load(), 1000); + } +} diff --git a/rust/lance/src/dataset/mem_wal/write.rs b/rust/lance/src/dataset/mem_wal/write.rs new file mode 100644 index 00000000000..da136fcd04d --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/write.rs @@ -0,0 +1,1577 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +#![allow(clippy::print_stderr)] + +//! Write path for MemWAL. +//! +//! This module contains all components for the write path: +//! - [`RegionWriter`] - Main writer interface for a single region +//! - [`MemTable`] - In-memory table storing Arrow RecordBatches +//! - [`WalFlusher`] - Write-ahead log buffer for durability (Arrow IPC format) +//! - [`IndexRegistry`] - In-memory index management +//! - [`MemTableFlusher`] - Flush MemTable to storage as single Lance file + +mod batch_write; +mod flush; +mod stats; +mod wal; + +use std::collections::VecDeque; +use std::sync::Arc; +use std::time::Instant; + +use arrow_array::RecordBatch; +use arrow_schema::Schema as ArrowSchema; +use async_trait::async_trait; +use lance_core::datatypes::Schema; +use lance_core::{Error, Result}; +use lance_index::mem_wal::RegionManifest; +use lance_io::object_store::ObjectStore; +use object_store::path::Path; +use snafu::location; +use tokio::sync::{mpsc, RwLock}; +use tracing::{debug, info}; +use uuid::Uuid; + +pub use super::batch_store::{LockFreeBatchStore, StoreFull, StoredBatch}; +pub use super::indexes::{ + BTreeIndexConfig, BTreeMemIndex, CoverageResult, FtsIndexConfig, IndexCoverageInfo, + IndexRegistry, IvfPqIndexConfig, MemIndexConfig, +}; +pub use super::memtable::CacheConfig; +pub use super::memtable::MemTable; +pub use super::watchable_cell::{ + ClosedStatus, MonotonicSeq, SendSafely, WatchableOnceCell, WatchableOnceCellReader, +}; +pub use batch_write::{ + BackpressureController, BackpressureStats, DurabilityCell, DurabilityResult, DurabilityWatcher, +}; +pub use flush::MemTableFlusher; +pub use stats::{new_shared_stats, SharedWriteStats, WriteStats, WriteStatsSnapshot}; +pub use wal::{WalEntry, WalEntryData, WalFlushResult, WalFlusher}; + +use flush::TriggerMemTableFlush; +use wal::TriggerWalFlush; + +use super::config::RegionWriterConfig; +use super::dispatcher::{DurableWatcher, MessageHandler, TaskExecutor}; +use super::epoch::EpochGuard; +use super::manifest::RegionManifestStore; + +fn log_ts() -> String { + chrono::Local::now().format("%H:%M:%S%.3f").to_string() +} + +/// Result of a write operation. +#[derive(Debug)] +pub struct WriteResult { + /// Batch ID in the MemTable. + pub batch_position: usize, + /// Watcher to await durability (if durable_write enabled). + pub durable_watcher: Option, +} + +/// RegionWriter state shared across tasks. +struct WriterState { + memtable: MemTable, + last_flushed_wal_entry_position: u64, + /// Total size of frozen memtables (for backpressure). + frozen_memtable_bytes: usize, + /// Flush watchers for frozen memtables (for backpressure). + frozen_flush_watchers: VecDeque<(usize, DurabilityWatcher)>, + /// Flag to prevent duplicate memtable flush requests. 
+ flush_requested: bool, + /// Counter for WAL flush threshold crossings. + wal_flush_trigger_count: usize, + /// Last time a WAL flush was triggered (for time-based flush). + last_wal_flush_trigger_time: u64, +} + +fn start_time() -> std::time::Instant { + use std::sync::OnceLock; + static START: OnceLock = OnceLock::new(); + *START.get_or_init(std::time::Instant::now) +} + +fn now_millis() -> u64 { + start_time().elapsed().as_millis() as u64 +} + +/// Shared state for writer operations. +struct SharedWriterState { + state: Arc>, + wal_flusher: Arc, + wal_flush_tx: mpsc::UnboundedSender, + memtable_flush_tx: mpsc::UnboundedSender, + config: RegionWriterConfig, + schema: Arc, + pk_field_ids: Vec, + max_memtable_batches: usize, + max_memtable_rows: usize, + ivf_index_partition_capacity_safety_factor: usize, + index_configs: Vec, +} + +impl SharedWriterState { + #[allow(clippy::too_many_arguments)] + fn new( + state: Arc>, + wal_flusher: Arc, + wal_flush_tx: mpsc::UnboundedSender, + memtable_flush_tx: mpsc::UnboundedSender, + config: RegionWriterConfig, + schema: Arc, + pk_field_ids: Vec, + max_memtable_batches: usize, + max_memtable_rows: usize, + ivf_index_partition_capacity_safety_factor: usize, + index_configs: Vec, + ) -> Self { + Self { + state, + wal_flusher, + wal_flush_tx, + memtable_flush_tx, + config, + schema, + pk_field_ids, + max_memtable_batches, + max_memtable_rows, + ivf_index_partition_capacity_safety_factor, + index_configs, + } + } + + /// Freeze the current memtable and send it to the flush handler. + /// + /// Takes `&mut WriterState` directly since caller already holds the lock. + fn freeze_memtable(&self, state: &mut WriterState) -> Result { + let freeze_start = std::time::Instant::now(); + eprintln!("[{}][FREEZE] Starting freeze_memtable", log_ts()); + + let pending_wal_range = state.memtable.batch_store().pending_wal_flush_range(); + let last_wal_entry_position = state.last_flushed_wal_entry_position; + let old_batch_count = + state.memtable.batch_capacity() - state.memtable.remaining_batch_capacity(); + + eprintln!( + "[{}][FREEZE] Swapping memtable: old had {} batches, pending_wal_range={:?}", + log_ts(), + old_batch_count, + pending_wal_range + ); + + let old_batch_store = state.memtable.batch_store(); + let old_indexes = state.memtable.indexes_arc(); + + let next_generation = state.memtable.generation() + 1; + let mut new_memtable = MemTable::with_capacity( + self.schema.clone(), + next_generation, + self.pk_field_ids.clone(), + CacheConfig::default(), + self.max_memtable_batches, + )?; + + if !self.index_configs.is_empty() { + let indexes = Arc::new(IndexRegistry::from_configs( + &self.index_configs, + self.max_memtable_rows, + self.ivf_index_partition_capacity_safety_factor, + )?); + new_memtable.set_indexes_arc(indexes); + } + + let mut old_memtable = std::mem::replace(&mut state.memtable, new_memtable); + old_memtable.freeze(last_wal_entry_position); + let _memtable_flush_watcher = old_memtable.create_memtable_flush_completion(); + + if pending_wal_range.is_some() { + let completion_cell: WatchableOnceCell> = + WatchableOnceCell::new(); + let completion_reader = completion_cell.reader(); + old_memtable.set_wal_flush_completion(completion_reader); + + let end_batch_position = old_batch_store.len(); + eprintln!( + "[{}][FREEZE] Sending TriggerWalFlush (frozen) to WAL handler queue, end_batch_position={}", + log_ts(), + end_batch_position + ); + self.wal_flusher.trigger_flush( + old_batch_store, + old_indexes, + end_batch_position, + Some(completion_cell), + 
)?; + } + + let frozen_size = old_memtable.estimated_size(); + state.frozen_memtable_bytes += frozen_size; + state.last_flushed_wal_entry_position = last_wal_entry_position; + + let flush_watcher = old_memtable + .get_memtable_flush_watcher() + .expect("Flush watcher should exist after create_memtable_flush_completion"); + state + .frozen_flush_watchers + .push_back((frozen_size, flush_watcher)); + + let frozen_memtable = Arc::new(old_memtable); + + debug!( + "Frozen memtable generation {}, pending_count = {}", + next_generation - 1, + state.frozen_flush_watchers.len() + ); + + eprintln!( + "[{}][FREEZE] Complete: gen={} frozen_count={} new_capacity={} took={}ms", + log_ts(), + next_generation - 1, + state.frozen_flush_watchers.len(), + state.memtable.remaining_batch_capacity(), + freeze_start.elapsed().as_millis() + ); + + let _ = self.memtable_flush_tx.send(TriggerMemTableFlush { + memtable: frozen_memtable, + done: None, + }); + + Ok(next_generation) + } + + /// Track batch for WAL durability. + fn track_batch_for_wal(&self, batch_position: usize) -> DurabilityWatcher { + let _wal_watcher = self.wal_flusher.track_batch(batch_position); + // Return pre-resolved watcher for non-durable case + let cell: WatchableOnceCell = WatchableOnceCell::new(); + cell.write(DurabilityResult::ok()); + cell.reader() + } + + /// Check if memtable flush is needed and trigger if so. + /// + /// Takes `&mut WriterState` directly since caller already holds the lock. + fn maybe_trigger_memtable_flush(&self, state: &mut WriterState) -> Result<()> { + if state.flush_requested { + return Ok(()); + } + + let should_flush = state.memtable.estimated_size() >= self.config.max_memtable_size + || state.memtable.is_batch_store_full(); + + if should_flush { + state.flush_requested = true; + self.freeze_memtable(state)?; + state.flush_requested = false; + } + Ok(()) + } + + /// Check if WAL flush is needed and trigger if so. + /// + /// Takes `&mut WriterState` directly since caller already holds the lock. 
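+    ///
+    /// Two triggers are evaluated: a time-based trigger (when `max_wal_flush_interval`
+    /// has elapsed and unflushed batches exist) and a size-based trigger that fires once
+    /// per buffer-size threshold crossing. A rough sketch of the size-based arithmetic,
+    /// assuming a 10 MiB `max_wal_buffer_size` purely for illustration:
+    ///
+    /// ```ignore
+    /// // With max_wal_buffer_size = 10 MiB and 25 MiB currently buffered:
+    /// let thresholds_crossed = total_bytes / max_wal_buffer_size; // 2
+    /// // Flushes are triggered until wal_flush_trigger_count catches up to 2.
+    /// ```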
+ fn maybe_trigger_wal_flush(&self, state: &mut WriterState) { + let threshold = self.config.max_wal_buffer_size; + + let batch_count = state.memtable.batch_count(); + let total_bytes = state.memtable.estimated_size(); + let batch_store = state.memtable.batch_store(); + let indexes = state.memtable.indexes_arc(); + + // Check if there are any unflushed batches + let has_pending = batch_store.pending_wal_flush_count() > 0; + + // Check time-based trigger first + let time_trigger = if let Some(interval) = self.config.max_wal_flush_interval { + let interval_millis = interval.as_millis() as u64; + let last_trigger = state.last_wal_flush_trigger_time; + let now = now_millis(); + + // If last_trigger is 0, this is the first write - start the timer but don't flush + if last_trigger == 0 { + state.last_wal_flush_trigger_time = now; + None + } else { + let elapsed = now.saturating_sub(last_trigger); + + if elapsed >= interval_millis && has_pending { + state.last_wal_flush_trigger_time = now; + Some(now) + } else { + None + } + } + } else { + None + }; + + // If time trigger fired, send a flush message + if time_trigger.is_some() { + eprintln!( + "[{}][WAL_TIME_TRIGGER] total_bytes={}, interval elapsed, triggering flush to batch #{}", + log_ts(), + total_bytes, + batch_count + ); + let _ = self.wal_flush_tx.send(TriggerWalFlush { + batch_store, + indexes, + end_batch_position: batch_count, + done: None, + }); + return; + } + + // Check size-based trigger + if threshold == 0 { + return; + } + + // Calculate how many thresholds have been crossed (1 at 10MB, 2 at 20MB, etc.) + let thresholds_crossed = total_bytes / threshold; + + // Trigger flush for each unclaimed threshold crossing + while state.wal_flush_trigger_count < thresholds_crossed { + state.wal_flush_trigger_count += 1; + // Update last trigger time so time-based trigger doesn't fire immediately after + state.last_wal_flush_trigger_time = now_millis(); + + eprintln!( + "[{}][WAL_SIZE_TRIGGER] total_bytes={}, threshold #{} crossed ({}MB), triggering flush to batch #{}", + log_ts(), + total_bytes, + state.wal_flush_trigger_count, + state.wal_flush_trigger_count * threshold / (1024 * 1024), + batch_count + ); + // Trigger WAL flush with captured batch range + let _ = self.wal_flush_tx.send(TriggerWalFlush { + batch_store: batch_store.clone(), + indexes: indexes.clone(), + end_batch_position: batch_count, + done: None, + }); + } + } +} + +impl SharedWriterState { + fn unflushed_memtable_bytes(&self) -> usize { + // Total unflushed bytes = active memtable + all frozen memtables + self.state + .try_read() + .ok() + .map(|s| { + let active = s.memtable.estimated_size(); + active + s.frozen_memtable_bytes + }) + .unwrap_or(0) + } + + fn oldest_memtable_watcher(&self) -> Option { + // Return a watcher for the oldest frozen memtable's flush completion. + // If no frozen memtables, return the active memtable's watcher since it will + // eventually be frozen and flushed. + self.state.try_read().ok().and_then(|s| { + // First try frozen memtable watchers + s.frozen_flush_watchers + .front() + .map(|(_, watcher)| watcher.clone()) + // If no frozen memtables, use active memtable's watcher + .or_else(|| s.memtable.get_memtable_flush_watcher()) + }) + } +} + +/// Main writer for a MemWAL region. 
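+///
+/// # Example
+///
+/// A minimal usage sketch based on the unit tests in this module; the object store,
+/// paths, config, schema, and batch are assumed to be prepared by the caller:
+///
+/// ```ignore
+/// let writer = RegionWriter::open(store, base_path, base_uri, config, schema.clone(), vec![]).await?;
+/// let result = writer.put(batch).await?;
+/// println!("wrote batch at position {}", result.batch_position);
+/// writer.close().await?;
+/// ```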
+pub struct RegionWriter { + config: RegionWriterConfig, + epoch_guard: Arc, + state: Arc>, + wal_flusher: Arc, + task_executor: Arc, + manifest_store: Arc, + stats: SharedWriteStats, + writer_state: Arc, + backpressure: BackpressureController, +} + +impl RegionWriter { + /// Open or create a RegionWriter. + /// + /// The `base_path` should come from `ObjectStore::from_uri()` to ensure + /// WAL files are written inside the dataset directory. + pub async fn open( + object_store: Arc, + base_path: Path, + base_uri: impl Into, + config: RegionWriterConfig, + schema: Arc, + index_configs: Vec, + ) -> Result { + let base_uri = base_uri.into(); + let region_id = config.region_id; + let manifest_store = Arc::new(RegionManifestStore::new( + object_store.clone(), + &base_path, + region_id, + config.manifest_scan_batch_size, + )); + + // Claim the region (epoch-based fencing) + let (epoch_guard, manifest) = + EpochGuard::claim(manifest_store.clone(), config.region_spec_id).await?; + let epoch_guard = Arc::new(epoch_guard); + + info!( + "Opened RegionWriter for region {} (epoch {}, generation {})", + region_id, + epoch_guard.epoch(), + manifest.current_generation + ); + + // Create MemTable with primary key field IDs from schema + let lance_schema = Schema::try_from(schema.as_ref())?; + let pk_field_ids: Vec = lance_schema + .unenforced_primary_key() + .iter() + .map(|f| f.id) + .collect(); + let mut memtable = MemTable::with_capacity( + schema.clone(), + manifest.current_generation, + pk_field_ids.clone(), + CacheConfig::default(), + config.max_memtable_batches, + )?; + + // Create indexes if configured and set them on the MemTable + // Indexes are always created when index_configs is non-empty + // (they will be updated either sync or async based on config) + if !index_configs.is_empty() { + let indexes = Arc::new(IndexRegistry::from_configs( + &index_configs, + config.max_memtable_rows, + config.ivf_index_partition_capacity_safety_factor, + )?); + memtable.set_indexes_arc(indexes); + } + + let state = Arc::new(RwLock::new(WriterState { + memtable, + last_flushed_wal_entry_position: manifest.wal_id_last_seen, + frozen_memtable_bytes: 0, + frozen_flush_watchers: VecDeque::new(), + flush_requested: false, + wal_flush_trigger_count: 0, + last_wal_flush_trigger_time: 0, + })); + + // Create WAL flusher + let mut wal_flusher = WalFlusher::new( + &base_path, + region_id, + epoch_guard.epoch(), + manifest.wal_id_last_seen + 1, + ); + wal_flusher.set_object_store(object_store.clone()); + + // Create channels for background tasks + let (wal_flush_tx, wal_flush_rx) = mpsc::unbounded_channel(); + let (memtable_flush_tx, memtable_flush_rx) = mpsc::unbounded_channel(); + + wal_flusher.set_flush_channel(wal_flush_tx.clone()); + let wal_flusher = Arc::new(wal_flusher); + + // Create flusher + let flusher = Arc::new(MemTableFlusher::new( + object_store.clone(), + base_path, + base_uri, + region_id, + manifest_store.clone(), + )); + + // Create stats collector + let stats = new_shared_stats(); + + let backpressure = BackpressureController::new(config.clone()); + + // Create task executor + let task_executor = Arc::new(TaskExecutor::new()); + + // Start background WAL flush handler + // The WAL flush handler does parallel WAL I/O + index updates + eprintln!( + "[{}][WRITER] Creating wal_flusher with flush_interval={:?}", + log_ts(), + config.max_wal_flush_interval + ); + let wal_handler = WalFlushHandler::new(wal_flusher.clone(), state.clone(), stats.clone()); + task_executor.add_handler( + 
"wal_flusher".to_string(), + Box::new(wal_handler), + wal_flush_rx, + )?; + + // Start background MemTable flush handler + // Note: Freezing is handled by SharedWriterState, not this handler. + // This handler only processes frozen memtables from the queue. + let memtable_handler = + MemTableFlushHandler::new(state.clone(), flusher, epoch_guard.clone(), stats.clone()); + eprintln!("[{}][WRITER] Adding memtable_flusher handler", log_ts()); + task_executor.add_handler( + "memtable_flusher".to_string(), + Box::new(memtable_handler), + memtable_flush_rx, + )?; + eprintln!("[{}][WRITER] memtable_flusher handler added", log_ts()); + + // Create shared writer state for put() operations + let writer_state = Arc::new(SharedWriterState::new( + state.clone(), + wal_flusher.clone(), + wal_flush_tx, + memtable_flush_tx, + config.clone(), + schema.clone(), + pk_field_ids, + config.max_memtable_batches, + config.max_memtable_rows, + config.ivf_index_partition_capacity_safety_factor, + index_configs, + )); + + Ok(Self { + config, + epoch_guard, + state, + wal_flusher, + task_executor, + manifest_store, + stats, + writer_state, + backpressure, + }) + } + + /// Write a record batch to the region. + /// + /// # Arguments + /// + /// * `batch` - The record batch to write + /// + /// # Returns + /// + /// A WriteResult with fragment ID and optional durability watcher. + /// + /// # Note + /// + /// Fencing is detected lazily during WAL flush via atomic writes. + /// If another writer has taken over, the WAL flush will fail with + /// `AlreadyExists`, indicating this writer has been fenced. + pub async fn put(&self, batch: RecordBatch) -> Result { + static PUT_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); + let count = PUT_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + if count % 100 == 0 { + eprintln!("[{}][PUT] put #{}", log_ts(), count); + } + + let num_rows = batch.num_rows(); + if num_rows == 0 { + return Err(Error::invalid_input( + "Cannot write empty batch", + location!(), + )); + } + + // Apply backpressure if needed (before acquiring main lock) + let writer_state = &self.writer_state; + let bp_start = std::time::Instant::now(); + self.backpressure + .maybe_apply_backpressure(|| { + ( + writer_state.unflushed_memtable_bytes(), + writer_state.oldest_memtable_watcher(), + ) + }) + .await?; + let bp_elapsed = bp_start.elapsed(); + if bp_elapsed.as_millis() > 1 { + debug!( + "[{}][BP] waited {}ms | unflushed_memtable_bytes={}", + log_ts(), + bp_elapsed.as_millis(), + writer_state.unflushed_memtable_bytes() + ); + } + + let start = std::time::Instant::now(); + + // Acquire write lock for entire operation (atomic approach) + let (batch_position, durable_watcher, batch_store, indexes) = { + let mut state = self.state.write().await; + + // 1. Insert batch into memtable + let (batch_position, _row_offset, _estimated_size) = + state.memtable.insert_batch_only(batch).await?; + + // 2. Track batch for WAL durability + let durable_watcher = self.writer_state.track_batch_for_wal(batch_position); + + // 3. Check if WAL flush should be triggered + self.writer_state.maybe_trigger_wal_flush(&mut state); + + // 4. 
Check if memtable flush is needed + if let Err(e) = self.writer_state.maybe_trigger_memtable_flush(&mut state) { + tracing::warn!("Failed to trigger memtable flush: {}", e); + } + + // Get batch_store and indexes while we have the lock (for durable_write case) + let batch_store = state.memtable.batch_store(); + let indexes = state.memtable.indexes_arc(); + + (batch_position, durable_watcher, batch_store, indexes) + }; // Lock released here + + self.stats.record_put(start.elapsed()); + + debug!( + "Wrote {} rows to region {} (batch_position={})", + num_rows, self.config.region_id, batch_position + ); + + // Wait for durability if configured (outside the lock) + let durable_watcher = if self.config.durable_write { + // Must trigger a flush to ensure durability (flush up to and including this batch) + self.wal_flusher + .trigger_flush(batch_store, indexes, batch_position + 1, None)?; + durable_watcher.clone().await_value().await.into_result()?; + None + } else { + None + }; + + Ok(WriteResult { + batch_position, + durable_watcher, + }) + } + + /// Get a snapshot of current write statistics. + pub fn stats(&self) -> WriteStatsSnapshot { + self.stats.snapshot() + } + + /// Get the shared stats handle (for external monitoring). + pub fn stats_handle(&self) -> SharedWriteStats { + self.stats.clone() + } + + /// Get the current region manifest. + pub async fn manifest(&self) -> Result> { + self.manifest_store.read_latest().await + } + + /// Get the writer's epoch. + pub fn epoch(&self) -> u64 { + self.epoch_guard.epoch() + } + + /// Get the region ID. + pub fn region_id(&self) -> Uuid { + self.config.region_id + } + + /// Get current MemTable statistics. + pub async fn memtable_stats(&self) -> MemTableStats { + let state = self.state.read().await; + MemTableStats { + row_count: state.memtable.row_count(), + batch_count: state.memtable.batch_count(), + estimated_size: state.memtable.estimated_size(), + generation: state.memtable.generation(), + } + } + + /// Get WAL statistics. + pub fn wal_stats(&self) -> WalStats { + WalStats { + next_wal_entry_position: self.wal_flusher.next_wal_entry_position(), + } + } + + /// Close the writer gracefully. + /// + /// Flushes pending data and shuts down background tasks. + pub async fn close(self) -> Result<()> { + info!("Closing RegionWriter for region {}", self.config.region_id); + + // Send final WAL flush message and wait for completion + let state = self.state.read().await; + let batch_store = state.memtable.batch_store(); + let indexes = state.memtable.indexes_arc(); + let batch_count = state.memtable.batch_count(); + drop(state); + + // Only send flush if there are batches to flush + if batch_count > 0 { + // Create a completion cell to wait for flush + let done = WatchableOnceCell::new(); + let reader = done.reader(); + + // Send flush message with end_batch_position = batch_count to flush all pending + if self + .writer_state + .wal_flush_tx + .send(TriggerWalFlush { + batch_store, + indexes, + end_batch_position: batch_count, + done: Some(done), + }) + .is_ok() + { + // Wait for flush to complete + let mut reader = reader; + let _ = reader.await_value().await; + } + } + + // Shutdown background tasks + self.task_executor.shutdown_all().await?; + + info!("RegionWriter closed for region {}", self.config.region_id); + Ok(()) + } +} + +/// MemTable statistics. +#[derive(Debug, Clone)] +pub struct MemTableStats { + pub row_count: usize, + pub batch_count: usize, + pub estimated_size: usize, + pub generation: u64, +} + +/// WAL statistics. 
+#[derive(Debug, Clone)] +pub struct WalStats { + /// Next WAL entry position to be used. + pub next_wal_entry_position: u64, +} + +/// Background handler for WAL flush operations. +/// +/// This handler does parallel WAL I/O + index updates during flush. +/// Indexes are passed through the TriggerWalFlush message. +struct WalFlushHandler { + wal_flusher: Arc, + state: Arc>, + stats: SharedWriteStats, +} + +impl WalFlushHandler { + fn new( + wal_flusher: Arc, + state: Arc>, + stats: SharedWriteStats, + ) -> Self { + Self { + wal_flusher, + state, + stats, + } + } +} + +#[async_trait] +impl MessageHandler for WalFlushHandler { + async fn handle(&mut self, message: TriggerWalFlush) -> Result<()> { + let TriggerWalFlush { + batch_store, + indexes, + end_batch_position, + done, + } = message; + + let pending_count = batch_store.pending_wal_flush_count(); + eprintln!( + "[{}][WAL_FLUSH_HANDLER] Received TriggerWalFlush: pending={}, end_batch_position={}", + log_ts(), + pending_count, + end_batch_position + ); + + let result = self + .do_flush(batch_store, indexes, end_batch_position) + .await; + + // Notify completion if requested + if let Some(cell) = done { + cell.write(result.map_err(|e| e.to_string())); + } + + Ok(()) + } +} + +impl WalFlushHandler { + /// Unified flush method for both active and frozen memtables. + /// + /// Detects frozen vs active flush by comparing the passed batch_store with the + /// current active memtable's batch_store. If different, it's a frozen memtable flush. + /// + /// # Arguments + /// + /// * `batch_store` - The batch store to flush from + /// * `indexes` - Optional indexes to update in parallel with WAL I/O + /// * `end_batch_position` - End batch ID (exclusive). Flush batches in (max_flushed, end_batch_position). + async fn do_flush( + &self, + batch_store: Arc, + indexes: Option>, + end_batch_position: usize, + ) -> Result { + let start = Instant::now(); + // Use batch_store's watermark - this is the authoritative source + let max_flushed = batch_store.max_flushed_batch_position(); + // Convert to count-like value for comparison: number of batches already flushed + let flushed_up_to = max_flushed.map(|p| p + 1).unwrap_or(0); + + // Detect if this is a frozen memtable flush by comparing batch_store pointers. + // If the batch_store is different from the current active memtable's, it's frozen. + let is_frozen_flush = { + let state = self.state.read().await; + !Arc::ptr_eq(&batch_store, &state.memtable.batch_store()) + }; + + // Check if there's anything to flush (only skip for active memtable) + if !is_frozen_flush && flushed_up_to >= end_batch_position { + eprintln!( + "[{}][WAL_FLUSH_HANDLER] Skipping flush to batch #{} - already flushed up to position {:?}", + log_ts(), + end_batch_position, + max_flushed + ); + return Ok(WalFlushResult { + entry: None, + wal_io_duration: std::time::Duration::ZERO, + index_update_duration: std::time::Duration::ZERO, + index_update_duration_breakdown: std::collections::HashMap::new(), + rows_indexed: 0, + wal_bytes: 0, + }); + } + + // Flush batches up to end_batch_position + let flush_result = self + .wal_flusher + .flush_to_with_index_update(&batch_store, end_batch_position, indexes) + .await?; + + let batches_flushed = flush_result + .entry + .as_ref() + .map(|e| e.batch_positionitions.len()) + .unwrap_or(0); + + // Note: WAL watermark is already updated by flush_to_with_index_update() + // via batch_store.set_max_flushed_batch_position(). No need for separate mapping. 
+ + // Record WAL flush stats + if batches_flushed > 0 { + self.stats + .record_wal_flush(start.elapsed(), flush_result.wal_bytes, batches_flushed); + self.stats.record_wal_io(flush_result.wal_io_duration); + self.stats.record_index_update( + flush_result.index_update_duration, + flush_result.rows_indexed, + ); + } + + eprintln!( + "[{}][WAL_FLUSH_HANDLER] Flush complete: {} batches, {}ms (S3={}ms, index={:?}), frozen={}", + log_ts(), + batches_flushed, + start.elapsed().as_millis(), + flush_result.wal_io_duration.as_millis(), + flush_result.index_update_duration_breakdown, + is_frozen_flush + ); + + Ok(flush_result) + } +} + +/// Background handler for MemTable flush operations. +/// +/// This handler receives frozen memtables directly via messages and flushes them to Lance storage. +/// Freezing is done by the writer (via SharedWriterState::freeze_memtable) to ensure +/// immediate memtable switching, so writes can continue on the new memtable while this +/// handler flushes in the background. +struct MemTableFlushHandler { + state: Arc>, + flusher: Arc, + epoch_guard: Arc, + /// Write statistics collector. + stats: SharedWriteStats, +} + +impl MemTableFlushHandler { + fn new( + state: Arc>, + flusher: Arc, + epoch_guard: Arc, + stats: SharedWriteStats, + ) -> Self { + Self { + state, + flusher, + epoch_guard, + stats, + } + } +} + +#[async_trait] +impl MessageHandler for MemTableFlushHandler { + async fn handle(&mut self, message: TriggerMemTableFlush) -> Result<()> { + let TriggerMemTableFlush { memtable, done } = message; + + eprintln!( + "[{}][FLUSH_HANDLER] Received TriggerMemTableFlush: gen={}, {} rows, {} batches", + log_ts(), + memtable.generation(), + memtable.row_count(), + memtable.batch_count() + ); + let result = self.flush_memtable(memtable).await; + if let Some(tx) = done { + // Send result through the channel - caller is waiting for it + let _ = tx.send(result); + } else { + // No done channel, propagate errors + result?; + } + Ok(()) + } +} + +impl MemTableFlushHandler { + /// Flush the given frozen memtable to Lance storage. + /// + /// This method waits for the WAL flush to complete (sent at freeze time), + /// then flushes to Lance storage. The WAL flush is already queued by + /// freeze_memtable to ensure strict ordering of WAL entries. + async fn flush_memtable(&mut self, memtable: Arc) -> Result { + let start = Instant::now(); + let memtable_size = memtable.estimated_size(); + eprintln!( + "[{}][MEMTABLE_FLUSH] Starting flush: gen={}, {} rows, {} batches, has_pending_wal={}", + log_ts(), + memtable.generation(), + memtable.row_count(), + memtable.batch_count(), + memtable.has_pending_wal_flush() + ); + + // Step 1: Wait for WAL flush completion (already queued at freeze time) + // The TriggerWalFlush message was sent by freeze_memtable to ensure + // strict ordering of WAL entries. 
+ if let Some(mut completion_reader) = memtable.take_wal_flush_completion() { + let wal_start = std::time::Instant::now(); + eprintln!( + "[{}][MEMTABLE_FLUSH] Awaiting WAL flush completion...", + log_ts() + ); + let wal_result = completion_reader + .await_value() + .await + .map_err(|e| Error::io(format!("WAL flush failed: {}", e), snafu::location!()))?; + eprintln!( + "[{}][MEMTABLE_FLUSH] WAL flush completed in {}ms (S3={}ms, index={:?}, {} rows)", + log_ts(), + wal_start.elapsed().as_millis(), + wal_result.wal_io_duration.as_millis(), + wal_result.index_update_duration_breakdown, + wal_result.rows_indexed + ); + } else { + eprintln!( + "[{}][MEMTABLE_FLUSH] No pending WAL flush to wait for", + log_ts() + ); + } + + // Step 2: Flush the memtable to Lance storage + let flusher_start = std::time::Instant::now(); + let result = self.flusher.flush(&memtable, &self.epoch_guard).await?; + eprintln!( + "[{}][MEMTABLE_FLUSH] flusher.flush() took {}ms", + log_ts(), + flusher_start.elapsed().as_millis() + ); + + // Step 3: Signal completion and update backpressure tracking + // Signal memtable flush completion for backpressure watchers + memtable.signal_memtable_flush_complete(); + + // Update backpressure tracking - remove the oldest watcher and decrement bytes + { + let mut state = self.state.write().await; + if let Some((_size, _watcher)) = state.frozen_flush_watchers.pop_front() { + state.frozen_memtable_bytes = + state.frozen_memtable_bytes.saturating_sub(memtable_size); + } + } + + // Record stats + self.stats.record_memtable_flush( + start.elapsed(), + result.rows_flushed, + result.fragments_created, + ); + + info!( + "Flushed frozen memtable generation {} ({} rows, {} fragments in {:?})", + result.generation.generation, + result.rows_flushed, + result.fragments_created, + start.elapsed() + ); + + Ok(result) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, StringArray}; + use arrow_schema::{DataType, Field}; + use tempfile::TempDir; + + async fn create_local_store() -> (Arc, Path, String, TempDir) { + let temp_dir = tempfile::tempdir().unwrap(); + let uri = format!("file://{}", temp_dir.path().display()); + let (store, path) = ObjectStore::from_uri(&uri).await.unwrap(); + (store, path, uri, temp_dir) + } + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &ArrowSchema, start_id: i32, num_rows: usize) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from_iter_values( + start_id..start_id + num_rows as i32, + )), + Arc::new(StringArray::from_iter_values( + (0..num_rows).map(|i| format!("name_{}", start_id as usize + i)), + )), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_region_writer_basic_write() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let schema = create_test_schema(); + + let config = RegionWriterConfig { + region_id: Uuid::new_v4(), + region_spec_id: 0, + durable_write: false, + sync_indexed_write: false, + max_wal_buffer_size: 1024 * 1024, + max_wal_flush_interval: None, + max_memtable_size: 64 * 1024 * 1024, + manifest_scan_batch_size: 2, + ..Default::default() + }; + + let writer = RegionWriter::open( + store, + base_path, + base_uri, + config.clone(), + schema.clone(), + vec![], + ) + .await + .unwrap(); + + // Write a batch + let batch = create_test_batch(&schema, 0, 10); + let result = 
writer.put(batch).await.unwrap(); + + assert_eq!(result.batch_position, 0); + + // Check stats + let stats = writer.memtable_stats().await; + assert_eq!(stats.row_count, 10); + assert_eq!(stats.batch_count, 1); + + // Close writer + writer.close().await.unwrap(); + } + + #[tokio::test] + async fn test_region_writer_multiple_writes() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let schema = create_test_schema(); + + let config = RegionWriterConfig { + region_id: Uuid::new_v4(), + region_spec_id: 0, + durable_write: false, + sync_indexed_write: false, + max_wal_buffer_size: 1024 * 1024, + max_wal_flush_interval: None, + max_memtable_size: 64 * 1024 * 1024, + manifest_scan_batch_size: 2, + ..Default::default() + }; + + let writer = RegionWriter::open(store, base_path, base_uri, config, schema.clone(), vec![]) + .await + .unwrap(); + + // Write multiple batches + for i in 0..5 { + let batch = create_test_batch(&schema, i * 10, 10); + let result = writer.put(batch).await.unwrap(); + assert_eq!(result.batch_position, i as usize); + } + + let stats = writer.memtable_stats().await; + assert_eq!(stats.row_count, 50); + assert_eq!(stats.batch_count, 5); + + writer.close().await.unwrap(); + } + + #[tokio::test] + async fn test_region_writer_with_indexes() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let schema = create_test_schema(); + + let config = RegionWriterConfig { + region_id: Uuid::new_v4(), + region_spec_id: 0, + durable_write: false, + sync_indexed_write: true, + max_wal_buffer_size: 1024 * 1024, + max_wal_flush_interval: None, + max_memtable_size: 64 * 1024 * 1024, + manifest_scan_batch_size: 2, + ..Default::default() + }; + + let index_configs = vec![MemIndexConfig::BTree(BTreeIndexConfig { + name: "id_idx".to_string(), + column: "id".to_string(), + })]; + + let writer = RegionWriter::open( + store, + base_path, + base_uri, + config, + schema.clone(), + index_configs, + ) + .await + .unwrap(); + + // Write a batch + let batch = create_test_batch(&schema, 0, 10); + writer.put(batch).await.unwrap(); + + let stats = writer.memtable_stats().await; + assert_eq!(stats.row_count, 10); + + writer.close().await.unwrap(); + } + + /// Test memtable auto-flush triggered by size threshold. 
+ #[tokio::test] + async fn test_region_writer_auto_flush_by_size() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let schema = create_test_schema(); + + // Use a small memtable size to trigger auto-flush + let config = RegionWriterConfig { + region_id: Uuid::new_v4(), + region_spec_id: 0, + durable_write: false, + sync_indexed_write: false, + max_wal_buffer_size: 1024 * 1024, + max_wal_flush_interval: None, + max_memtable_size: 1024, // Very small - will trigger flush quickly + manifest_scan_batch_size: 2, + ..Default::default() + }; + + let writer = RegionWriter::open(store, base_path, base_uri, config, schema.clone(), vec![]) + .await + .unwrap(); + + let initial_gen = writer.memtable_stats().await.generation; + + // Write batches until auto-flush triggers + for i in 0..20 { + let batch = create_test_batch(&schema, i * 10, 10); + writer.put(batch).await.unwrap(); + } + + // Give time for background flush to process + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Check that generation increased (indicating flush happened) + let stats = writer.memtable_stats().await; + assert!( + stats.generation > initial_gen, + "Generation should increment after auto-flush" + ); + + writer.close().await.unwrap(); + } +} + +#[cfg(test)] +#[allow(clippy::print_stderr)] +mod region_writer_tests { + use std::sync::Arc; + use std::time::Instant; + + use arrow_array::{ + FixedSizeListArray, Float32Array, Int64Array, RecordBatch, RecordBatchIterator, StringArray, + }; + use arrow_schema::{DataType, Field, Schema as ArrowSchema}; + use lance_arrow::FixedSizeListArrayExt; + use lance_index::scalar::inverted::InvertedIndexParams; + use lance_index::scalar::ScalarIndexParams; + use lance_index::vector::ivf::IvfBuildParams; + use lance_index::vector::pq::builder::PQBuildParams; + use lance_index::{DatasetIndexExt, IndexType}; + use lance_linalg::distance::MetricType; + use uuid::Uuid; + + use crate::dataset::mem_wal::{DatasetMemWalExt, MemWalConfig}; + use crate::dataset::{Dataset, WriteParams}; + use crate::index::vector::VectorIndexParams; + + use super::super::RegionWriterConfig; + + fn create_test_schema(vector_dim: i32) -> Arc { + use std::collections::HashMap; + + let mut id_metadata = HashMap::new(); + id_metadata.insert( + "lance-schema:unenforced-primary-key".to_string(), + "true".to_string(), + ); + let id_field = Field::new("id", DataType::Int64, false).with_metadata(id_metadata); + + Arc::new(ArrowSchema::new(vec![ + id_field, + Field::new( + "vector", + DataType::FixedSizeList( + Arc::new(Field::new("item", DataType::Float32, true)), + vector_dim, + ), + true, + ), + Field::new("text", DataType::Utf8, true), + ])) + } + + fn create_test_batch( + schema: &ArrowSchema, + start_id: i64, + num_rows: usize, + vector_dim: i32, + ) -> RecordBatch { + let vectors: Vec = (0..num_rows) + .flat_map(|i| { + let seed = (start_id as usize + i) as f32; + (0..vector_dim as usize).map(move |d| (seed * 0.1 + d as f32 * 0.01).sin()) + }) + .collect(); + + let vector_array = + FixedSizeListArray::try_new_from_values(Float32Array::from(vectors), vector_dim) + .unwrap(); + + let texts: Vec = (0..num_rows) + .map(|i| format!("Sample text for row {}", start_id as usize + i)) + .collect(); + + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int64Array::from_iter_values( + start_id..start_id + num_rows as i64, + )), + Arc::new(vector_array), + Arc::new(StringArray::from_iter_values(texts)), + ], + ) + .unwrap() + } + + /// Quick smoke test for 
region writer - runs against memory:// + /// Run with: cargo test -p lance region_writer_tests::test_region_writer_smoke -- --nocapture + #[tokio::test] + async fn test_region_writer_smoke() { + let vector_dim = 128; + let batch_size = 20; + let num_batches = 100; + + let schema = create_test_schema(vector_dim); + let uri = format!("memory://test_region_writer_{}", Uuid::new_v4()); + + eprintln!("[TEST] Creating dataset at {}", uri); + + // Create initial dataset + let initial_batch = create_test_batch(&schema, 0, 100, vector_dim); + let batches = RecordBatchIterator::new([Ok(initial_batch)], schema.clone()); + let mut dataset = Dataset::write(batches, &uri, Some(WriteParams::default())) + .await + .expect("Failed to create dataset"); + + // Initialize MemWAL (no indexes for smoke test) + dataset + .initialize_mem_wal(MemWalConfig { + region_spec: None, + maintained_indexes: vec![], + }) + .await + .expect("Failed to initialize MemWAL"); + + // Create region writer + let region_id = Uuid::new_v4(); + let config = RegionWriterConfig::new(region_id) + .with_durable_write(false) + .with_sync_indexed_write(false); + + eprintln!("[TEST] Creating region writer with config: {:?}", config); + + let writer = dataset + .mem_wal_writer(region_id, config) + .await + .expect("Failed to create writer"); + + // Pre-generate batches + let batches: Vec = (0..num_batches) + .map(|i| create_test_batch(&schema, (i * batch_size) as i64, batch_size, vector_dim)) + .collect(); + + eprintln!( + "[TEST] Writing {} batches x {} rows = {} total rows", + num_batches, + batch_size, + num_batches * batch_size + ); + + let start = Instant::now(); + for (i, batch) in batches.iter().enumerate() { + writer.put(batch.clone()).await.expect("Failed to write"); + if (i + 1) % 50 == 0 { + eprintln!("[TEST] Wrote {} batches in {:?}", i + 1, start.elapsed()); + } + } + let write_duration = start.elapsed(); + + eprintln!("[TEST] All writes done in {:?}", write_duration); + + let stats = writer.stats(); + eprintln!("[TEST] Closing writer..."); + let close_start = Instant::now(); + writer.close().await.expect("Failed to close"); + eprintln!("[TEST] Close took {:?}", close_start.elapsed()); + + eprintln!( + "[TEST] Stats: puts={}, WAL flushes={}, WAL bytes={}", + stats.put_count, stats.wal_flush_count, stats.wal_flush_bytes + ); + } + + /// Test region writer against S3 with IVF-PQ, BTree, and FTS indexes (requires DATASET_PREFIX env var) + /// Run with: DATASET_PREFIX=s3://bucket/path cargo test -p lance --release region_writer_tests::test_region_writer_s3_ivfpq -- --nocapture --ignored + #[tokio::test] + #[ignore] + async fn test_region_writer_s3_ivfpq() { + let prefix = std::env::var("DATASET_PREFIX").expect("DATASET_PREFIX not set"); + + let vector_dim = 512; + let batch_size = 20; + let num_batches = 10000; + let num_partitions = 16; + let num_sub_vectors = 64; // 512 / 8 = 64 subvectors + + let schema = create_test_schema(vector_dim); + let uri = format!( + "{}/test_s3_{}", + prefix.trim_end_matches('/'), + Uuid::new_v4() + ); + + eprintln!("[TEST] Creating dataset at {}", uri); + + // Create initial dataset with enough data for IVF-PQ training + let initial_batch = create_test_batch(&schema, 0, 1000, vector_dim); + let batches = RecordBatchIterator::new([Ok(initial_batch)], schema.clone()); + let mut dataset = Dataset::write(batches, &uri, Some(WriteParams::default())) + .await + .expect("Failed to create dataset"); + + // Create BTree index on id column + eprintln!("[TEST] Creating BTree index on id column..."); + let 
btree_start = Instant::now(); + let scalar_params = ScalarIndexParams::default(); + dataset + .create_index( + &["id"], + IndexType::BTree, + Some("id_btree".to_string()), + &scalar_params, + false, + ) + .await + .expect("Failed to create BTree index"); + eprintln!( + "[TEST] BTree index creation took {:?}", + btree_start.elapsed() + ); + + // Create FTS index on text column + eprintln!("[TEST] Creating FTS index on text column..."); + let fts_start = Instant::now(); + let fts_params = InvertedIndexParams::default(); + dataset + .create_index( + &["text"], + IndexType::Inverted, + Some("text_fts".to_string()), + &fts_params, + false, + ) + .await + .expect("Failed to create FTS index"); + eprintln!("[TEST] FTS index creation took {:?}", fts_start.elapsed()); + + // Create IVF-PQ index on dataset + eprintln!("[TEST] Creating IVF-PQ index on dataset..."); + let train_start = Instant::now(); + + let ivf_params = IvfBuildParams { + num_partitions: Some(num_partitions), + ..Default::default() + }; + let pq_params = PQBuildParams { + num_sub_vectors, + num_bits: 8, + ..Default::default() + }; + let vector_params = + VectorIndexParams::with_ivf_pq_params(MetricType::L2, ivf_params, pq_params); + + dataset + .create_index( + &["vector"], + IndexType::Vector, + Some("vector_idx".to_string()), + &vector_params, + true, + ) + .await + .expect("Failed to create IVF-PQ index"); + + eprintln!( + "[TEST] IVF-PQ index creation took {:?}", + train_start.elapsed() + ); + + // Initialize MemWAL with all three indexes + dataset + .initialize_mem_wal(MemWalConfig { + region_spec: None, + maintained_indexes: vec![ + "id_btree".to_string(), + "text_fts".to_string(), + "vector_idx".to_string(), + ], + }) + .await + .expect("Failed to initialize MemWAL"); + + // Create region writer with default config + let region_id = Uuid::new_v4(); + let config = RegionWriterConfig::new(region_id) + .with_durable_write(false) + .with_sync_indexed_write(false); + + eprintln!( + "[TEST] Config: flush_interval={:?}, wal_flush_size={}", + config.max_wal_flush_interval, config.max_wal_buffer_size + ); + + let writer = dataset + .mem_wal_writer(region_id, config) + .await + .expect("Failed to create writer"); + + // Pre-generate batches + let batches: Vec = (0..num_batches) + .map(|i| create_test_batch(&schema, (i * batch_size) as i64, batch_size, vector_dim)) + .collect(); + + eprintln!( + "[TEST] Writing {} batches x {} rows = {} total rows", + num_batches, + batch_size, + num_batches * batch_size + ); + + let start = Instant::now(); + for (i, batch) in batches.iter().enumerate() { + writer.put(batch.clone()).await.expect("Failed to write"); + if (i + 1) % 100 == 0 { + eprintln!("[TEST] Wrote {} batches in {:?}", i + 1, start.elapsed()); + } + } + let write_duration = start.elapsed(); + + eprintln!("[TEST] All writes done in {:?}", write_duration); + + let stats = writer.stats(); + eprintln!("[TEST] Closing writer..."); + let close_start = Instant::now(); + writer.close().await.expect("Failed to close"); + eprintln!("[TEST] Close took {:?}", close_start.elapsed()); + + eprintln!( + "[TEST] Stats: puts={}, WAL flushes={}, WAL bytes={}", + stats.put_count, stats.wal_flush_count, stats.wal_flush_bytes + ); + + let rows_per_sec = (num_batches * batch_size) as f64 / write_duration.as_secs_f64(); + eprintln!("[TEST] Throughput: {:.0} rows/sec", rows_per_sec); + } +} diff --git a/rust/lance/src/dataset/mem_wal/write/batch_write.rs b/rust/lance/src/dataset/mem_wal/write/batch_write.rs new file mode 100644 index 00000000000..05847934619 
--- /dev/null +++ b/rust/lance/src/dataset/mem_wal/write/batch_write.rs @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Batch write handling - backpressure control and durability tracking. + +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; + +use lance_core::Result; +use tracing::{debug, warn}; + +use crate::dataset::mem_wal::config::RegionWriterConfig; + +/// Result of a durability notification. +/// +/// This is a simple enum that can be cloned, unlike `Result<(), Error>`. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum DurabilityResult { + /// Write is now durable. + Durable, + /// Write failed with an error message. + Failed(String), +} + +impl DurabilityResult { + /// Create a successful durability result. + pub fn ok() -> Self { + Self::Durable + } + + /// Create a failed durability result. + pub fn err(msg: impl Into) -> Self { + Self::Failed(msg.into()) + } + + /// Check if the result is durable. + pub fn is_ok(&self) -> bool { + matches!(self, Self::Durable) + } + + /// Convert to a Result. + pub fn into_result(self) -> Result<()> { + match self { + Self::Durable => Ok(()), + Self::Failed(msg) => Err(lance_core::Error::io(msg, snafu::location!())), + } + } +} + +/// Type alias for durability watchers. +pub type DurabilityWatcher = + super::super::watchable_cell::WatchableOnceCellReader; + +/// Type alias for durability cells. +pub type DurabilityCell = super::super::watchable_cell::WatchableOnceCell; + +/// Statistics for backpressure monitoring. +#[derive(Debug, Default)] +pub struct BackpressureStats { + /// Total number of times backpressure was applied. + total_count: AtomicU64, + /// Total time spent waiting on backpressure (in milliseconds). + total_wait_ms: AtomicU64, +} + +impl BackpressureStats { + /// Create new backpressure stats. + pub fn new() -> Self { + Self::default() + } + + /// Record a backpressure event. + pub fn record(&self, wait_ms: u64) { + self.total_count.fetch_add(1, Ordering::Relaxed); + self.total_wait_ms.fetch_add(wait_ms, Ordering::Relaxed); + } + + /// Get the total backpressure count. + pub fn count(&self) -> u64 { + self.total_count.load(Ordering::Relaxed) + } + + /// Get the total time spent waiting on backpressure. + pub fn total_wait_ms(&self) -> u64 { + self.total_wait_ms.load(Ordering::Relaxed) + } + + /// Get a snapshot of all stats. + pub fn snapshot(&self) -> BackpressureStatsSnapshot { + BackpressureStatsSnapshot { + total_count: self.total_count.load(Ordering::Relaxed), + total_wait_ms: self.total_wait_ms.load(Ordering::Relaxed), + } + } +} + +/// Snapshot of backpressure statistics. +#[derive(Debug, Clone, Default)] +pub struct BackpressureStatsSnapshot { + /// Total number of times backpressure was applied. + pub total_count: u64, + /// Total time spent waiting on backpressure (in milliseconds). + pub total_wait_ms: u64, +} + +/// Backpressure controller for managing write flow. +pub struct BackpressureController { + /// Configuration. + config: RegionWriterConfig, + /// Stats for monitoring. + stats: Arc, +} + +impl BackpressureController { + /// Create a new backpressure controller. + pub fn new(config: RegionWriterConfig) -> Self { + Self { + config, + stats: Arc::new(BackpressureStats::new()), + } + } + + /// Get backpressure stats. + pub fn stats(&self) -> &Arc { + &self.stats + } + + /// Check and apply backpressure if needed. 
+ /// + /// This method blocks if the system is under memory pressure, waiting for + /// frozen memtables to be flushed to storage until under threshold. + /// + /// Backpressure is applied when: + /// - `unflushed_memtable_bytes` >= `max_unflushed_memtable_bytes` + /// + /// # Arguments + /// - `get_state`: Closure that returns current (unflushed_memtable_bytes, oldest_memtable_watcher) + /// + /// The closure is called in a loop to get fresh state after each wait. + pub async fn maybe_apply_backpressure(&self, mut get_state: F) -> Result<()> + where + F: FnMut() -> (usize, Option), + { + let start = std::time::Instant::now(); + let mut iteration = 0u32; + + loop { + let (unflushed_memtable_bytes, oldest_watcher) = get_state(); + + // Check if under threshold + if unflushed_memtable_bytes < self.config.max_unflushed_memtable_bytes { + if iteration > 0 { + let wait_ms = start.elapsed().as_millis() as u64; + self.stats.record(wait_ms); + } + return Ok(()); + } + + iteration += 1; + + debug!( + unflushed_memtable_bytes = unflushed_memtable_bytes, + max_unflushed_memtable_bytes = self.config.max_unflushed_memtable_bytes, + iteration = iteration, + "Backpressure triggered" + ); + + // Wait for oldest memtable to flush + if let Some(mut mem_watcher) = oldest_watcher { + tokio::select! { + _ = mem_watcher.await_value() => {} + _ = tokio::time::sleep(self.config.backpressure_log_interval) => { + warn!( + unflushed_memtable_bytes = unflushed_memtable_bytes, + interval_secs = self.config.backpressure_log_interval.as_secs(), + iteration = iteration, + "Backpressure wait timeout, continuing to wait" + ); + } + } + } else { + // No watcher available - sleep briefly to avoid busy loop + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + + #[tokio::test] + async fn test_no_backpressure_when_under_threshold() { + let config = RegionWriterConfig::default().with_max_unflushed_memtable_bytes(1024 * 1024); // 1MB + + let controller = BackpressureController::new(config); + + // Should return immediately - well under threshold (100 bytes < 1MB) + controller + .maybe_apply_backpressure(|| (100, None)) + .await + .unwrap(); + + assert_eq!(controller.stats().count(), 0); + } + + #[tokio::test] + async fn test_backpressure_loops_until_under_threshold() { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + let config = RegionWriterConfig::default() + .with_max_unflushed_memtable_bytes(100) // Very low threshold + .with_backpressure_log_interval(Duration::from_millis(50)); + + let controller = BackpressureController::new(config); + + // Simulate: starts at 1000 bytes, drops by 400 each call (simulating memtable flushes) + let call_count = Arc::new(AtomicUsize::new(0)); + let call_count_clone = call_count.clone(); + + controller + .maybe_apply_backpressure(move || { + let count = call_count_clone.fetch_add(1, Ordering::Relaxed); + // 1000 -> 600 -> 200 -> under threshold (need 3 iterations) + let unflushed = 1000usize.saturating_sub(count * 400); + (unflushed, None) + }) + .await + .unwrap(); + + // Should have called get_state 4 times (initial + 3 waits until under 100) + assert_eq!(call_count.load(Ordering::Relaxed), 4); + // Should have recorded backpressure wait time (waited 3 times) + assert_eq!(controller.stats().count(), 1); + } +} diff --git a/rust/lance/src/dataset/mem_wal/write/flush.rs b/rust/lance/src/dataset/mem_wal/write/flush.rs new file mode 100644 index 
00000000000..aeb07374787 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/write/flush.rs @@ -0,0 +1,857 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! MemTable flush to persistent storage. + +use std::sync::Arc; + +use bytes::Bytes; +use lance_core::{Error, Result}; +use lance_index::mem_wal::{FlushedGeneration, RegionManifest}; +use lance_index::scalar::ScalarIndexParams; +use lance_index::IndexType; +use lance_io::object_store::ObjectStore; +use lance_table::format::IndexMetadata; +use object_store::path::Path; +use snafu::location; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use super::super::indexes::MemIndexConfig; +use super::super::memtable::MemTable; +use crate::dataset::mem_wal::epoch::EpochGuard; +use crate::dataset::mem_wal::manifest::RegionManifestStore; +use crate::dataset::mem_wal::util::{flushed_memtable_path, generate_random_hash}; +use crate::Dataset; + +#[derive(Debug, Clone)] +pub struct FlushResult { + pub generation: FlushedGeneration, + pub rows_flushed: usize, + pub fragments_created: usize, + pub covered_wal_entry_position: u64, +} + +pub struct MemTableFlusher { + object_store: Arc, + base_path: Path, + base_uri: String, + region_id: Uuid, + manifest_store: Arc, +} + +impl MemTableFlusher { + pub fn new( + object_store: Arc, + base_path: Path, + base_uri: impl Into, + region_id: Uuid, + manifest_store: Arc, + ) -> Self { + Self { + object_store, + base_path, + base_uri: base_uri.into(), + region_id, + manifest_store, + } + } + + /// Construct a full URI for a path within the base dataset. + fn path_to_uri(&self, path: &Path) -> String { + // Remove base_path prefix from path to get relative path + let path_str = path.as_ref(); + let base_str = self.base_path.as_ref(); + + let relative = if let Some(stripped) = path_str.strip_prefix(base_str) { + stripped.trim_start_matches('/') + } else { + path_str + }; + + // Combine base_uri with relative path + let base = self.base_uri.trim_end_matches('/'); + if relative.is_empty() { + base.to_string() + } else { + format!("{}/{}", base, relative) + } + } + + /// Flush the MemTable to storage (data files, indexes, bloom filter). 
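+ /// + /// The MemTable must be non-empty and fully flushed to the WAL; otherwise an error + /// is returned. On success the rows are written as a Lance dataset under a new + /// per-generation path, the bloom filter is persisted next to it, and the region + /// manifest is advanced to record the flushed generation.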
+ pub async fn flush( + &self, + memtable: &MemTable, + epoch_guard: &EpochGuard, + ) -> Result { + epoch_guard.check_fenced().await?; + + if memtable.row_count() == 0 { + return Err(Error::invalid_input( + "Cannot flush empty MemTable", + location!(), + )); + } + + if !memtable.all_flushed_to_wal() { + return Err(Error::invalid_input( + "MemTable has unflushed fragments - WAL flush required first", + location!(), + )); + } + + let random_hash = generate_random_hash(); + let generation = memtable.generation(); + let gen_path = + flushed_memtable_path(&self.base_path, &self.region_id, &random_hash, generation); + + info!( + "Flushing MemTable generation {} to {} ({} rows, {} batches)", + generation, + gen_path, + memtable.row_count(), + memtable.batch_count() + ); + + self.write_data_file(&gen_path, memtable).await?; + + let bloom_path = gen_path.child("bloom_filter.bin"); + self.write_bloom_filter(&bloom_path, memtable.bloom_filter()) + .await?; + + let last_wal_entry_position = memtable.last_flushed_wal_entry_position(); + let new_manifest = self + .update_manifest( + epoch_guard, + generation, + gen_path.as_ref(), + last_wal_entry_position, + ) + .await?; + + info!( + "Flushed generation {} for region {} (manifest version {})", + generation, self.region_id, new_manifest.version + ); + + Ok(FlushResult { + generation: FlushedGeneration { + generation, + path: gen_path.to_string(), + }, + rows_flushed: memtable.row_count(), + fragments_created: memtable.batch_count(), + covered_wal_entry_position: last_wal_entry_position, + }) + } + + async fn write_data_file(&self, path: &Path, memtable: &MemTable) -> Result<()> { + use arrow_array::RecordBatchIterator; + + if memtable.row_count() == 0 { + return Ok(()); + } + + let batches = memtable.scan_batches().await?; + if batches.is_empty() { + return Ok(()); + } + + let uri = self.path_to_uri(path); + let reader = + RecordBatchIterator::new(batches.into_iter().map(Ok), memtable.schema().clone()); + Dataset::write(reader, &uri, None).await?; + + debug!("Wrote Lance Dataset to {}", uri); + Ok(()) + } + + async fn write_bloom_filter( + &self, + path: &Path, + bloom: &lance_index::scalar::bloomfilter::sbbf::Sbbf, + ) -> Result<()> { + let data = bloom.to_bytes(); + self.object_store + .inner + .put(path, Bytes::from(data).into()) + .await + .map_err(|e| Error::io(format!("Failed to write bloom filter: {}", e), location!()))?; + debug!("Wrote bloom filter to {}", path); + Ok(()) + } + + /// Flush the MemTable to storage with indexes. 
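+ /// + /// Performs the same steps as [`Self::flush`], but additionally builds BTree + /// indexes (and IVF-PQ indexes, when present in the in-memory registry) on the + /// flushed generation before the bloom filter and manifest updates.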
+ pub async fn flush_with_indexes( + &self, + memtable: &MemTable, + epoch_guard: &EpochGuard, + index_configs: &[MemIndexConfig], + ) -> Result { + epoch_guard.check_fenced().await?; + + if memtable.row_count() == 0 { + return Err(Error::invalid_input( + "Cannot flush empty MemTable", + location!(), + )); + } + + if !memtable.all_flushed_to_wal() { + return Err(Error::invalid_input( + "MemTable has unflushed fragments - WAL flush required first", + location!(), + )); + } + + let random_hash = generate_random_hash(); + let generation = memtable.generation(); + let gen_path = + flushed_memtable_path(&self.base_path, &self.region_id, &random_hash, generation); + + info!( + "Flushing MemTable generation {} with indexes to {} ({} rows, {} batches)", + generation, + gen_path, + memtable.row_count(), + memtable.batch_count() + ); + + self.write_data_file(&gen_path, memtable).await?; + + let created_indexes = self + .create_indexes(&gen_path, index_configs, memtable.indexes()) + .await?; + if !created_indexes.is_empty() { + info!( + "Created {} BTree indexes on flushed generation {}", + created_indexes.len(), + generation + ); + } + + if let Some(registry) = memtable.indexes() { + for config in index_configs { + if let MemIndexConfig::IvfPq(ivf_pq_config) = config { + if let Some(mem_index) = registry.get_ivf_pq(&ivf_pq_config.name) { + let _index_meta = self + .create_ivf_pq_index(&gen_path, ivf_pq_config, mem_index) + .await?; + info!( + "Created IVF-PQ index '{}' on flushed generation {}", + ivf_pq_config.name, generation + ); + } + } + } + } + + let bloom_path = gen_path.child("bloom_filter.bin"); + self.write_bloom_filter(&bloom_path, memtable.bloom_filter()) + .await?; + + let last_wal_entry_position = memtable.last_flushed_wal_entry_position(); + let new_manifest = self + .update_manifest( + epoch_guard, + generation, + gen_path.as_ref(), + last_wal_entry_position, + ) + .await?; + + info!( + "Flushed generation {} for region {} (manifest version {})", + generation, self.region_id, new_manifest.version + ); + + Ok(FlushResult { + generation: FlushedGeneration { + generation, + path: gen_path.to_string(), + }, + rows_flushed: memtable.row_count(), + fragments_created: memtable.batch_count(), + covered_wal_entry_position: last_wal_entry_position, + }) + } + + /// Create BTree indexes on the flushed dataset. 
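+ /// + /// Only BTree configs are handled here: FTS configs are skipped with a warning and + /// IVF-PQ configs are written separately by `create_ivf_pq_index`. When an in-memory + /// BTree index is available in the registry, its training batches are passed to the + /// builder as preprocessed data, and each index is committed via a `CreateIndex` + /// transaction.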
+ async fn create_indexes( + &self, + gen_path: &Path, + index_configs: &[MemIndexConfig], + mem_indexes: Option<&super::super::indexes::IndexRegistry>, + ) -> Result> { + use arrow_array::RecordBatchIterator; + + use crate::index::CreateIndexBuilder; + + let uri = self.path_to_uri(gen_path); + + let btree_configs: Vec<_> = index_configs + .iter() + .filter_map(|c| match c { + MemIndexConfig::BTree(cfg) => Some(cfg), + MemIndexConfig::IvfPq(_) => None, + MemIndexConfig::Fts(_) => { + warn!( + "Skipping FTS index '{}' for flushed generation (not yet implemented)", + c.name() + ); + None + } + }) + .collect(); + + if btree_configs.is_empty() { + return Ok(vec![]); + } + + let mut dataset = Dataset::open(&uri).await?; + let mut created_indexes = Vec::new(); + + for btree_cfg in btree_configs { + debug!( + "Creating BTree index '{}' on column '{}' for flushed generation", + btree_cfg.name, btree_cfg.column + ); + + let params = ScalarIndexParams::default(); + let mut builder = CreateIndexBuilder::new( + &mut dataset, + &[btree_cfg.column.as_str()], + IndexType::BTree, + ¶ms, + ) + .name(btree_cfg.name.clone()); + + if let Some(registry) = mem_indexes { + if let Some(btree_index) = registry.get_btree(&btree_cfg.name) { + let training_batches = btree_index.to_training_batches(8192)?; + if !training_batches.is_empty() { + let schema = training_batches[0].schema(); + let reader = + RecordBatchIterator::new(training_batches.into_iter().map(Ok), schema); + builder = builder.preprocessed_data(Box::new(reader)); + } + } + } + + let index_meta = builder.execute_uncommitted().await?; + created_indexes.push(index_meta.clone()); + + use crate::dataset::transaction::{Operation, Transaction}; + let transaction = Transaction::new( + index_meta.dataset_version, + Operation::CreateIndex { + new_indices: vec![index_meta], + removed_indices: vec![], + }, + None, + ); + dataset + .apply_commit(transaction, &Default::default(), &Default::default()) + .await?; + + debug!("Created and committed BTree index '{}'", btree_cfg.name); + } + + Ok(created_indexes) + } + + /// Create an IVF-PQ index from in-memory data. + /// + /// Writes the index files directly using the pre-computed partition assignments + /// and PQ codes from the in-memory index. 
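+ /// + /// Two files are written under the generation's `_indices/<uuid>` directory: an + /// auxiliary storage file holding row ids and transposed PQ codes per partition, and + /// an index file carrying the IVF centroids and partition metadata. Empty partitions + /// are recorded with zero rows, and the returned `IndexMetadata` uses placeholder + /// field ids and dataset version that are fixed up when the index is committed.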
+ async fn create_ivf_pq_index( + &self, + gen_path: &Path, + config: &super::super::indexes::IvfPqIndexConfig, + mem_index: &super::super::indexes::IvfPqMemIndex, + ) -> Result { + use arrow_schema::{Field, Schema as ArrowSchema}; + use lance_core::ROW_ID; + use lance_file::writer::FileWriter; + use lance_index::pb; + use lance_index::vector::flat::index::FlatIndex; + use lance_index::vector::ivf::storage::IVF_METADATA_KEY; + use lance_index::vector::quantizer::{ + Quantization, QuantizationMetadata, QuantizerMetadata, + }; + use lance_index::vector::storage::STORAGE_METADATA_KEY; + use lance_index::vector::v3::subindex::IvfSubIndex; + use lance_index::vector::{DISTANCE_TYPE_KEY, PQ_CODE_COLUMN}; + use lance_index::{ + IndexMetadata as IndexMetaSchema, INDEX_AUXILIARY_FILE_NAME, INDEX_FILE_NAME, + INDEX_METADATA_SCHEMA_KEY, + }; + use prost::Message; + use std::sync::Arc; + + let index_uuid = uuid::Uuid::new_v4(); + let index_dir = gen_path.child("_indices").child(index_uuid.to_string()); + + // Get partition data from in-memory index + let partition_batches = mem_index.to_partition_batches()?; + let ivf_model = mem_index.ivf_model(); + let pq = mem_index.pq(); + let distance_type = mem_index.distance_type(); + + // Create storage file schema: _rowid, __pq_code + let pq_code_len = pq.num_sub_vectors * pq.num_bits as usize / 8; + let storage_schema: ArrowSchema = ArrowSchema::new(vec![ + Field::new(ROW_ID, arrow_schema::DataType::UInt64, false), + Field::new( + PQ_CODE_COLUMN, + arrow_schema::DataType::FixedSizeList( + Arc::new(Field::new("item", arrow_schema::DataType::UInt8, false)), + pq_code_len as i32, + ), + false, + ), + ]); + + // Create index file schema (FlatIndex schema) + let index_schema: ArrowSchema = FlatIndex::schema().as_ref().clone(); + + // Create file writers + let storage_path = index_dir.child(INDEX_AUXILIARY_FILE_NAME); + let index_path = index_dir.child(INDEX_FILE_NAME); + + let mut storage_writer = FileWriter::try_new( + self.object_store.create(&storage_path).await?, + (&storage_schema).try_into()?, + Default::default(), + )?; + let mut index_writer = FileWriter::try_new( + self.object_store.create(&index_path).await?, + (&index_schema).try_into()?, + Default::default(), + )?; + + // Track IVF partitions for both files + let mut storage_ivf = lance_index::vector::ivf::storage::IvfModel::empty(); + + // Get centroids (required for IVF index) + let centroids = ivf_model + .centroids + .clone() + .ok_or_else(|| Error::io("IVF model has no centroids", location!()))?; + let mut index_ivf = lance_index::vector::ivf::storage::IvfModel::new(centroids, None); + let mut partition_index_metadata = Vec::with_capacity(ivf_model.num_partitions()); + + // Create a map of partition_id -> batch for quick lookup + let partition_map: std::collections::HashMap = + partition_batches.into_iter().collect(); + + // Write each partition + for part_id in 0..ivf_model.num_partitions() { + if let Some(batch) = partition_map.get(&part_id) { + // Transpose PQ codes for storage (column-major layout) + let transposed_batch = transpose_pq_batch(batch, pq_code_len)?; + + // Write storage data + storage_writer.write_batch(&transposed_batch).await?; + storage_ivf.add_partition(transposed_batch.num_rows() as u32); + + // FlatIndex is empty (no additional sub-index data needed for IVF-PQ) + index_ivf.add_partition(0); + partition_index_metadata.push(String::new()); + } else { + // Empty partition + storage_ivf.add_partition(0); + index_ivf.add_partition(0); + 
partition_index_metadata.push(String::new()); + } + } + + // Write storage file metadata + let storage_ivf_pb = pb::Ivf::try_from(&storage_ivf)?; + storage_writer.add_schema_metadata(DISTANCE_TYPE_KEY, distance_type.to_string()); + let ivf_buffer_pos = storage_writer + .add_global_buffer(storage_ivf_pb.encode_to_vec().into()) + .await?; + storage_writer.add_schema_metadata(IVF_METADATA_KEY, ivf_buffer_pos.to_string()); + + // Write PQ metadata + let pq_metadata = pq.metadata(Some(QuantizationMetadata { + codebook_position: Some(0), + codebook: None, + transposed: true, + })); + if let Some(extra_metadata) = pq_metadata.extra_metadata()? { + let idx = storage_writer.add_global_buffer(extra_metadata).await?; + let mut pq_meta = pq_metadata; + pq_meta.set_buffer_index(idx); + let storage_partition_metadata = vec![serde_json::to_string(&pq_meta)?]; + storage_writer.add_schema_metadata( + STORAGE_METADATA_KEY, + serde_json::to_string(&storage_partition_metadata)?, + ); + } + + // Write index file metadata + let index_ivf_pb = pb::Ivf::try_from(&index_ivf)?; + let index_metadata = IndexMetaSchema { + index_type: "IVF_PQ".to_string(), + distance_type: distance_type.to_string(), + }; + index_writer.add_schema_metadata( + INDEX_METADATA_SCHEMA_KEY, + serde_json::to_string(&index_metadata)?, + ); + let ivf_buffer_pos = index_writer + .add_global_buffer(index_ivf_pb.encode_to_vec().into()) + .await?; + index_writer.add_schema_metadata(IVF_METADATA_KEY, ivf_buffer_pos.to_string()); + index_writer.add_schema_metadata( + FlatIndex::metadata_key(), + serde_json::to_string(&partition_index_metadata)?, + ); + + // Finish writing + storage_writer.finish().await?; + index_writer.finish().await?; + + debug!( + "Created IVF-PQ index '{}' at {}", + config.name, + index_dir.as_ref() + ); + + // Create index metadata for commit + let index_meta = IndexMetadata { + uuid: index_uuid, + name: config.name.clone(), + fields: vec![0], // Will be updated when committing + dataset_version: 0, + fragment_bitmap: None, + index_details: None, + base_id: None, + created_at: Some(chrono::Utc::now()), + index_version: 1, + }; + + Ok(index_meta) + } + + /// Update the region manifest with the new flushed generation. + async fn update_manifest( + &self, + epoch_guard: &EpochGuard, + generation: u64, + gen_path: &str, + covered_wal_entry_position: u64, + ) -> Result { + // Read current manifest + let current = self + .manifest_store + .read_latest() + .await? + .ok_or_else(|| Error::io("Region manifest not found", location!()))?; + + // Create updated manifest + let mut flushed_generations = current.flushed_generations.clone(); + flushed_generations.push(FlushedGeneration { + generation, + path: gen_path.to_string(), + }); + + let new_manifest = RegionManifest { + version: current.version + 1, + replay_after_wal_id: covered_wal_entry_position, + wal_id_last_seen: current.wal_id_last_seen.max(covered_wal_entry_position), + current_generation: generation + 1, + flushed_generations, + ..current + }; + + // Write with fencing check + epoch_guard.write_manifest(&new_manifest).await?; + + Ok(new_manifest) + } +} + +/// Transpose PQ codes in a batch from row-major to column-major layout. +/// +/// The storage format expects PQ codes to be transposed for efficient distance computation. 
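+/// +/// Illustratively, for two rows with `pq_code_len = 3`, the row-major codes +/// `[a0, a1, a2, b0, b1, b2]` are rearranged into the column-major codes +/// `[a0, b0, a1, b1, a2, b2]`.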
+fn transpose_pq_batch( + batch: &arrow_array::RecordBatch, + pq_code_len: usize, +) -> Result { + use arrow_array::cast::AsArray; + use arrow_array::FixedSizeListArray; + use lance_arrow::FixedSizeListArrayExt; + use lance_core::ROW_ID; + use lance_index::vector::pq::storage::transpose; + use lance_index::vector::PQ_CODE_COLUMN; + use std::sync::Arc; + + let row_ids = batch + .column_by_name(ROW_ID) + .ok_or_else(|| Error::io("Missing _rowid column in partition batch", location!()))?; + + let pq_codes = batch + .column_by_name(PQ_CODE_COLUMN) + .ok_or_else(|| Error::io("Missing __pq_code column in partition batch", location!()))?; + + let pq_codes_fsl = pq_codes.as_fixed_size_list(); + let codes_flat = pq_codes_fsl + .values() + .as_primitive::(); + + // Transpose from row-major to column-major + let transposed = transpose(codes_flat, pq_code_len, batch.num_rows()); + let transposed_fsl = Arc::new( + FixedSizeListArray::try_new_from_values(transposed, pq_code_len as i32).map_err(|e| { + Error::io( + format!("Failed to create transposed PQ array: {}", e), + location!(), + ) + })?, + ); + + arrow_array::RecordBatch::try_new(batch.schema(), vec![row_ids.clone(), transposed_fsl]) + .map_err(|e| { + Error::io( + format!("Failed to create transposed batch: {}", e), + location!(), + ) + }) +} + +/// Message to trigger flush of a frozen memtable to Lance storage. +pub struct TriggerMemTableFlush { + /// The frozen memtable to flush. + pub memtable: Arc, + /// Optional channel to notify when flush completes. + pub done: Option>>, +} + +impl std::fmt::Debug for TriggerMemTableFlush { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TriggerMemTableFlush") + .field("memtable_gen", &self.memtable.generation()) + .field("memtable_rows", &self.memtable.row_count()) + .field("has_done", &self.done.is_some()) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use arrow_array::{Int32Array, RecordBatch, StringArray}; + use arrow_schema::{DataType, Field, Schema as ArrowSchema}; + use std::sync::Arc; + use tempfile::TempDir; + + async fn create_local_store() -> (Arc, Path, String, TempDir) { + let temp_dir = tempfile::tempdir().unwrap(); + let uri = format!("file://{}", temp_dir.path().display()); + let (store, path) = ObjectStore::from_uri(&uri).await.unwrap(); + (store, path, uri, temp_dir) + } + + fn create_test_schema() -> Arc { + Arc::new(ArrowSchema::new(vec![ + Field::new("id", DataType::Int32, false), + Field::new("name", DataType::Utf8, true), + ])) + } + + fn create_test_batch(schema: &ArrowSchema, num_rows: usize) -> RecordBatch { + RecordBatch::try_new( + Arc::new(schema.clone()), + vec![ + Arc::new(Int32Array::from_iter_values(0..num_rows as i32)), + Arc::new(StringArray::from_iter_values( + (0..num_rows).map(|i| format!("name_{}", i)), + )), + ], + ) + .unwrap() + } + + #[tokio::test] + async fn test_flusher_requires_wal_flush() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // Claim region + let (epoch_guard, _manifest) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + + // Not flushed to WAL yet + assert!(!memtable.all_flushed_to_wal()); + + let flusher = 
MemTableFlusher::new(store, base_path, base_uri, region_id, manifest_store); + let result = flusher.flush(&memtable, &epoch_guard).await; + + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("unflushed fragments")); + } + + #[tokio::test] + async fn test_flusher_empty_memtable() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // Claim region + let (epoch_guard, _manifest) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + let schema = create_test_schema(); + let memtable = MemTable::new(schema, 1, vec![]).unwrap(); + + let flusher = MemTableFlusher::new(store, base_path, base_uri, region_id, manifest_store); + let result = flusher.flush(&memtable, &epoch_guard).await; + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("empty MemTable")); + } + + #[tokio::test] + async fn test_flusher_success() { + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // Claim region + let (epoch_guard, _manifest) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + let frag_id = memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + + // Simulate WAL flush + memtable.mark_wal_flushed(&[frag_id], 1, &[0]); + assert!(memtable.all_flushed_to_wal()); + + let flusher = MemTableFlusher::new( + store.clone(), + base_path, + base_uri, + region_id, + manifest_store.clone(), + ); + let result = flusher.flush(&memtable, &epoch_guard).await.unwrap(); + + assert_eq!(result.generation.generation, 1); + assert_eq!(result.rows_flushed, 10); + assert_eq!(result.fragments_created, 1); + assert_eq!(result.covered_wal_entry_position, 1); + + // Verify manifest was updated + let updated_manifest = manifest_store.read_latest().await.unwrap().unwrap(); + assert_eq!(updated_manifest.version, 2); + assert_eq!(updated_manifest.replay_after_wal_id, 1); + assert_eq!(updated_manifest.current_generation, 2); + assert_eq!(updated_manifest.flushed_generations.len(), 1); + } + + #[tokio::test] + async fn test_flusher_with_btree_index() { + use super::super::super::indexes::{BTreeIndexConfig, IndexRegistry}; + use lance_index::DatasetIndexExt; + + let (store, base_path, base_uri, _temp_dir) = create_local_store().await; + let region_id = Uuid::new_v4(); + let manifest_store = Arc::new(RegionManifestStore::new( + store.clone(), + &base_path, + region_id, + 2, + )); + + // Claim region + let (epoch_guard, _manifest) = EpochGuard::claim(manifest_store.clone(), 0).await.unwrap(); + + // Create index config for the 'id' column + let index_configs = vec![MemIndexConfig::BTree(BTreeIndexConfig { + name: "id_btree".to_string(), + column: "id".to_string(), + })]; + + let schema = create_test_schema(); + let mut memtable = MemTable::new(schema.clone(), 1, vec![]).unwrap(); + + // Set up in-memory index registry so preprocessed data path is used + let registry = IndexRegistry::from_configs(&index_configs, 100_000, 8).unwrap(); + memtable.set_indexes(registry); + + let frag_id = memtable + .insert(create_test_batch(&schema, 10)) + .await + .unwrap(); + + // Simulate WAL flush + 
memtable.mark_wal_flushed(&[frag_id], 1, &[0]); + + let flusher = MemTableFlusher::new( + store.clone(), + base_path.clone(), + base_uri.clone(), + region_id, + manifest_store.clone(), + ); + let result = flusher + .flush_with_indexes(&memtable, &epoch_guard, &index_configs) + .await + .unwrap(); + + assert_eq!(result.generation.generation, 1); + assert_eq!(result.rows_flushed, 10); + + // Verify the flushed dataset has the BTree index + // result.generation.path is an object store path, so we construct the URI + // by using the base_uri and the relative portion of the path + let gen_path_str = result.generation.path.as_str(); + let base_path_str = base_path.as_ref(); + let relative_path = if let Some(stripped) = gen_path_str.strip_prefix(base_path_str) { + stripped.trim_start_matches('/') + } else { + gen_path_str + }; + let gen_uri = if base_path_str.is_empty() { + format!("{}/{}", base_uri, gen_path_str) + } else { + format!("{}/{}", base_uri, relative_path) + }; + let dataset = Dataset::open(&gen_uri).await.unwrap(); + let indices = dataset.load_indices().await.unwrap(); + + assert_eq!(indices.len(), 1); + assert_eq!(indices[0].name, "id_btree"); + } +} diff --git a/rust/lance/src/dataset/mem_wal/write/stats.rs b/rust/lance/src/dataset/mem_wal/write/stats.rs new file mode 100644 index 00000000000..15cd61cd6c2 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/write/stats.rs @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! Write performance statistics collection. +//! +//! Provides atomic counters for tracking write performance metrics: +//! - Put operations (count, time) +//! - WAL flushes (count, time, bytes, fragments) +//! - MemTable flushes (count, time, rows, fragments) + +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::Duration; + +use tracing::info; + +/// Write performance statistics. +/// +/// All fields use atomic operations for thread-safe updates. +/// Use `snapshot()` to get a consistent view of all stats. +#[derive(Debug, Default)] +pub struct WriteStats { + // Put operation stats + put_count: AtomicU64, + put_time_nanos: AtomicU64, + + // WAL flush stats (total time = max(wal_io, index_update) due to parallel execution) + wal_flush_count: AtomicU64, + wal_flush_time_nanos: AtomicU64, + wal_flush_bytes: AtomicU64, + wal_flush_fragments: AtomicU64, + + // WAL flush sub-component stats (for diagnosing bottlenecks) + wal_io_time_nanos: AtomicU64, + wal_io_count: AtomicU64, + index_update_time_nanos: AtomicU64, + index_update_count: AtomicU64, + index_update_rows: AtomicU64, + + // MemTable flush stats + memtable_flush_count: AtomicU64, + memtable_flush_time_nanos: AtomicU64, + memtable_flush_rows: AtomicU64, + memtable_flush_fragments: AtomicU64, +} + +/// Snapshot of write statistics at a point in time. +#[derive(Debug, Clone)] +pub struct WriteStatsSnapshot { + pub put_count: u64, + pub put_time: Duration, + + pub wal_flush_count: u64, + pub wal_flush_time: Duration, + pub wal_flush_bytes: u64, + pub wal_flush_fragments: u64, + + // WAL flush sub-component stats + pub wal_io_time: Duration, + pub wal_io_count: u64, + pub index_update_time: Duration, + pub index_update_count: u64, + pub index_update_rows: u64, + + pub memtable_flush_count: u64, + pub memtable_flush_time: Duration, + pub memtable_flush_rows: u64, + pub memtable_flush_fragments: u64, +} + +impl WriteStats { + /// Create a new stats collector. 
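+ /// + /// A minimal usage sketch (mirrors the unit tests below; not compiled as a doctest): + /// + /// ```ignore + /// let stats = WriteStats::new(); + /// stats.record_put(Duration::from_millis(10)); + /// let snapshot = stats.snapshot(); + /// assert_eq!(snapshot.put_count, 1); + /// ```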
+ pub fn new() -> Self { + Self::default() + } + + /// Record a put operation. + pub fn record_put(&self, duration: Duration) { + self.put_count.fetch_add(1, Ordering::Relaxed); + self.put_time_nanos + .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed); + } + + /// Record a WAL flush operation (total time including parallel I/O and index). + pub fn record_wal_flush(&self, duration: Duration, bytes: usize, fragments: usize) { + self.wal_flush_count.fetch_add(1, Ordering::Relaxed); + self.wal_flush_time_nanos + .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed); + self.wal_flush_bytes + .fetch_add(bytes as u64, Ordering::Relaxed); + self.wal_flush_fragments + .fetch_add(fragments as u64, Ordering::Relaxed); + } + + /// Record WAL I/O duration (sub-component of WAL flush). + pub fn record_wal_io(&self, duration: Duration) { + self.wal_io_count.fetch_add(1, Ordering::Relaxed); + self.wal_io_time_nanos + .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed); + } + + /// Record index update duration (sub-component of WAL flush). + pub fn record_index_update(&self, duration: Duration, rows: usize) { + self.index_update_count.fetch_add(1, Ordering::Relaxed); + self.index_update_time_nanos + .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed); + self.index_update_rows + .fetch_add(rows as u64, Ordering::Relaxed); + } + + /// Record a MemTable flush operation. + pub fn record_memtable_flush(&self, duration: Duration, rows: usize, fragments: usize) { + self.memtable_flush_count.fetch_add(1, Ordering::Relaxed); + self.memtable_flush_time_nanos + .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed); + self.memtable_flush_rows + .fetch_add(rows as u64, Ordering::Relaxed); + self.memtable_flush_fragments + .fetch_add(fragments as u64, Ordering::Relaxed); + } + + /// Get a snapshot of current statistics. + pub fn snapshot(&self) -> WriteStatsSnapshot { + WriteStatsSnapshot { + put_count: self.put_count.load(Ordering::Relaxed), + put_time: Duration::from_nanos(self.put_time_nanos.load(Ordering::Relaxed)), + + wal_flush_count: self.wal_flush_count.load(Ordering::Relaxed), + wal_flush_time: Duration::from_nanos(self.wal_flush_time_nanos.load(Ordering::Relaxed)), + wal_flush_bytes: self.wal_flush_bytes.load(Ordering::Relaxed), + wal_flush_fragments: self.wal_flush_fragments.load(Ordering::Relaxed), + + wal_io_time: Duration::from_nanos(self.wal_io_time_nanos.load(Ordering::Relaxed)), + wal_io_count: self.wal_io_count.load(Ordering::Relaxed), + index_update_time: Duration::from_nanos( + self.index_update_time_nanos.load(Ordering::Relaxed), + ), + index_update_count: self.index_update_count.load(Ordering::Relaxed), + index_update_rows: self.index_update_rows.load(Ordering::Relaxed), + + memtable_flush_count: self.memtable_flush_count.load(Ordering::Relaxed), + memtable_flush_time: Duration::from_nanos( + self.memtable_flush_time_nanos.load(Ordering::Relaxed), + ), + memtable_flush_rows: self.memtable_flush_rows.load(Ordering::Relaxed), + memtable_flush_fragments: self.memtable_flush_fragments.load(Ordering::Relaxed), + } + } + + /// Reset all statistics. 
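+ /// + /// Each counter is cleared individually with relaxed ordering, so a concurrent + /// `snapshot()` may observe a partially reset view.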
+ pub fn reset(&self) { + self.put_count.store(0, Ordering::Relaxed); + self.put_time_nanos.store(0, Ordering::Relaxed); + + self.wal_flush_count.store(0, Ordering::Relaxed); + self.wal_flush_time_nanos.store(0, Ordering::Relaxed); + self.wal_flush_bytes.store(0, Ordering::Relaxed); + self.wal_flush_fragments.store(0, Ordering::Relaxed); + + self.wal_io_time_nanos.store(0, Ordering::Relaxed); + self.wal_io_count.store(0, Ordering::Relaxed); + self.index_update_time_nanos.store(0, Ordering::Relaxed); + self.index_update_count.store(0, Ordering::Relaxed); + self.index_update_rows.store(0, Ordering::Relaxed); + + self.memtable_flush_count.store(0, Ordering::Relaxed); + self.memtable_flush_time_nanos.store(0, Ordering::Relaxed); + self.memtable_flush_rows.store(0, Ordering::Relaxed); + self.memtable_flush_fragments.store(0, Ordering::Relaxed); + } +} + +impl WriteStatsSnapshot { + /// Get average put latency. + pub fn avg_put_latency(&self) -> Option { + if self.put_count > 0 { + Some(self.put_time / self.put_count as u32) + } else { + None + } + } + + /// Get put throughput (puts per second based on time spent in puts). + pub fn put_throughput(&self) -> f64 { + if self.put_time.as_secs_f64() > 0.0 { + self.put_count as f64 / self.put_time.as_secs_f64() + } else { + 0.0 + } + } + + /// Get average WAL flush latency. + pub fn avg_wal_flush_latency(&self) -> Option { + if self.wal_flush_count > 0 { + Some(self.wal_flush_time / self.wal_flush_count as u32) + } else { + None + } + } + + /// Get average WAL flush size in bytes. + pub fn avg_wal_flush_bytes(&self) -> Option { + if self.wal_flush_count > 0 { + Some(self.wal_flush_bytes / self.wal_flush_count) + } else { + None + } + } + + /// Get WAL write throughput (bytes per second based on WAL flush time). + pub fn wal_throughput_bytes(&self) -> f64 { + if self.wal_flush_time.as_secs_f64() > 0.0 { + self.wal_flush_bytes as f64 / self.wal_flush_time.as_secs_f64() + } else { + 0.0 + } + } + + /// Get average WAL I/O latency. + pub fn avg_wal_io_latency(&self) -> Option { + if self.wal_io_count > 0 { + Some(self.wal_io_time / self.wal_io_count as u32) + } else { + None + } + } + + /// Get average index update latency. + pub fn avg_index_update_latency(&self) -> Option { + if self.index_update_count > 0 { + Some(self.index_update_time / self.index_update_count as u32) + } else { + None + } + } + + /// Get average rows per index update. + pub fn avg_index_update_rows(&self) -> Option { + if self.index_update_count > 0 { + Some(self.index_update_rows / self.index_update_count) + } else { + None + } + } + + /// Get average MemTable flush latency. + pub fn avg_memtable_flush_latency(&self) -> Option { + if self.memtable_flush_count > 0 { + Some(self.memtable_flush_time / self.memtable_flush_count as u32) + } else { + None + } + } + + /// Get average MemTable flush size in rows. + pub fn avg_memtable_flush_rows(&self) -> Option { + if self.memtable_flush_count > 0 { + Some(self.memtable_flush_rows / self.memtable_flush_count) + } else { + None + } + } + + /// Log stats summary. 
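+ /// + /// The summary is emitted as a single `info!` line, roughly of the form (illustrative + /// values): `writer | Puts: 1000 (2500/s, avg 400µs) | WAL: 10 flushes (12.00 MB bytes, + /// avg 35ms) | MemTable: 2 flushes (20000 rows, avg 1.2s)`.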
+ pub fn log_summary(&self, prefix: &str) { + info!( + "{} | Puts: {} ({:.0}/s, avg {:?}) | WAL: {} flushes ({} bytes, avg {:?}) | MemTable: {} flushes ({} rows, avg {:?})", + prefix, + self.put_count, + self.put_throughput(), + self.avg_put_latency().unwrap_or_default(), + self.wal_flush_count, + format_bytes(self.wal_flush_bytes), + self.avg_wal_flush_latency().unwrap_or_default(), + self.memtable_flush_count, + self.memtable_flush_rows, + self.avg_memtable_flush_latency().unwrap_or_default(), + ); + } + + /// Log detailed WAL flush breakdown (WAL I/O vs index update). + pub fn log_wal_breakdown(&self, prefix: &str) { + if self.wal_flush_count > 0 { + info!( + "{} | WAL flush breakdown: total={:?} | io={:?} | index={:?} ({} rows)", + prefix, + self.avg_wal_flush_latency().unwrap_or_default(), + self.avg_wal_io_latency().unwrap_or_default(), + self.avg_index_update_latency().unwrap_or_default(), + self.index_update_rows, + ); + } + } +} + +/// Format bytes in human-readable form. +fn format_bytes(bytes: u64) -> String { + if bytes >= 1024 * 1024 * 1024 { + format!("{:.2} GB", bytes as f64 / (1024.0 * 1024.0 * 1024.0)) + } else if bytes >= 1024 * 1024 { + format!("{:.2} MB", bytes as f64 / (1024.0 * 1024.0)) + } else if bytes >= 1024 { + format!("{:.2} KB", bytes as f64 / 1024.0) + } else { + format!("{} B", bytes) + } +} + +/// Shared stats handle for use across components. +pub type SharedWriteStats = Arc; + +/// Create a new shared stats collector. +pub fn new_shared_stats() -> SharedWriteStats { + Arc::new(WriteStats::new()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_record_put() { + let stats = WriteStats::new(); + stats.record_put(Duration::from_millis(10)); + stats.record_put(Duration::from_millis(20)); + + let snapshot = stats.snapshot(); + assert_eq!(snapshot.put_count, 2); + assert_eq!(snapshot.put_time, Duration::from_millis(30)); + assert_eq!(snapshot.avg_put_latency(), Some(Duration::from_millis(15))); + } + + #[test] + fn test_record_wal_flush() { + let stats = WriteStats::new(); + stats.record_wal_flush(Duration::from_millis(100), 1024, 5); + stats.record_wal_flush(Duration::from_millis(200), 2048, 10); + + let snapshot = stats.snapshot(); + assert_eq!(snapshot.wal_flush_count, 2); + assert_eq!(snapshot.wal_flush_time, Duration::from_millis(300)); + assert_eq!(snapshot.wal_flush_bytes, 3072); + assert_eq!(snapshot.wal_flush_fragments, 15); + assert_eq!(snapshot.avg_wal_flush_bytes(), Some(1536)); + } + + #[test] + fn test_record_memtable_flush() { + let stats = WriteStats::new(); + stats.record_memtable_flush(Duration::from_secs(1), 10000, 50); + + let snapshot = stats.snapshot(); + assert_eq!(snapshot.memtable_flush_count, 1); + assert_eq!(snapshot.memtable_flush_time, Duration::from_secs(1)); + assert_eq!(snapshot.memtable_flush_rows, 10000); + assert_eq!(snapshot.memtable_flush_fragments, 50); + } + + #[test] + fn test_reset() { + let stats = WriteStats::new(); + stats.record_put(Duration::from_millis(10)); + stats.record_wal_flush(Duration::from_millis(100), 1024, 5); + + stats.reset(); + + let snapshot = stats.snapshot(); + assert_eq!(snapshot.put_count, 0); + assert_eq!(snapshot.wal_flush_count, 0); + } +} diff --git a/rust/lance/src/dataset/mem_wal/write/wal.rs b/rust/lance/src/dataset/mem_wal/write/wal.rs new file mode 100644 index 00000000000..b326d3bde92 --- /dev/null +++ b/rust/lance/src/dataset/mem_wal/write/wal.rs @@ -0,0 +1,923 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright The Lance Authors + +//! 
Write-Ahead Log (WAL) flusher for durability. +//! +//! Batches are written as Arrow IPC streams with writer epoch metadata for fencing. +//! WAL files use bit-reversed naming to distribute files evenly across S3 keyspace. + +use std::io::Cursor; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::Instant; + +use arrow_array::RecordBatch; +use arrow_ipc::reader::StreamReader; +use arrow_ipc::writer::StreamWriter; +use arrow_schema::Schema as ArrowSchema; +use bytes::Bytes; +use lance_core::{Error, Result}; +use lance_io::object_store::ObjectStore; +use object_store::path::Path; +use snafu::location; +use tokio::sync::{mpsc, watch}; + +use super::super::watchable_cell::WatchableOnceCell; +use tracing::debug; +use uuid::Uuid; + +use crate::dataset::mem_wal::util::{region_wal_path, wal_entry_filename}; + +use super::super::batch_store::LockFreeBatchStore; +use super::super::indexes::{BufferedBatch, IndexRegistry}; + +/// Key for storing writer epoch in Arrow IPC file schema metadata. +pub const WRITER_EPOCH_KEY: &str = "writer_epoch"; + +/// Watcher for batch durability using watermark-based tracking. +/// +/// Instead of per-batch oneshot channels, this uses a shared watch channel +/// that broadcasts the durable watermark. The watcher waits until the +/// watermark reaches or exceeds its target batch ID. +#[derive(Clone)] +pub struct BatchDurableWatcher { + /// Watch receiver for the durable watermark. + rx: watch::Receiver, + /// Target batch ID to wait for. + target_batch_position: usize, +} + +impl BatchDurableWatcher { + /// Create a new watcher for a specific batch ID. + pub fn new(rx: watch::Receiver, target_batch_position: usize) -> Self { + Self { + rx, + target_batch_position, + } + } + + /// Wait until the batch is durable. + /// + /// Returns Ok(()) when `durable_watermark >= target_batch_position`. + pub async fn wait(&mut self) -> Result<()> { + loop { + let current = *self.rx.borrow(); + if current >= self.target_batch_position { + return Ok(()); + } + self.rx + .changed() + .await + .map_err(|_| Error::io("Durable watermark channel closed", location!()))?; + } + } + + /// Check if the batch is already durable (non-blocking). + pub fn is_durable(&self) -> bool { + *self.rx.borrow() >= self.target_batch_position + } +} + +impl std::fmt::Debug for BatchDurableWatcher { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BatchDurableWatcher") + .field("target_batch_position", &self.target_batch_position) + .field("current_watermark", &*self.rx.borrow()) + .finish() + } +} + +/// A single WAL entry representing a batch of batches. +#[derive(Debug, Clone)] +pub struct WalEntry { + /// WAL entry position (0-based, sequential). + pub position: u64, + /// Writer epoch at the time of write. + pub writer_epoch: u64, + /// Batch IDs and their positions within this entry. + pub batch_positionitions: Vec<(usize, usize)>, // (batch_position, position) +} + +/// Result of a parallel WAL flush with index update. +#[derive(Debug, Clone)] +pub struct WalFlushResult { + /// WAL entry that was written (if any). + pub entry: Option, + /// Duration of WAL I/O operation. + pub wal_io_duration: std::time::Duration, + /// Overall wall-clock duration of the index update operation. + /// This includes any overhead from thread scheduling and context switching. + pub index_update_duration: std::time::Duration, + /// Per-index update durations. Key is index name, value is duration. 
+ pub index_update_duration_breakdown: std::collections::HashMap, + /// Number of rows indexed. + pub rows_indexed: usize, + /// Size of WAL data written in bytes. + pub wal_bytes: usize, +} + +/// Message to trigger a WAL flush for a specific batch store. +/// +/// This unified message handles both: +/// - Normal periodic flushes (specific end_batch_position) +/// - Freeze-time flushes (end_batch_position = usize::MAX to flush all) +pub struct TriggerWalFlush { + /// The batch store to flush from. + pub batch_store: Arc, + /// The indexes to update in parallel (for WAL-coupled index updates). + pub indexes: Option>, + /// End batch position (exclusive) - flush batches after max_wal_flushed_batch_position up to this. + /// Use usize::MAX to flush all pending batches. + pub end_batch_position: usize, + /// Optional cell to write completion result. + /// Uses Result since Error doesn't implement Clone. + pub done: Option>>, +} + +impl std::fmt::Debug for TriggerWalFlush { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TriggerWalFlush") + .field( + "pending_batches", + &self.batch_store.pending_wal_flush_count(), + ) + .field("end_batch_position", &self.end_batch_position) + .finish() + } +} + +/// Buffer for WAL operations. +/// +/// Durability is tracked via a watch channel that broadcasts the durable watermark. +/// The actual flush watermark is stored in `LockFreeBatchStore.max_flushed_batch_position`. +pub struct WalFlusher { + /// Watch channel sender for durable watermark. + /// Broadcasts the highest batch_position that is now durable. + durable_watermark_tx: watch::Sender, + /// Watch channel receiver for creating new watchers. + durable_watermark_rx: watch::Receiver, + /// Object store for writing WAL files. + object_store: Option>, + /// Region ID. + region_id: Uuid, + /// Writer epoch (stored in WAL entries for fencing). + writer_epoch: u64, + /// Next WAL entry ID to use. + next_wal_entry_position: AtomicU64, + /// Channel to send flush messages. + flush_tx: Option>, + /// WAL directory path. + wal_dir: Path, + /// Cell for WAL flush completion notification. + /// Created at construction and recreated after each flush. + /// Used by backpressure to wait for WAL flushes. + wal_flush_cell: + std::sync::Mutex>>, +} + +impl WalFlusher { + /// Create a new WAL flusher. + /// + /// # Arguments + /// + /// * `base_path` - Base path within the object store (from ObjectStore::from_uri) + /// * `region_id` - Region UUID + /// * `writer_epoch` - Current writer epoch + /// * `next_wal_entry_position` - Next WAL entry ID (from recovery or 1 for new region) + pub fn new( + base_path: &Path, + region_id: Uuid, + writer_epoch: u64, + next_wal_entry_position: u64, + ) -> Self { + let wal_dir = region_wal_path(base_path, ®ion_id); + // Initialize durable watermark at 0 (no batches durable yet) + let (durable_watermark_tx, durable_watermark_rx) = watch::channel(0); + // Create initial WAL flush cell for backpressure + let wal_flush_cell = WatchableOnceCell::new(); + Self { + durable_watermark_tx, + durable_watermark_rx, + object_store: None, + region_id, + writer_epoch, + next_wal_entry_position: AtomicU64::new(next_wal_entry_position), + flush_tx: None, + wal_dir, + wal_flush_cell: std::sync::Mutex::new(Some(wal_flush_cell)), + } + } + + /// Set the object store for WAL file operations. + pub fn set_object_store(&mut self, object_store: Arc) { + self.object_store = Some(object_store); + } + + /// Set the flush channel for background flush handler. 
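+ /// + /// If no channel has been set, `trigger_flush` succeeds without scheduling any work.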
+ pub fn set_flush_channel(&mut self, tx: mpsc::UnboundedSender) { + self.flush_tx = Some(tx); + } + + /// Track a batch for WAL durability. + /// + /// Returns a `BatchDurableWatcher` that can be awaited for durability. + /// + /// Note: The actual batch data is stored in the LockFreeBatchStore. + /// + /// # Arguments + /// + /// * `batch_position` - Batch ID (index in the BatchStore) + pub fn track_batch(&self, batch_position: usize) -> BatchDurableWatcher { + // Return a watcher that waits for this batch to become durable + // batch_position is 0-indexed, so we wait for watermark > batch_position (i.e., >= batch_position + 1) + BatchDurableWatcher::new(self.durable_watermark_rx.clone(), batch_position + 1) + } + + /// Get the current durable watermark. + pub fn durable_watermark(&self) -> usize { + *self.durable_watermark_rx.borrow() + } + + /// Get a watcher for WAL flush completion. + /// + /// Returns a watcher that resolves when the next WAL flush completes. + /// Used by backpressure to wait for WAL flushes when the buffer is full. + pub fn wal_flush_watcher( + &self, + ) -> Option< + super::super::watchable_cell::WatchableOnceCellReader, + > { + self.wal_flush_cell + .lock() + .unwrap() + .as_ref() + .map(|cell| cell.reader()) + } + + /// Signal that a WAL flush has completed and create a new cell for the next flush. + /// + /// Called after each successful WAL flush to notify backpressure waiters. + fn signal_wal_flush_complete(&self) { + let mut guard = self.wal_flush_cell.lock().unwrap(); + // Signal the current cell + if let Some(cell) = guard.take() { + cell.write(super::batch_write::DurabilityResult::ok()); + } + // Create a new cell for the next flush + *guard = Some(WatchableOnceCell::new()); + } + + /// Trigger an immediate flush for a specific batch store up to a specific batch ID. + /// + /// # Arguments + /// + /// * `batch_store` - The batch store to flush from + /// * `indexes` - Optional indexes to update in parallel with WAL I/O + /// * `end_batch_position` - End batch ID (exclusive). Use usize::MAX to flush all pending. + /// * `done` - Optional cell to write completion result + pub fn trigger_flush( + &self, + batch_store: Arc, + indexes: Option>, + end_batch_position: usize, + done: Option>>, + ) -> Result<()> { + if let Some(tx) = &self.flush_tx { + tx.send(TriggerWalFlush { + batch_store, + indexes, + end_batch_position, + done, + }) + .map_err(|_| Error::io("WAL flush channel closed", location!()))?; + } + Ok(()) + } + /// Flush pending batches to WAL and update indexes in parallel. + /// + /// This method: + /// 1. Gets pending batch range from watermarks + /// 2. Runs WAL I/O and index updates in parallel + /// 3. Returns timing metrics for both operations + /// + /// # Arguments + /// + /// * `batch_store` - The LockFreeBatchStore to read batches from + /// * `indexes` - Optional IndexRegistry to update (Arc for thread-safe sharing) + /// + /// # Returns + /// + /// A `WalFlushResult` with timing metrics and the WAL entry. 
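+ /// + /// The pending batches are serialized as a single Arrow IPC stream whose schema + /// metadata carries the writer epoch for fencing on replay. The object store write + /// and the index updates run concurrently via `tokio::join!`, with the index inserts + /// offloaded to `spawn_blocking`.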
+ pub async fn flush_with_index_update( + &self, + batch_store: &LockFreeBatchStore, + indexes: Option>, + ) -> Result { + // Get pending batch range from per-memtable watermark (inclusive) + // start_batch_position is the first batch to flush + let start_batch_position = batch_store + .max_flushed_batch_position() + .map(|w| w + 1) + .unwrap_or(0); + let current_count = batch_store.len(); + + if current_count <= start_batch_position { + return Ok(WalFlushResult { + entry: None, + wal_io_duration: std::time::Duration::ZERO, + index_update_duration: std::time::Duration::ZERO, + index_update_duration_breakdown: std::collections::HashMap::new(), + rows_indexed: 0, + wal_bytes: 0, + }); + } + + let object_store = self + .object_store + .as_ref() + .ok_or_else(|| Error::io("Object store not set on WAL flusher", location!()))?; + + let wal_entry_position = self.next_wal_entry_position.fetch_add(1, Ordering::SeqCst); + let filename = wal_entry_filename(wal_entry_position); + let final_path = self.wal_dir.child(format!("{}.arrow", filename).as_str()); + + debug!( + "Flushing batches {}..{} to WAL entry {} with parallel index update", + start_batch_position, current_count, wal_entry_position + ); + + // Collect batches and prepare index data + let mut batches = Vec::with_capacity(current_count - start_batch_position); + let mut batch_positionitions = Vec::with_capacity(current_count - start_batch_position); + let mut index_batches = Vec::with_capacity(current_count - start_batch_position); + + for batch_position in start_batch_position..current_count { + if let Some(stored) = batch_store.get(batch_position) { + batch_positionitions.push((batch_position, batches.len())); + batches.push(stored.data.clone()); + index_batches.push(BufferedBatch { + batch: stored.data.clone(), + row_offset: stored.row_offset, + batch_position: Some(batch_position), + }); + } + } + + if batches.is_empty() { + return Ok(WalFlushResult { + entry: None, + wal_io_duration: std::time::Duration::ZERO, + index_update_duration: std::time::Duration::ZERO, + index_update_duration_breakdown: std::collections::HashMap::new(), + rows_indexed: 0, + wal_bytes: 0, + }); + } + + let rows_to_index: usize = index_batches.iter().map(|b| b.batch.num_rows()).sum(); + + // Prepare WAL I/O data + let schema = batches[0].schema(); + let mut metadata = schema.metadata().clone(); + metadata.insert(WRITER_EPOCH_KEY.to_string(), self.writer_epoch.to_string()); + let schema_with_epoch = Arc::new(ArrowSchema::new_with_metadata( + schema.fields().to_vec(), + metadata, + )); + + // Serialize WAL data as IPC stream (schema at start, no footer) + let mut buffer = Vec::new(); + { + let mut writer = + StreamWriter::try_new(&mut buffer, &schema_with_epoch).map_err(|e| { + Error::io( + format!("Failed to create Arrow IPC stream writer: {}", e), + location!(), + ) + })?; + + for batch in &batches { + writer.write(batch).map_err(|e| { + Error::io( + format!("Failed to write batch to Arrow IPC stream: {}", e), + location!(), + ) + })?; + } + + writer.finish().map_err(|e| { + Error::io( + format!("Failed to finish Arrow IPC stream: {}", e), + location!(), + ) + })?; + } + + let wal_bytes = buffer.len(); + + // WAL I/O task + let wal_path = final_path.clone(); + let wal_data = Bytes::from(buffer); + let store = object_store.clone(); + + // Index update task (runs in parallel via tokio::join!) 
+ // Returns (overall_duration, per_index_durations) + let (wal_result, index_result) = if let Some(idx_registry) = indexes { + // Run both in parallel + let wal_future = async { + let start = Instant::now(); + store + .inner + .put(&wal_path, wal_data.into()) + .await + .map_err(|e| { + Error::io(format!("Failed to write WAL file: {}", e), location!()) + })?; + Ok::<_, Error>(start.elapsed()) + }; + + let index_future = async { + let start = Instant::now(); + let per_index = tokio::task::spawn_blocking(move || { + idx_registry.insert_batches_parallel(&index_batches) + }) + .await + .map_err(|e| Error::Internal { + message: format!("Index update task panicked: {}", e), + location: location!(), + })??; + // Return both overall duration and per-index durations + Ok::<_, Error>((start.elapsed(), per_index)) + }; + + tokio::join!(wal_future, index_future) + } else { + // No indexes, just do WAL I/O + let wal_future = async { + let start = Instant::now(); + store + .inner + .put(&wal_path, wal_data.into()) + .await + .map_err(|e| { + Error::io(format!("Failed to write WAL file: {}", e), location!()) + })?; + Ok::<_, Error>(start.elapsed()) + }; + + ( + wal_future.await, + Ok((std::time::Duration::ZERO, std::collections::HashMap::new())), + ) + }; + + // Check for errors + let wal_io_duration = wal_result?; + let (index_update_duration, index_update_duration_breakdown) = index_result?; + + // Update per-memtable watermark (inclusive: last batch ID that was flushed) + batch_store.set_max_flushed_batch_position(current_count - 1); + + // Notify durability waiters (global channel) + let _ = self.durable_watermark_tx.send(current_count); + // Signal WAL flush completion for backpressure waiters + self.signal_wal_flush_complete(); + + let entry = WalEntry { + position: wal_entry_position, + writer_epoch: self.writer_epoch, + batch_positionitions, + }; + + debug!( + "WAL entry {} written: wal_io={:?}, index={:?} ({:?}), {} rows", + wal_entry_position, + wal_io_duration, + index_update_duration, + index_update_duration_breakdown, + rows_to_index + ); + + Ok(WalFlushResult { + entry: Some(entry), + wal_io_duration, + index_update_duration, + index_update_duration_breakdown, + rows_indexed: rows_to_index, + wal_bytes, + }) + } + + /// Flush batches up to a specific end_batch_position with index updates. + /// + /// This method flushes batches from `(max_wal_flushed_batch_position + 1)` to `end_batch_position`, + /// allowing each trigger to flush only the batches that existed at trigger time. + /// + /// # Arguments + /// + /// * `batch_store` - The LockFreeBatchStore to read batches from + /// * `end_batch_position` - End batch ID (exclusive) - flush up to this batch + /// * `indexes` - Optional IndexRegistry to update + /// + /// # Returns + /// + /// A `WalFlushResult` with timing metrics and the WAL entry. + /// Returns empty result if nothing to flush (already flushed past end_batch_position). 
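+ /// + /// On success the batch store's flush watermark is advanced to + /// `end_batch_position - 1`, the durable watermark is broadcast, and any + /// backpressure waiters on the WAL flush cell are notified.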
+ pub async fn flush_to_with_index_update( + &self, + batch_store: &LockFreeBatchStore, + end_batch_position: usize, + indexes: Option>, + ) -> Result { + // Get current flush position from per-memtable watermark (inclusive) + // start_batch_position is the first batch to flush + let start_batch_position = batch_store + .max_flushed_batch_position() + .map(|w| w + 1) + .unwrap_or(0); + + // If we've already flushed past this end, nothing to do + if start_batch_position >= end_batch_position { + return Ok(WalFlushResult { + entry: None, + wal_io_duration: std::time::Duration::ZERO, + index_update_duration: std::time::Duration::ZERO, + index_update_duration_breakdown: std::collections::HashMap::new(), + rows_indexed: 0, + wal_bytes: 0, + }); + } + + let object_store = self + .object_store + .as_ref() + .ok_or_else(|| Error::io("Object store not set on WAL flusher", location!()))?; + + let wal_entry_position = self.next_wal_entry_position.fetch_add(1, Ordering::SeqCst); + let filename = wal_entry_filename(wal_entry_position); + let final_path = self.wal_dir.child(format!("{}.arrow", filename).as_str()); + + // Collect batches in range [start_batch_position, end_batch_position) + let mut batches = Vec::with_capacity(end_batch_position - start_batch_position); + let mut batch_positionitions = + Vec::with_capacity(end_batch_position - start_batch_position); + let mut index_batches = Vec::with_capacity(end_batch_position - start_batch_position); + + for batch_position in start_batch_position..end_batch_position { + if let Some(stored) = batch_store.get(batch_position) { + batch_positionitions.push((batch_position, batches.len())); + batches.push(stored.data.clone()); + index_batches.push(BufferedBatch { + batch: stored.data.clone(), + row_offset: stored.row_offset, + batch_position: Some(batch_position), + }); + } + } + + if batches.is_empty() { + return Ok(WalFlushResult { + entry: None, + wal_io_duration: std::time::Duration::ZERO, + index_update_duration: std::time::Duration::ZERO, + index_update_duration_breakdown: std::collections::HashMap::new(), + rows_indexed: 0, + wal_bytes: 0, + }); + } + + let rows_to_index: usize = index_batches.iter().map(|b| b.batch.num_rows()).sum(); + + // Prepare WAL I/O data + let schema = batches[0].schema(); + let mut metadata = schema.metadata().clone(); + metadata.insert(WRITER_EPOCH_KEY.to_string(), self.writer_epoch.to_string()); + let schema_with_epoch = Arc::new(ArrowSchema::new_with_metadata( + schema.fields().to_vec(), + metadata, + )); + + // Serialize WAL data as IPC stream (schema at start, no footer) + let mut buffer = Vec::new(); + { + let mut writer = + StreamWriter::try_new(&mut buffer, &schema_with_epoch).map_err(|e| { + Error::io( + format!("Failed to create Arrow IPC stream writer: {}", e), + location!(), + ) + })?; + + for batch in &batches { + writer.write(batch).map_err(|e| { + Error::io( + format!("Failed to write batch to Arrow IPC stream: {}", e), + location!(), + ) + })?; + } + + writer.finish().map_err(|e| { + Error::io( + format!("Failed to finish Arrow IPC stream: {}", e), + location!(), + ) + })?; + } + + let wal_bytes = buffer.len(); + + // WAL I/O and index update in parallel + let wal_path = final_path.clone(); + let wal_data = Bytes::from(buffer); + let store = object_store.clone(); + + // Returns (overall_duration, per_index_durations) + let (wal_result, index_result) = if let Some(idx_registry) = indexes { + let wal_future = async { + let start = Instant::now(); + store + .inner + .put(&wal_path, wal_data.into()) + .await + 
+                    .map_err(|e| {
+                        Error::io(format!("Failed to write WAL file: {}", e), location!())
+                    })?;
+                Ok::<_, Error>(start.elapsed())
+            };
+
+            let index_future = async {
+                let start = Instant::now();
+                let per_index = tokio::task::spawn_blocking(move || {
+                    idx_registry.insert_batches_parallel(&index_batches)
+                })
+                .await
+                .map_err(|e| Error::Internal {
+                    message: format!("Index update task panicked: {}", e),
+                    location: location!(),
+                })??;
+                Ok::<_, Error>((start.elapsed(), per_index))
+            };
+
+            tokio::join!(wal_future, index_future)
+        } else {
+            let wal_future = async {
+                let start = Instant::now();
+                store
+                    .inner
+                    .put(&wal_path, wal_data.into())
+                    .await
+                    .map_err(|e| {
+                        Error::io(format!("Failed to write WAL file: {}", e), location!())
+                    })?;
+                Ok::<_, Error>(start.elapsed())
+            };
+
+            (
+                wal_future.await,
+                Ok((std::time::Duration::ZERO, std::collections::HashMap::new())),
+            )
+        };
+
+        let wal_io_duration = wal_result?;
+        let (index_update_duration, index_update_duration_breakdown) = index_result?;
+
+        // Update per-memtable watermark (inclusive: last batch position that was flushed)
+        batch_store.set_max_flushed_batch_position(end_batch_position - 1);
+
+        // Notify durability waiters (global channel)
+        let _ = self.durable_watermark_tx.send(end_batch_position);
+        // Signal WAL flush completion for backpressure waiters
+        self.signal_wal_flush_complete();
+
+        let entry = WalEntry {
+            position: wal_entry_position,
+            writer_epoch: self.writer_epoch,
+            batch_positionitions,
+        };
+
+        Ok(WalFlushResult {
+            entry: Some(entry),
+            wal_io_duration,
+            index_update_duration,
+            index_update_duration_breakdown,
+            rows_indexed: rows_to_index,
+            wal_bytes,
+        })
+    }
+
+    /// Get the next WAL entry position (last written + 1).
+    pub fn next_wal_entry_position(&self) -> u64 {
+        self.next_wal_entry_position.load(Ordering::SeqCst)
+    }
+
+    /// Get the region ID.
+    pub fn region_id(&self) -> Uuid {
+        self.region_id
+    }
+
+    /// Get the writer epoch.
+    pub fn writer_epoch(&self) -> u64 {
+        self.writer_epoch
+    }
+
+    /// Get the path for a WAL entry.
+    pub fn wal_entry_path(&self, wal_entry_position: u64) -> Path {
+        let filename = wal_entry_filename(wal_entry_position);
+        self.wal_dir.child(format!("{}.arrow", filename).as_str())
+    }
+}
+
+/// A WAL entry read from storage for replay.
+#[derive(Debug)]
+pub struct WalEntryData {
+    /// Writer epoch from the WAL entry.
+    pub writer_epoch: u64,
+    /// Record batches from the WAL entry.
+    pub batches: Vec<RecordBatch>,
+}
+
+impl WalEntryData {
+    /// Read a WAL entry from storage.
+    ///
+    /// # Arguments
+    ///
+    /// * `object_store` - Object store to read from
+    /// * `path` - Path to the WAL entry (an Arrow IPC stream)
+    ///
+    /// # Returns
+    ///
+    /// The parsed WAL entry data, or an error if reading or parsing fails.
+    pub async fn read(object_store: &ObjectStore, path: &Path) -> Result<Self> {
+        // Read the file
+        let data = object_store
+            .inner
+            .get(path)
+            .await
+            .map_err(|e| Error::io(format!("Failed to read WAL file: {}", e), location!()))?
+            .bytes()
+            .await
+            .map_err(|e| Error::io(format!("Failed to get WAL file bytes: {}", e), location!()))?;
+
+        // Parse as Arrow IPC stream
+        let cursor = Cursor::new(data);
+        let reader = StreamReader::try_new(cursor, None).map_err(|e| {
+            Error::io(
+                format!("Failed to open Arrow IPC stream reader: {}", e),
+                location!(),
+            )
+        })?;
+
+        // Extract writer epoch from schema metadata (at start of stream)
+        let schema = reader.schema();
+        let writer_epoch = schema
+            .metadata()
+            .get(WRITER_EPOCH_KEY)
+            .and_then(|s| s.parse::<u64>().ok())
+            .unwrap_or(0);
+
+        // Read all batches
+        let mut batches = Vec::new();
+        for batch_result in reader {
+            let batch = batch_result.map_err(|e| {
+                Error::io(
+                    format!("Failed to read batch from Arrow IPC stream: {}", e),
+                    location!(),
+                )
+            })?;
+            batches.push(batch);
+        }
+
+        Ok(Self {
+            writer_epoch,
+            batches,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use arrow_array::{Int32Array, StringArray};
+    use arrow_schema::{DataType, Field, Schema};
+    use std::sync::Arc;
+    use tempfile::TempDir;
+
+    async fn create_local_store() -> (Arc<ObjectStore>, Path, TempDir) {
+        let temp_dir = tempfile::tempdir().unwrap();
+        let uri = format!("file://{}", temp_dir.path().display());
+        let (store, path) = ObjectStore::from_uri(&uri).await.unwrap();
+        (store, path, temp_dir)
+    }
+
+    fn create_test_schema() -> Arc<Schema> {
+        Arc::new(Schema::new(vec![
+            Field::new("id", DataType::Int32, false),
+            Field::new("name", DataType::Utf8, true),
+        ]))
+    }
+
+    fn create_test_batch(schema: &Schema, num_rows: usize) -> RecordBatch {
+        RecordBatch::try_new(
+            Arc::new(schema.clone()),
+            vec![
+                Arc::new(Int32Array::from_iter_values(0..num_rows as i32)),
+                Arc::new(StringArray::from_iter_values(
+                    (0..num_rows).map(|i| format!("name_{}", i)),
+                )),
+            ],
+        )
+        .unwrap()
+    }
+
+    #[tokio::test]
+    async fn test_wal_flusher_track_batch() {
+        let (store, base_path, _temp_dir) = create_local_store().await;
+        let region_id = Uuid::new_v4();
+        let mut buffer = WalFlusher::new(&base_path, region_id, 1, 1);
+        buffer.set_object_store(store);
+
+        // Track a batch
+        let watcher = buffer.track_batch(0);
+
+        // Watcher should not be durable yet
+        assert!(!watcher.is_durable());
+    }
+
+    #[tokio::test]
+    async fn test_wal_flusher_flush_with_index_update() {
+        let (store, base_path, _temp_dir) = create_local_store().await;
+        let region_id = Uuid::new_v4();
+        let mut buffer = WalFlusher::new(&base_path, region_id, 1, 1);
+        buffer.set_object_store(store);
+
+        // Create a LockFreeBatchStore with some data
+        let schema = create_test_schema();
+        let batch1 = create_test_batch(&schema, 10);
+        let batch2 = create_test_batch(&schema, 5);
+
+        let batch_store = LockFreeBatchStore::with_capacity(10);
+        batch_store.append(batch1).unwrap();
+        batch_store.append(batch2).unwrap();
+
+        // Track batch IDs in WAL flusher
+        let mut watcher1 = buffer.track_batch(0);
+        let mut watcher2 = buffer.track_batch(1);
+
+        // Verify initial state
+        assert!(!watcher1.is_durable());
+        assert!(!watcher2.is_durable());
+        assert!(batch_store.max_flushed_batch_position().is_none());
+
+        // Flush using flush_with_index_update
+        let result = buffer
+            .flush_with_index_update(&batch_store, None)
+            .await
+            .unwrap();
+        let entry = result.entry.unwrap();
+        assert_eq!(entry.position, 1);
+        assert_eq!(entry.writer_epoch, 1);
+        assert_eq!(entry.batch_positionitions.len(), 2);
+        // After flushing 2 batches (positions 0 and 1), max flushed position is 1 (inclusive)
+        assert_eq!(batch_store.max_flushed_batch_position(), Some(1));
+
+        // Watchers should be notified
+        watcher1.wait().await.unwrap();
+        watcher2.wait().await.unwrap();
+        assert!(watcher1.is_durable());
+        assert!(watcher2.is_durable());
+    }
+
+    #[tokio::test]
+    async fn test_wal_entry_read() {
+        let (store, base_path, _temp_dir) = create_local_store().await;
+        let region_id = Uuid::new_v4();
+        let mut buffer = WalFlusher::new(&base_path, region_id, 42, 1);
+        buffer.set_object_store(store.clone());
+
+        // Create a LockFreeBatchStore with some data
+        let schema = create_test_schema();
+        let batch_store = LockFreeBatchStore::with_capacity(10);
+        batch_store.append(create_test_batch(&schema, 10)).unwrap();
+        batch_store.append(create_test_batch(&schema, 5)).unwrap();
+
+        // Track batch IDs and flush using flush_with_index_update
+        let _watcher1 = buffer.track_batch(0);
+        let _watcher2 = buffer.track_batch(1);
+        let result = buffer
+            .flush_with_index_update(&batch_store, None)
+            .await
+            .unwrap();
+        let entry = result.entry.unwrap();
+
+        // Read back the WAL entry
+        let wal_path = buffer.wal_entry_path(entry.position);
+        let wal_data = WalEntryData::read(&store, &wal_path).await.unwrap();
+
+        // Verify the read data
+        assert_eq!(wal_data.writer_epoch, 42);
+        assert_eq!(wal_data.batches.len(), 2);
+        assert_eq!(wal_data.batches[0].num_rows(), 10);
+        assert_eq!(wal_data.batches[1].num_rows(), 5);
+    }
+}
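Both flush paths above overlap the object-store upload with the index update: the blocking `insert_batches_parallel` call is pushed onto tokio's blocking pool while the async `put` runs, and `tokio::join!` waits for both. The following self-contained sketch reproduces just that pattern; it assumes only the `tokio` crate with its `full` feature set, and the sleeps are stand-ins for the object-store `put()` and the index insertion, not calls from this patch.

```rust
use std::time::{Duration, Instant};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Async "WAL upload": stays on the runtime's worker threads.
    let wal_future = async {
        let start = Instant::now();
        tokio::time::sleep(Duration::from_millis(20)).await; // pretend: put() to object store
        Ok::<_, std::io::Error>(start.elapsed())
    };

    // Blocking "index update": moved onto tokio's blocking pool so it cannot
    // stall the async reactor while the upload is in flight.
    let index_future = async {
        let start = Instant::now();
        tokio::task::spawn_blocking(|| {
            std::thread::sleep(Duration::from_millis(20)); // pretend: insert_batches_parallel()
        })
        .await?; // a panic in the blocking task surfaces here as a JoinError
        Ok::<_, tokio::task::JoinError>(start.elapsed())
    };

    // Drive both to completion concurrently and keep separate timings,
    // mirroring the (wal_result, index_result) pair in the flusher.
    let (wal_io, index_update) = tokio::join!(wal_future, index_future);
    println!("wal_io={:?}, index_update={:?}", wal_io?, index_update?);
    Ok(())
}
```

Because the index work runs in `spawn_blocking`, the async worker threads remain free to drive the upload, which is why the two measured durations can overlap almost completely.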
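The WAL entry on disk is simply an Arrow IPC stream whose schema metadata carries the writer epoch, which is how `WalEntryData::read` recovers the epoch without a separate header. Below is a standalone round-trip sketch using only the `arrow-*` crates; the literal `"writer_epoch"` key and the value `42` are placeholders for the patch's `WRITER_EPOCH_KEY` constant and the real epoch.

```rust
use std::io::Cursor;
use std::sync::Arc;

use arrow_array::{Int32Array, RecordBatch};
use arrow_ipc::reader::StreamReader;
use arrow_ipc::writer::StreamWriter;
use arrow_schema::{DataType, Field, Schema};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Stamp the writer epoch into the schema metadata, as the flusher does.
    let base = Schema::new(vec![Field::new("id", DataType::Int32, false)]);
    let mut metadata = base.metadata().clone();
    metadata.insert("writer_epoch".to_string(), "42".to_string());
    let schema = Arc::new(Schema::new_with_metadata(base.fields().to_vec(), metadata));

    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )?;

    // Serialize as an Arrow IPC *stream* (schema first, no footer), like a WAL entry.
    let mut buffer = Vec::new();
    {
        let mut writer = StreamWriter::try_new(&mut buffer, &schema)?;
        writer.write(&batch)?;
        writer.finish()?;
    }

    // Read it back: the epoch comes from the stream's schema metadata,
    // the data from iterating the batches.
    let reader = StreamReader::try_new(Cursor::new(buffer), None)?;
    let epoch: u64 = reader
        .schema()
        .metadata()
        .get("writer_epoch")
        .and_then(|s| s.parse().ok())
        .unwrap_or(0);
    let batches: Vec<RecordBatch> = reader.collect::<Result<Vec<_>, _>>()?;

    assert_eq!(epoch, 42);
    assert_eq!(batches[0].num_rows(), 3);
    Ok(())
}
```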