From 4d0d6962bcebc1a87d17b645e43cb7088424dfe8 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Mon, 13 Apr 2026 16:44:08 -0400 Subject: [PATCH 01/32] update 26.2.0 GA docs (wip) --- src/current/_config_cockroachdb.yml | 4 +- src/current/_data/releases.yml | 32 +++++ src/current/_data/versions.csv | 2 +- .../releases/v26.2/backward-incompatible.md | 56 +++++++-- .../releases/v26.2/cluster-setting-changes.md | 110 ++++++++++++------ .../_includes/releases/v26.2/deprecations.md | 20 ++-- .../releases/v26.2/feature-detail-key.html | 25 ---- .../releases/v26.2/upgrade-finalization.md | 22 ++-- .../_includes/releases/v26.2/v26.2.0.md | 44 ++++--- 9 files changed, 197 insertions(+), 118 deletions(-) delete mode 100644 src/current/_includes/releases/v26.2/feature-detail-key.html diff --git a/src/current/_config_cockroachdb.yml b/src/current/_config_cockroachdb.yml index 80495bcaf5d..a0682cb1310 100644 --- a/src/current/_config_cockroachdb.yml +++ b/src/current/_config_cockroachdb.yml @@ -1,7 +1,7 @@ baseurl: /docs -current_cloud_version: v25.4 +current_cloud_version: v26.2 destination: _site/docs homepage_title: CockroachDB Docs versions: - stable: v26.1 + stable: v26.2 dev: v26.2 diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml index c6cb85033f3..b4121e80b87 100644 --- a/src/current/_data/releases.yml +++ b/src/current/_data/releases.yml @@ -10895,6 +10895,38 @@ docker_arm_limited_access: false source: true +- release_name: v26.2.0 + major_version: v26.2 + release_date: '2026-04-28' + release_type: Production + go_version: go1.25.5 + sha: TBD + has_sql_only: true + has_sha256sum: true + mac: + mac_arm: true + mac_arm_experimental: true + mac_arm_limited_access: false + windows: true + linux: + linux_arm: true + linux_arm_experimental: false + linux_arm_limited_access: false + linux_intel_fips: true + linux_arm_fips: false + docker: + docker_image: cockroachdb/cockroach + docker_arm: true + docker_arm_experimental: false + 
docker_arm_limited_access: false + source: true + previous_release: v26.2.0-beta.3 + cloud_only: true + cloud_only_message_short: 'Currently available for CockroachDB Advanced only' + cloud_only_message: > + This version is currently available only for + CockroachDB Cloud clusters on the Advanced plan. + - release_name: v26.2.0-alpha.2 major_version: v26.2 release_date: '2026-03-18' diff --git a/src/current/_data/versions.csv b/src/current/_data/versions.csv index 909a220ade8..190d4cd72e3 100644 --- a/src/current/_data/versions.csv +++ b/src/current/_data/versions.csv @@ -21,4 +21,4 @@ v25.2,2025-05-09,2026-05-12,2026-11-12,25.2.9,25.2.10,2025-12-17,2026-12-17,2027 v25.3,2025-08-04,2026-02-04,N/A,N/A,N/A,N/A,N/A,N/A,v25.2,release-25.3,2029-08-04 v25.4,2025-11-03,2026-11-03,2027-05-03,N/A,N/A,N/A,N/A,N/A,v25.3,release-25.4,2029-11-03 v26.1,2026-02-02,2026-08-02,N/A,N/A,N/A,N/A,N/A,N/A,v25.4,release-26.1,2030-02-02 -v26.2,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,v26.1,release-26.2,N/A +v26.2,2026-04-28,2027-04-28,2027-10-28,N/A,N/A,N/A,N/A,N/A,v26.1,release-26.2,2030-04-28 diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index c5e92205162..b7ed3504525 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -1,14 +1,52 @@ -- Bullet +- **Statistics concurrency limit:** Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). 
[#161806](https://github.com/cockroachdb/cockroach/pull/161806) - - `information_schema.crdb_datums_to_bytes` - previously only available as `crdb_internal.datums_to_bytes` [#](https://github.com/cockroachdb/cockroach/pull/) - - `information_schema.crdb_index_usage_stats` - previously only available as `crdb_internal.index_usage_stats` [#](https://github.com/cockroachdb/cockroach/pull/) - - `information_schema.crdb_rewrite_inline_hints` - replaces the function previously introduced as `crdb_internal.inject_hint` [#](https://github.com/cockroachdb/cockroach/pull/) +- **`TG_ARGV` indexing:** The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. [#161925](https://github.com/cockroachdb/cockroach/pull/161925) - [#](https://github.com/cockroachdb/cockroach/pull/) +- **Row size guardrails:** Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. 
[#164468](https://github.com/cockroachdb/cockroach/pull/164468) -- Bullet +- **Catalog descriptor caching:** Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#159162](https://github.com/cockroachdb/cockroach/pull/159162) - - **Production FIPS-ready clusters should stay on v25.4** or wait to upgrade directly to v26.2, which will return FIPS support to GA status, using a version of the Go module that is in review for FIPS 140-3 validation. - - **Password length requirement:** FIPS 140-3 requires a minimum password length of 14 characters. Users with passwords shorter than 14 characters may be unable to log in after upgrading to a FIPS-ready binary. +- **Import elastic control:** The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) + +- **SST batcher elastic control:** The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) + +- **DistSQL scan planning:** The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. 
[#160051](https://github.com/cockroachdb/cockroach/pull/160051) + +- **Empty `topic_name` validation:** Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) + +- **TTL job ownership:** TTL jobs are now owned by the schedule owner instead of the `node` user. This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226](https://github.com/cockroachdb/cockroach/pull/161226) + +- **Inline hints privilege:** Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) + +- **Statement Details page URL:** The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558](https://github.com/cockroachdb/cockroach/pull/159558) + +- **Admission control metrics units:** Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. 
[#160956](https://github.com/cockroachdb/cockroach/pull/160956) + +- **Builtin function rename:** Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) + +- **`incremental_location` option:** Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189](https://github.com/cockroachdb/cockroach/pull/159189) + +- **`incremental_location` option:** Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416](https://github.com/cockroachdb/cockroach/pull/160416) + +- **View privilege checking:** When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) + +- **Index backfill elastic control:** The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) + +- **Changefeed retry backoff:** Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. 
[#164874](https://github.com/cockroachdb/cockroach/pull/164874) + +- **`ALTER CHANGEFEED ADD` validation:** Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) + +- **PCR reader AOST restriction:** Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) + +- **Super regions:** The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227](https://github.com/cockroachdb/cockroach/pull/165227) + +- **`TEMPORARY` database privilege:** Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) + +- **`cockroach encode-uri` command:** The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. 
[#164561](https://github.com/cockroachdb/cockroach/pull/164561) + +- **Statement diagnostics bundles:** Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) + +- **`crdb_internal` view access checks:** User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) + +- **`REFRESH MATERIALIZED VIEW` RLS:** `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419](https://github.com/cockroachdb/cockroach/pull/167419) - For detailed upgrade guidance and migration paths, refer to [FIPS-ready CockroachDB]({% link {{ page.version.version }}/fips.md %}). diff --git a/src/current/_includes/releases/v26.2/cluster-setting-changes.md b/src/current/_includes/releases/v26.2/cluster-setting-changes.md index 819b2aaf026..ce4509b37c0 100644 --- a/src/current/_includes/releases/v26.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.2/cluster-setting-changes.md @@ -1,36 +1,74 @@ -Changes to [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) should be reviewed prior to upgrading. New default cluster setting values will be used unless you have manually set a value for a setting. This can be confirmed by running the SQL statement `SELECT * FROM system.settings` to view the non-default settings. - -
New cluster settings
- -- Bullet - - Example: - - ~~~ sql - CREATE CHANGEFEED FOR x into 'null://' WITH - range_distribution_strategy='balanced_simple'; - ~~~ - -
Settings with changed defaults
- -- Bullet - - Events related to changefeed operations are now routed to the CHANGEFEED channel, while sampled queries and transactions, along with certain SQL performance events, are logged to SQL_EXEC. To continue using the previous logging channels, set `log.channel_compatibility_mode.enabled` to `true`. - -- Bullet - - Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#][#] - - -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ +Review the following changes **before** upgrading. New default cluster settings will be used unless you have manually set a value for a setting. This can be confirmed by running the SQL statement (`SELECT * FROM system.settings`) to view the non-default settings. + +
+ +| Setting | Description | Default | Change type | Previous versions affected | +|---|---|---|---|---| +| `security.provisioning.ldap.enabled` | LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. [#163199](https://github.com/cockroachdb/cockroach/pull/163199) | `false` | New setting | None | +| `security.provisioning.oidc.enabled` | Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#159787](https://github.com/cockroachdb/cockroach/pull/159787) | `false` | New setting | v26.1 | +| `security.client_cert.san_required.enabled` | The new cluster setting `security.client_cert.san_required.enabled` enables Subject Alternative Name (SAN) based authentication for client certificates. When enabled, CockroachDB validates client identities using SAN attributes (URIs, DNS names, or IP addresses) from X.509 certificates instead of or in addition to the certificate's Common Name field.

Key capabilities include:This authentication method works across both SQL client connections and internal RPC communication between cluster nodes, ensuring consistent identity verification throughout the system. Organizations using modern certificate management systems and service identity frameworks can now leverage their existing infrastructure for database authentication without requiring certificate reissuance or CN-based naming conventions. [#162583](https://github.com/cockroachdb/cockroach/pull/162583) | `false` | New setting | None | +| `server.oidc_authentication.tls_insecure_skip_verify.enabled` | Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514](https://github.com/cockroachdb/cockroach/pull/164514) | `false` | New setting | None | +| `changefeed.max_retry_backoff` | Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874](https://github.com/cockroachdb/cockroach/pull/164874) | `30s` (was `10m`) | Changed default | v25.4, v26.1 | +| `changefeed.partition_alg.enabled` | Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. 
[#161265](https://github.com/cockroachdb/cockroach/pull/161265) | `false` | New setting | v25.2, v25.4, v26.1 | +| `sql.instance_info.use_instance_resolver.enabled` | The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). [#163947](https://github.com/cockroachdb/cockroach/pull/163947) | `true` | New setting | v26.1 | +| `bulkio.import.elastic_control.enabled` | The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) | `true` (was `false`) | Changed default | None | +| `bulkio.ingest.sst_batcher_elastic_control.enabled` | The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) | `true` (was `false`) | Changed default | None | +| `bulkio.index_backfill.elastic_control.enabled` | The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) | `true` (was `false`) | Changed default | None | +| `bulkio.import.distributed_merge.mode` | Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. 
When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. [#159330](https://github.com/cockroachdb/cockroach/pull/159330) | `false` | New setting | v26.1 | +| `bulkio.import.row_count_validation.mode` | Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543](https://github.com/cockroachdb/cockroach/pull/163543) | `async` | New setting | None | +| `kv.range_split.load_sample_reset_duration` | The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159499](https://github.com/cockroachdb/cockroach/pull/159499) | `30m` (was `0`) | Changed default | None | +| `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled` | Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. 
[#159436](https://github.com/cockroachdb/cockroach/pull/159436) | `true` | New setting | None | +| `server.sql_tcp_user.timeout` | Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037](https://github.com/cockroachdb/cockroach/pull/164037) | `30s` | New setting | None | +| `server.gc_assist.enabled` | A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555](https://github.com/cockroachdb/cockroach/pull/166555) | `true` | New setting | None | +| `sql.auth.skip_underlying_view_privilege_checks.enabled` | When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) | `false` | New setting | None | +| `sql.stats.automatic_full_concurrency_limit` | Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). 
[#161806](https://github.com/cockroachdb/cockroach/pull/161806) | number of vCPUs / 2 (was `1`) | Changed default | None | +| `sql.stats.automatic_extremes_concurrency_limit` | Added cluster settings to control the number of concurrent automatic statistics collection jobs: `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. Note that at most one statistics collection job can run on a single table at a time. [#158835](https://github.com/cockroachdb/cockroach/pull/158835) | `128` | New setting | v26.1 | +| `sql.stats.non_indexed_json_histograms.enabled` | Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). [#164477](https://github.com/cockroachdb/cockroach/pull/164477) | `false` | New setting | None | +| `sql.stmt_diagnostics.max_bundles_per_request` | Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) | `10` | New setting | None | +| `sql.catalog.allow_leased_descriptors.enabled` | Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
[#159162](https://github.com/cockroachdb/cockroach/pull/159162) | `true` (was `false`) | Changed default | v26.1 | +| `sql.guardrails.max_row_size_log` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `16 MiB` (was `64 MiB`) | Changed default | None | +| `sql.guardrails.max_row_size_err` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. 
[#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `80 MiB` (was `512 MiB`) | Changed default | None | +| `sql.defaults.super_regions.enabled` | The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227](https://github.com/cockroachdb/cockroach/pull/165227) | `false` | Deprecated | None | +| `obs.ash.log_interval` | Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093](https://github.com/cockroachdb/cockroach/pull/165093) | `10m` | New setting | None | +| `obs.ash.log_top_n` | Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093](https://github.com/cockroachdb/cockroach/pull/165093) | `10` | New setting | None | +| `sql.schema.auto_unlock.enabled` | Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. 
When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471](https://github.com/cockroachdb/cockroach/pull/166471) | `true` | New setting | None | +| `sql.prepared_transactions.unsafe.enabled` | Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855](https://github.com/cockroachdb/cockroach/pull/166855) | `false` | New setting | None | +| `changefeed.kafka.max_request_size` | Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740](https://github.com/cockroachdb/cockroach/pull/166740) | `256 MiB` | New setting | None | + +
+ + diff --git a/src/current/_includes/releases/v26.2/deprecations.md b/src/current/_includes/releases/v26.2/deprecations.md index 30c39187d9b..50e084a26a8 100644 --- a/src/current/_includes/releases/v26.2/deprecations.md +++ b/src/current/_includes/releases/v26.2/deprecations.md @@ -1,15 +1,9 @@ -The following deprecations are announced in v26.1. +
-- Bullet +| Deprecated | Description | +|---|---| +| `enable_inspect_command` session variable | `INSPECT` is now a generally available (GA) feature. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. [#159659](https://github.com/cockroachdb/cockroach/pull/159659) | +| `enable_super_regions` session variable and `sql.defaults.super_regions.enabled` cluster setting | The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227](https://github.com/cockroachdb/cockroach/pull/165227) | +| `cockroach encode-uri` command | The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. [#164561](https://github.com/cockroachdb/cockroach/pull/164561) | - - To validate data consistency, use `INSPECT` instead of `EXPERIMENTAL SCRUB` - - `INSPECT` supports a `DETACHED` option to run the operation without waiting for it - - For more information, see the [`INSPECT`]({% link {{ page.version.version }}/inspect.md %}) documentation - - [#][#] - -- Bullet - - -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ +
diff --git a/src/current/_includes/releases/v26.2/feature-detail-key.html b/src/current/_includes/releases/v26.2/feature-detail-key.html deleted file mode 100644 index 95d79284eb4..00000000000 --- a/src/current/_includes/releases/v26.2/feature-detail-key.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - -
Feature detail key
Features marked "All*" were recently made available in the CockroachDB Cloud platform. They are available for all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
★★Features marked "All**" were recently made available via tools maintained outside of the CockroachDB binary. They are available to use with all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
{% include icon-yes.html %}Feature is available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
{% include icon-no.html %}Feature is not available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index 7ecd398ec45..b107f60f0e3 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,13 +1,17 @@ -During a major-version upgrade, certain features and performance improvements are not available until the upgrade is finalized. In v26.1, these are: +During a major-version upgrade, certain features and performance improvements are not available until the upgrade is finalized. In v26.2, these are: -- Bullet +- **`security_invoker` option for views**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. For details, refer to the [release note](#v26-2-0-alpha-2-security-invoker). -{% comment %}TODO: Verify with engineering that leased descriptors truly requires upgrade finalization{% endcomment %} +- **`ALTER TABLE ENABLE/DISABLE TRIGGER` syntax**: {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. For details, refer to the [release note](#v26-2-0-alpha-1-enable-disable-trigger). 
-{% comment %}TODO: Add anchor IDs to the referenced release notes if they don't exist{% endcomment %} +- **`skip_unique_checks` storage parameter**: {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. For details, refer to the [release note](#v26-2-0-alpha-2-skip-unique-checks). -{% comment %} -Additional features reviewed but NOT included: -- Bullet -- Bullet -{% endcomment %} +- **`DROP CONSTRAINT` on unique indexes**: {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. For details, refer to the [release note](#v26-2-0-alpha-1-drop-unique-constraint). + +- **Canary stats in `EXPLAIN` output**: {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. For details, refer to the [release note](#v26-2-0-beta-2-canary-stats). + +- **View owner privilege checking**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. 
Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. For details, refer to the [release note](#v26-2-0-alpha-2-view-privilege-checks). + +- **`INSPECT` uniqueness validation for `REGIONAL BY ROW` tables**: {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. For details, refer to the [release note](#v26-2-0-alpha-2-inspect-uniqueness). + +- **`IMPORT` row count validation with `INSPECT`**: {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. For details, refer to the [release note](#v26-2-0-alpha-1-import-inspect-validation). 
diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index f5f93346395..f381e646037 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -171,9 +171,9 @@ This section summarizes the most significant user-facing changes in CockroachDB GA {% include icon-yes.html %} - - - + {% include icon-no.html %} + {% include icon-no.html %} + {% include icon-no.html %} @@ -237,9 +237,9 @@ This section summarizes the most significant user-facing changes in CockroachDB

Deploy CockroachDB Cloud Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

Preview - - - + {% include icon-no.html %} + {% include icon-no.html %} + {% include icon-no.html %} {% include icon-yes.html %} @@ -248,7 +248,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

A modernized CockroachDB Cloud CLI with improved commands, better discoverability, and a more intuitive interface for managing clusters, users, and cloud resources from the terminal.

GA - + {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -259,7 +259,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

An AI-powered in-console assistant that helps you troubleshoot issues, optimize queries, and manage CockroachDB clusters using natural language, without leaving the Cloud console.

GA - + {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -270,7 +270,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Enforce multi-factor authentication for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials and strengthening organizational security posture.

GA - + {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -281,9 +281,9 @@ This section summarizes the most significant user-facing changes in CockroachDB

Run CockroachDB Cloud clusters entirely within your own AWS, Azure, or GCP account with Bring Your Own Cloud, giving you full control over networking, security, and data residency while retaining fully managed database operations.

Preview - - - + {% include icon-no.html %} + {% include icon-no.html %} + {% include icon-no.html %} {% include icon-yes.html %} @@ -292,7 +292,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Connect AI agents and LLM-powered applications to CockroachDB using the Model Context Protocol (MCP), enabling intelligent, database-aware AI workflows without custom integration work.

GA - + {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -303,7 +303,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Experience CockroachDB's resilience firsthand by simulating an AZ failure in a live production cluster and watching the cluster auto-recover, and traffic stay unimpacted.

GA - + {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -311,8 +311,6 @@ This section summarizes the most significant user-facing changes in CockroachDB -{% include releases/v26.2/feature-detail-key.html %} -

Upgrade Details

@@ -327,21 +325,21 @@ Before you upgrade, review these changes and other information about the new maj -

Key cluster setting changes

+
Key cluster setting changes
{% include releases/v26.2/cluster-setting-changes.md %} - + -

Features that require upgrade finalization

+
Deprecations
-{% include releases/v26.2/upgrade-finalization.md %} +{% include releases/v26.2/deprecations.md %} - + -

Deprecations

+

Features that require upgrade finalization

-{% include releases/v26.2/deprecations.md %} +{% include releases/v26.2/upgrade-finalization.md %}

Known limitations

From 1372778c6064c5cae41192e814682bd4ab98378c Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Wed, 15 Apr 2026 11:41:52 -0400 Subject: [PATCH 02/32] separate Cloud notes --- .../releases/v26.2/upgrade-finalization.md | 16 +- .../_includes/releases/v26.2/v26.2.0.md | 478 ++++++++++++++---- src/current/css/customstyles.scss | 28 + src/current/releases/cloud.md | 100 ++++ src/current/releases/v26.2.md | 4 +- 5 files changed, 520 insertions(+), 106 deletions(-) diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index b107f60f0e3..6a20b6d8050 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,17 +1,17 @@ During a major-version upgrade, certain features and performance improvements are not available until the upgrade is finalized. In v26.2, these are: -- **`security_invoker` option for views**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. For details, refer to the [release note](#v26-2-0-alpha-2-security-invoker). +- **`security_invoker` option for views**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. 
[#164184](https://github.com/cockroachdb/cockroach/pull/164184) -- **`ALTER TABLE ENABLE/DISABLE TRIGGER` syntax**: {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. For details, refer to the [release note](#v26-2-0-alpha-1-enable-disable-trigger). +- **`ALTER TABLE ENABLE/DISABLE TRIGGER` syntax**: {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) -- **`skip_unique_checks` storage parameter**: {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. For details, refer to the [release note](#v26-2-0-alpha-2-skip-unique-checks). 
+- **`skip_unique_checks` storage parameter**: {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) -- **`DROP CONSTRAINT` on unique indexes**: {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. For details, refer to the [release note](#v26-2-0-alpha-1-drop-unique-constraint). +- **`DROP CONSTRAINT` on unique indexes**: {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) -- **Canary stats in `EXPLAIN` output**: {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. For details, refer to the [release note](#v26-2-0-beta-2-canary-stats). 
+- **Canary stats in `EXPLAIN` output**: {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) -- **View owner privilege checking**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. For details, refer to the [release note](#v26-2-0-alpha-2-view-privilege-checks). +- **View owner privilege checking**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. 
[#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- **`INSPECT` uniqueness validation for `REGIONAL BY ROW` tables**: {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. For details, refer to the [release note](#v26-2-0-alpha-2-inspect-uniqueness). +- **`INSPECT` uniqueness validation for `REGIONAL BY ROW` tables**: {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) -- **`IMPORT` row count validation with `INSPECT`**: {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. For details, refer to the [release note](#v26-2-0-alpha-1-import-inspect-validation). +- **`IMPORT` row count validation with `INSPECT`**: {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. 
[#163543](https://github.com/cockroachdb/cockroach/pull/163543) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index f381e646037..a220dce9796 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -10,7 +10,6 @@ For a summary of the most significant changes, refer to [Feature Highlights](#v2 - [Security](#v26-2-0-security) - [Observability](#v26-2-0-observability) - [Performance](#v26-2-0-performance) - - [CockroachDB Cloud](#v26-2-0-cloud) Before [upgrading to CockroachDB v26.2]({% link v26.2/upgrade-cockroach-version.md %}), be sure to also review the following [Upgrade Details](#v26-2-0-upgrade-details): @@ -217,100 +216,6 @@ This section summarizes the most significant user-facing changes in CockroachDB -

CockroachDB Cloud

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FeatureAvailabilitySelf-hostedBasicStandardAdvanced
-

2-DC Active-Passive Architecture

-

Deploy CockroachDB Cloud Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

-
Preview{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-yes.html %}
-

CockroachDB Cloud CLI Revamp

-

A modernized CockroachDB Cloud CLI with improved commands, better discoverability, and a more intuitive interface for managing clusters, users, and cloud resources from the terminal.

-
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
-

Agent Roach: AI-Powered In-Console Assistant

-

An AI-powered in-console assistant that helps you troubleshoot issues, optimize queries, and manage CockroachDB clusters using natural language, without leaving the Cloud console.

-
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
-

Multi-Factor Authentication for CockroachDB Cloud

-

Enforce multi-factor authentication for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials and strengthening organizational security posture.

-
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
-

Bring Your Own Cloud (BYOC) for AWS, Azure, and GCP

-

Run CockroachDB Cloud clusters entirely within your own AWS, Azure, or GCP account with Bring Your Own Cloud, giving you full control over networking, security, and data residency while retaining fully managed database operations.

-
Preview{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-yes.html %}
-

CockroachDB Cloud MCP Server

-

Connect AI agents and LLM-powered applications to CockroachDB using the Model Context Protocol (MCP), enabling intelligent, database-aware AI workflows without custom integration work.

-
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
-

Fault Tolerance Demo

-

Experience CockroachDB's resilience firsthand by simulating an AZ failure in a live production cluster and watching the cluster auto-recover, and traffic stay unimpacted.

-
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
-

Upgrade Details

@@ -325,13 +230,13 @@ Before you upgrade, review these changes and other information about the new maj -
Key cluster setting changes
+

Key cluster setting changes

{% include releases/v26.2/cluster-setting-changes.md %} -
Deprecations
+

Deprecations

{% include releases/v26.2/deprecations.md %} @@ -346,3 +251,382 @@ Before you upgrade, review these changes and other information about the new maj + + +

Security updates

+ +- When the `security.provisioning.ldap.enabled` cluster setting is enabled, LDAP-authenticated DB Console logins now update the `estimated_last_login_time` column in the `system.users` table. [#163400][#163400] +- When the `security.provisioning.oidc.enabled` cluster setting is enabled, OIDC-authenticated DB Console logins now populate the `estimated_last_login_time` column in `system.users`, allowing administrators to track when OIDC users last accessed the DB Console. [#164129][#164129] +- Removed an overly restrictive TLS curve preference that limited FIPS mode to P-256. CockroachDB now uses Go's native FIPS curve selection, improving interoperability with clients that prefer other FIPS curves. [#166793][#166793] + + +

Enterprise edition changes

+ +- LDAP authentication for the DB Console now additionally supports role-based access control (RBAC) through LDAP group membership. To use this feature, an administrator must first create roles in CockroachDB with names that match the Common Names (CN) of their LDAP groups. These roles should then be granted the desired privileges for DB Console access. When a user who is a member of a corresponding LDAP group logs into the DB Console, they will be automatically granted the role and its associated privileges, creating consistent behavior with SQL client connections. [#162302][#162302] + + +

SQL language changes

+
+- Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236]
+- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369]
+- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880]
+- Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` [#160137][#160137]
+- If the new `SHOW BACKUP` experience is enabled by setting the `use_backups_with_ids` session variable to `true`, `SHOW BACKUP` will parse the IDs provided by `SHOW BACKUPS` and display contents for single backups. 
[#160812][#160812]
+- If the new `RESTORE` experience is enabled by setting the `use_backups_with_ids` session variable to `true`, `RESTORE` will parse the IDs provided by `SHOW BACKUPS` and will restore the specified backup without the use of `AS OF SYSTEM TIME`. [#161294][#161294]
+- `SHOW BACKUP` and `RESTORE` now allow backup IDs even if the `use_backups_with_ids` session variable is not set. Setting the variable only configures whether `LATEST` is resolved using the new or legacy path. [#162329][#162329]
+- Added the `REVISION START TIME` option to the new `SHOW BACKUPS` experience enabled via the `use_backups_with_ids` session variable. Use the `REVISION START TIME` option to view the revision start times of revision history backups. [#161328][#161328]
+- Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. [#159231][#159231]
+- `CREATE OR REPLACE TRIGGER` is now supported. If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. [#162633][#162633]
+- Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. [#161915][#161915]
+- `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. 
[#163296][#163296] +- `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. The replacement now atomically updates all dependent triggers to execute the new function body. [#163348][#163348] +- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] +- Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] +- A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. [#156771][#156771] +- Added the `STRICT` option for locality-aware backups. When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. [#158999][#158999] +- Added support for the `dmetaphone()`, `dmetaphone_alt()`, and `daitch_mokotoff()` built-in functions, completing CockroachDB's implementation of the PostgreSQL `fuzzystrmatch` extension. `dmetaphone` and `dmetaphone_alt` return Double Metaphone phonetic codes for a string, and `daitch_mokotoff` returns an array of Daitch-Mokotoff soundex codes. These functions are useful for fuzzy string matching based on phonetic similarity. [#163430][#163430] +- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. 
[#156963][#156963] +- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. [#160486][#160486] +- Queries executed via the vectorized engine now display their progress in the `phase` column of `SHOW QUERIES`. Previously, this feature was only available in the row-by-row engine. [#158029][#158029] +- CockroachDB now shows execution statistics (like `execution time`) on `EXPLAIN ANALYZE` output for `render` nodes, which often handle built-in functions. [#161509][#161509] +- The output of `EXPLAIN [ANALYZE]` in non-`VERBOSE` mode is now more succinct. [#153361][#153361] +- CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. [#164403][#164403] +- `RESTORE TABLE/DATABASE` now supports the `WITH GRANTS` option, which restores grants on restore targets for users in the restoring cluster. Note that using this option with `new_db_name` will cause the new database to inherit the privileges in the backed-up database. [#164444][#164444] +- Added support for a new statement hint used to change session variable values for the duration of a single statement without application changes. The new hint type can be created using the `information_schema.crdb_set_session_variable_hint` built-in function. The override applies only when executing a statement matching the given fingerprint and does not persist on the session or surrounding transaction. [#164909][#164909] +- Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. 
+- Introduced the `information_schema.crdb_delete_statement_hints` built-in function, which accepts two kinds of argument: `row_id` (`INT`), the primary key of `system.statement_hints`, or `fingerprint` (`STRING`).
This improves discoverability when browsing `information_schema` for available metadata. [#164969][#164969] +- Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. [#165457][#165457] +- Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. [#165744][#165744] +- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] +- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] +- `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. [#165712][#165712] +- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] +- Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. 
[#165866][#165866] +- Aggregation function `ST_AsMVT` can now also be used as a window function. [#166860][#166860] +- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. [#166920][#166920] +- The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. [#167192][#167192] +- Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405] + + +

Operational changes

+ +- The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] +- Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. [#160129][#160129] +- Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] +- External connections can now be used with online restore. [#159090][#159090] +- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160798][#160798] +- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. 
[#160901][#160901] +- Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161043][#161043] +- Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. [#161062][#161062] +- Backup schedules that utilize the `revision_history` option now apply that option only to incremental backups triggered by that schedule, rather than duplicating the revision history in the full backups as well. [#162105][#162105] +- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] +- Jobs now clear their running status messages upon successful completion. [#163765][#163765] +- Changefeed ranges are now more accurately reported as lagging. [#163427][#163427] +- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. 
[#164827][#164827] +- Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725] +- When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. This lets support engineers correlate the same user across multiple log entries without revealing the actual values. [#165804][#165804] +- Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] + + +

Command-line changes

+ +- The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. [#160538][#160538] +- The `cockroach debug tsdump` command now supports ZSTD encoding via `--format=raw --encoding=zstd`. This generates compressed tsdump files that are approximately 85% smaller than raw format. The `tsdump upload` command automatically detects and decompresses ZSTD files, allowing direct upload without manual decompression. [#161998][#161998] +- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#163266][#163266] +- Added a `--list-dbs` flag to `workload init workload_generator` that lists all user databases found in debug logs without initializing tables. This helps users discover which databases are available in the debug zip before running the full init command. [#163930][#163930] +- Added the `--exclude-log-severities` flag to `cockroach debug zip` that filters log entries by severity server-side. For example, `--exclude-log-severities=INFO` excludes all `INFO`-level log entries from the collected log files, which can significantly reduce zip file size for large clusters. Valid severity names are `INFO`, `WARNING`, `ERROR`, and `FATAL`. The flag accepts a comma-delimited list or can be specified multiple times. [#165802][#165802] + + +

DB Console changes

+ +- Added a new time-series bar graph called **Plan Distribution Over Time** to the **Statement Fingerprint** page, on the **Explain Plans** tab. It shows which execution plans were used in each time interval, helping detect shifts in query plan distributions. [#161011][#161011] +- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. [#160576][#160576] + + +

Bug fixes

+ +- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159126][#159126] +- CockroachDB could previously encounter internal errors like `column statistics cannot be determined for empty column set` and `invalid union` in some edge cases with `UNION`, `EXCEPT`, and `INTERCEPT`. This has now been fixed. [#150706][#150706] +- Fixed a bug that could cause a scan over a secondary index to read significantly more KVs than necessary in order to satisfy a limit when the scanned index had more than one column family. [#156672][#156672] +- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#158527][#158527] +- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. 
This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. [#158935][#158935] +- Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. [#159176][#159176] +- The `ascii` built-in function now returns `0` when the input is the empty string instead of an error. [#159178][#159178] +- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. [#159180][#159180] +- Fixed a bug which could cause prepared statements to fail with the error message `non-const expression` when they contained filters with stable functions. This bug has been present since 25.4.0. [#159201][#159201] +- Fixed a bug in the TPC-C workload where long-duration runs (>= 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. [#159286][#159286] +- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#159354][#159354] +- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159378][#159378] +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159403][#159403] +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. 
[#159431][#159431] +- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159527][#159527] +- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. [#159642][#159642] +- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. [#159722][#159722] +- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160346][#160346] +- Fixed a deadlock that could occur when a statistics creation task panicked. [#160348][#160348] +- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. [#160499][#160499] +- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. [#160608][#160608] +- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. [#160674][#160674] +- Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. [#160780][#160780] +- An error will now be reported when the database provided as the argument to a `SHOW REGIONS` or `SHOW SUPER REGIONS` statement does not exist. This bug had been present since version v21.1. 
[#161014][#161014] +- Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. [#161083][#161083] +- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. [#161273][#161273] +- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. [#161290][#161290] +- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. [#161318][#161318] +- Fixed a bug that prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug had existed since hint injection was introduced in v26.1.0-alpha.2. [#161773][#161773] +- Fixed prepared statements failing with `version mismatch` errors when user-defined types are modified between preparation and execution. Prepared statements now automatically detect UDT changes and re-parse to use current type definitions. [#161827][#161827] +- Previously, CockroachDB could hit an internal error when evaluating built-in functions with `'{}'` as an argument (without explicit type casts, such as on a query like `SELECT cardinality('{}');`). This is now fixed and a regular error is returned instead (matching PostgreSQL behavior). 
[#161835][#161835] +- Fixed a bug where the index definition shown in `pg_indexes` for hash sharded indexes with `STORING` columns was not valid SQL. The `STORING` clause now appears in the correct position. [#161882][#161882] +- Fixed a bug where `DROP TABLE ... CASCADE` would incorrectly drop tables that had triggers or row-level security (RLS) policies referencing the dropped table. Now only the triggers/policies are dropped, and the tables owning them remain intact. [#161914][#161914] +- Reduced contention when dropping descriptors or running concurrent imports. [#161941][#161941] +- Fixed a bug where multi-statement explicit transactions using `SAVEPOINT` to recover from certain errors (like duplicate key-value violations) could lose writes performed before the savepoint was created, in rare cases when buffered writes were enabled (off by default). This bug was introduced in v25.2. [#161972][#161972] +- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. [#161979][#161979] +- Fixed an error that occurred when using generic query plans that generates a lookup join on indexes containing identity computed columns. [#162036][#162036] +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] +- Fixed an internal error `could not find format code for column N` that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. [#162115][#162115] +- Fixed a bug where statement bundles were missing `CREATE TYPE` statements for user-defined types used as array column types. [#162357][#162357] +- Fixed a bug in which PL/pgSQL UDFs with many `IF` statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. 
[#162512][#162512] +- Fixed a bug where an error would occur when defining a foreign key on a hash-sharded primary key without explicitly providing the primary key columns. [#162608][#162608] +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163224][#163224] +- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. [#163244][#163244] +- Fixed an optimizer limitation that prevented index usage on computed columns when querying through views or subqueries containing JSON fetch expressions (such as `->`, `->>`, `#>`, or `#>>`). Queries that project JSON expressions matching indexed computed column definitions now correctly use indexes instead of performing full table scans, significantly improving performance for JSON workloads. [#163395][#163395] +- Statements within a UDF or stored procedure similar to (1) and (2) where the limit/offset is a reference to an argument of the UDF/SP. [#163500][#163500] +- Dropping a region from the system database no longer leaves `REGIONAL BY TABLE` system tables referencing the removed region, preventing descriptor validation errors. [#163503][#163503] +- Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. [#163507][#163507] +- Fixed a bug where `EXPLAIN ANALYZE (DEBUG)` statement bundles did not include triggers, their functions, or tables modified by those triggers. 
The bundle's `schema.sql` file now contains the `CREATE TRIGGER`, `CREATE FUNCTION`, and `CREATE TABLE` statements needed to fully reproduce the query environment when triggers are involved. [#163584][#163584] +- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. [#163883][#163883] +- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. [#163885][#163885] +- Fixed a bug where dropped columns appeared in `pg_catalog.pg_attribute` with the `atttypid` column equal to 2283 (`anyelement`). Now this column will be 0 for dropped columns. This matches PostgreSQL behavior, where `atttypid=0` is used for dropped columns. [#163950][#163950] +- Fixed a race condition/conflict between concurrent `ALTER FUNCTION ... SET SCHEMA` and `DROP SCHEMA` operations. [#164043][#164043] +- Fixed a bug where super region zone configurations did not constrain all replicas to regions within the super region. [#164285][#164285] +- Fixed a bug where CockroachDB returned "cached plan must not change result type" errors during the `Execute` phase instead of the `Bind` phase of the extended pgwire protocol. This caused compatibility issues with drivers like pgx that expect the error before `BindComplete` is sent, particularly when using batch operations with prepared statements after schema changes. [#164406][#164406] +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159627][#159627] +- Invalid `avro_schema_prefix` is now caught during statement time. 
The prefix must start with `[A-Za-z_]` and subsequently contain only `[A-Za-z0-9_]`, as specified in the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html). [#159869][#159869] +- JWT authentication now returns a clear error when HTTP requests to fetch JWKS or OpenID configuration return non-`2xx` status codes, instead of silently passing the response body to the JSON parser. [#158294][#158294] +- Fixed an issue where `ORDER BY` expressions containing subqueries with non-default `NULLS` ordering (e.g., `NULLS LAST` for `ASC`, `NULLS FIRST` for `DESC`) could cause an error during query planning. [#163230][#163230] +- Fixed a bug where incremental backups taken after downgrading a mixed-version cluster to v25.4 could result in inconsistent backup indexes. [#164301][#164301] +- Fixed a bug where creating a table with a user-defined type column failed when the user had `USAGE` privilege on the base type but not on its implicit array type. The array type now inherits privileges from the base type, matching PostgreSQL behavior. [#164471][#164471] +- `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. [#164557][#164557] +- Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. [#164671][#164671] +- Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. 
[#164739][#164739] +- Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. [#164757][#164757] +- Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. [#164765][#164765] +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with `ON` filter that is mostly `false`. [#164879][#164879] +- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#164881][#164881] +- Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. [#164933][#164933] +- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. [#164942][#164942] +- Fixed a bug that had previously allowed the primary and secondary to be in separate super regions. [#164943][#164943] +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. 
[#165260][#165260] +- The `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history` views now include the `app_name` column, matching the underlying `crdb_internal` tables. [#165367][#165367] +- Fixed a bug where temporary tables created in one session could fail to appear in `pg_catalog` queries from another session because the parent temporary schema could not be resolved by ID. [#165395][#165395] +- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. [#165551][#165551] +- Fixed a bug where running `EXPLAIN ANALYZE (DEBUG)` on a query that invokes a UDF with many blocks could cause out-of-memory errors (OOMs). [#166132][#166132] +- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. [#166183][#166183] +- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. [#166223][#166223] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#166325][#166325] +- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. 
[#166705][#166705] +- Fixed a data race that could cause certificate expiration metrics (`security.certificate.expiration.node-client`, `security.certificate.expiration.client-tenant`, `security.certificate.expiration.ca-client-tenant` and their TTL counterparts) to not update after certificate rotation via `SIGHUP`. [#166664][#166664] +- Fixed a bug where `ALTER FUNCTION ... RENAME TO` and `ALTER PROCEDURE ... RENAME TO` could create duplicate functions in non-public schemas. [#166681][#166681] +- The PCR job now switches into the cutover phase more promptly after a failover is requested, terminating the replication phase more quickly and more reliably when components of the ingestion process are hung due to network errors. [#166778][#166778] +- Fixed a bug where descriptor version fetching could be incorrectly throttled by the elastic CPU limiter, potentially leading to increased query latency or timeouts under high CPU load. [#166810][#166810] +- Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] +- Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. [#167377][#167377] +- Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. [#167432][#167432] +- Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. 
[#167484][#167484] +- Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. [#167532][#167532] + + +

Performance improvements

+ +- Database- and table-level backups no longer fetch all object descriptors from disk in order to resolve the backup targets. Now only the objects that are referenced by the targeted objects will be fetched. This improves performance when there are many tables in the cluster. [#157790][#157790] +- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. [#159205][#159205] +- Improved changefeed performance when filtering unwatched column families and offline tables by replacing expensive error chain traversal with direct status enum comparisons. [#159745][#159745] +- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. [#160121][#160121] +- Queries that have comparison expressions with the `levenshtein` built-in are now up to 30% faster. [#160394][#160394] +- The optimizer now better optimizes query plans of statements within UDFs and stored procedures that have `IN` subqueries. [#160503][#160503] +- Significantly reduced WAL write latency when using encryption at rest by properly recycling WAL files instead of deleting and recreating them. [#160784][#160784] +- Optimized the logic that applies zone config constraints so it no longer fetches all descriptors in the cluster during background constraint reconciliation. [#160966][#160966] +- The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` [#161816][#161816] +- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. 
[#162546][#162546] +- The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. [#164212][#164212] + + +[#146250]: https://github.com/cockroachdb/cockroach/pull/146250 +[#150663]: https://github.com/cockroachdb/cockroach/pull/150663 +[#150706]: https://github.com/cockroachdb/cockroach/pull/150706 +[#153361]: https://github.com/cockroachdb/cockroach/pull/153361 +[#156672]: https://github.com/cockroachdb/cockroach/pull/156672 +[#156771]: https://github.com/cockroachdb/cockroach/pull/156771 +[#156963]: https://github.com/cockroachdb/cockroach/pull/156963 +[#157790]: https://github.com/cockroachdb/cockroach/pull/157790 +[#158029]: https://github.com/cockroachdb/cockroach/pull/158029 +[#158294]: https://github.com/cockroachdb/cockroach/pull/158294 +[#158527]: https://github.com/cockroachdb/cockroach/pull/158527 +[#158935]: https://github.com/cockroachdb/cockroach/pull/158935 +[#158999]: https://github.com/cockroachdb/cockroach/pull/158999 +[#159090]: https://github.com/cockroachdb/cockroach/pull/159090 +[#159126]: https://github.com/cockroachdb/cockroach/pull/159126 +[#159176]: https://github.com/cockroachdb/cockroach/pull/159176 +[#159178]: https://github.com/cockroachdb/cockroach/pull/159178 +[#159180]: https://github.com/cockroachdb/cockroach/pull/159180 +[#159201]: https://github.com/cockroachdb/cockroach/pull/159201 +[#159205]: https://github.com/cockroachdb/cockroach/pull/159205 +[#159231]: https://github.com/cockroachdb/cockroach/pull/159231 +[#159286]: https://github.com/cockroachdb/cockroach/pull/159286 +[#159354]: https://github.com/cockroachdb/cockroach/pull/159354 +[#159378]: https://github.com/cockroachdb/cockroach/pull/159378 +[#159403]: https://github.com/cockroachdb/cockroach/pull/159403 +[#159431]: 
https://github.com/cockroachdb/cockroach/pull/159431 +[#159527]: https://github.com/cockroachdb/cockroach/pull/159527 +[#159627]: https://github.com/cockroachdb/cockroach/pull/159627 +[#159642]: https://github.com/cockroachdb/cockroach/pull/159642 +[#159722]: https://github.com/cockroachdb/cockroach/pull/159722 +[#159745]: https://github.com/cockroachdb/cockroach/pull/159745 +[#159869]: https://github.com/cockroachdb/cockroach/pull/159869 +[#160121]: https://github.com/cockroachdb/cockroach/pull/160121 +[#160129]: https://github.com/cockroachdb/cockroach/pull/160129 +[#160137]: https://github.com/cockroachdb/cockroach/pull/160137 +[#160346]: https://github.com/cockroachdb/cockroach/pull/160346 +[#160348]: https://github.com/cockroachdb/cockroach/pull/160348 +[#160394]: https://github.com/cockroachdb/cockroach/pull/160394 +[#160486]: https://github.com/cockroachdb/cockroach/pull/160486 +[#160499]: https://github.com/cockroachdb/cockroach/pull/160499 +[#160503]: https://github.com/cockroachdb/cockroach/pull/160503 +[#160538]: https://github.com/cockroachdb/cockroach/pull/160538 +[#160576]: https://github.com/cockroachdb/cockroach/pull/160576 +[#160608]: https://github.com/cockroachdb/cockroach/pull/160608 +[#160674]: https://github.com/cockroachdb/cockroach/pull/160674 +[#160780]: https://github.com/cockroachdb/cockroach/pull/160780 +[#160784]: https://github.com/cockroachdb/cockroach/pull/160784 +[#160798]: https://github.com/cockroachdb/cockroach/pull/160798 +[#160812]: https://github.com/cockroachdb/cockroach/pull/160812 +[#160901]: https://github.com/cockroachdb/cockroach/pull/160901 +[#160966]: https://github.com/cockroachdb/cockroach/pull/160966 +[#161011]: https://github.com/cockroachdb/cockroach/pull/161011 +[#161014]: https://github.com/cockroachdb/cockroach/pull/161014 +[#161043]: https://github.com/cockroachdb/cockroach/pull/161043 +[#161050]: https://github.com/cockroachdb/cockroach/pull/161050 +[#161062]: 
https://github.com/cockroachdb/cockroach/pull/161062 +[#161083]: https://github.com/cockroachdb/cockroach/pull/161083 +[#161273]: https://github.com/cockroachdb/cockroach/pull/161273 +[#161290]: https://github.com/cockroachdb/cockroach/pull/161290 +[#161294]: https://github.com/cockroachdb/cockroach/pull/161294 +[#161318]: https://github.com/cockroachdb/cockroach/pull/161318 +[#161328]: https://github.com/cockroachdb/cockroach/pull/161328 +[#161422]: https://github.com/cockroachdb/cockroach/pull/161422 +[#161509]: https://github.com/cockroachdb/cockroach/pull/161509 +[#161763]: https://github.com/cockroachdb/cockroach/pull/161763 +[#161773]: https://github.com/cockroachdb/cockroach/pull/161773 +[#161816]: https://github.com/cockroachdb/cockroach/pull/161816 +[#161827]: https://github.com/cockroachdb/cockroach/pull/161827 +[#161835]: https://github.com/cockroachdb/cockroach/pull/161835 +[#161880]: https://github.com/cockroachdb/cockroach/pull/161880 +[#161882]: https://github.com/cockroachdb/cockroach/pull/161882 +[#161914]: https://github.com/cockroachdb/cockroach/pull/161914 +[#161915]: https://github.com/cockroachdb/cockroach/pull/161915 +[#161941]: https://github.com/cockroachdb/cockroach/pull/161941 +[#161972]: https://github.com/cockroachdb/cockroach/pull/161972 +[#161979]: https://github.com/cockroachdb/cockroach/pull/161979 +[#161998]: https://github.com/cockroachdb/cockroach/pull/161998 +[#162036]: https://github.com/cockroachdb/cockroach/pull/162036 +[#162058]: https://github.com/cockroachdb/cockroach/pull/162058 +[#162105]: https://github.com/cockroachdb/cockroach/pull/162105 +[#162115]: https://github.com/cockroachdb/cockroach/pull/162115 +[#162286]: https://github.com/cockroachdb/cockroach/pull/162286 +[#162302]: https://github.com/cockroachdb/cockroach/pull/162302 +[#162329]: https://github.com/cockroachdb/cockroach/pull/162329 +[#162357]: https://github.com/cockroachdb/cockroach/pull/162357 +[#162512]: 
https://github.com/cockroachdb/cockroach/pull/162512 +[#162546]: https://github.com/cockroachdb/cockroach/pull/162546 +[#162608]: https://github.com/cockroachdb/cockroach/pull/162608 +[#162633]: https://github.com/cockroachdb/cockroach/pull/162633 +[#163224]: https://github.com/cockroachdb/cockroach/pull/163224 +[#163230]: https://github.com/cockroachdb/cockroach/pull/163230 +[#163244]: https://github.com/cockroachdb/cockroach/pull/163244 +[#163266]: https://github.com/cockroachdb/cockroach/pull/163266 +[#163296]: https://github.com/cockroachdb/cockroach/pull/163296 +[#163348]: https://github.com/cockroachdb/cockroach/pull/163348 +[#163395]: https://github.com/cockroachdb/cockroach/pull/163395 +[#163400]: https://github.com/cockroachdb/cockroach/pull/163400 +[#163427]: https://github.com/cockroachdb/cockroach/pull/163427 +[#163430]: https://github.com/cockroachdb/cockroach/pull/163430 +[#163500]: https://github.com/cockroachdb/cockroach/pull/163500 +[#163503]: https://github.com/cockroachdb/cockroach/pull/163503 +[#163507]: https://github.com/cockroachdb/cockroach/pull/163507 +[#163584]: https://github.com/cockroachdb/cockroach/pull/163584 +[#163765]: https://github.com/cockroachdb/cockroach/pull/163765 +[#163834]: https://github.com/cockroachdb/cockroach/pull/163834 +[#163883]: https://github.com/cockroachdb/cockroach/pull/163883 +[#163885]: https://github.com/cockroachdb/cockroach/pull/163885 +[#163891]: https://github.com/cockroachdb/cockroach/pull/163891 +[#163930]: https://github.com/cockroachdb/cockroach/pull/163930 +[#163950]: https://github.com/cockroachdb/cockroach/pull/163950 +[#163991]: https://github.com/cockroachdb/cockroach/pull/163991 +[#164043]: https://github.com/cockroachdb/cockroach/pull/164043 +[#164129]: https://github.com/cockroachdb/cockroach/pull/164129 +[#164164]: https://github.com/cockroachdb/cockroach/pull/164164 +[#164212]: https://github.com/cockroachdb/cockroach/pull/164212 +[#164236]: 
https://github.com/cockroachdb/cockroach/pull/164236 +[#164285]: https://github.com/cockroachdb/cockroach/pull/164285 +[#164301]: https://github.com/cockroachdb/cockroach/pull/164301 +[#164369]: https://github.com/cockroachdb/cockroach/pull/164369 +[#164403]: https://github.com/cockroachdb/cockroach/pull/164403 +[#164405]: https://github.com/cockroachdb/cockroach/pull/164405 +[#164406]: https://github.com/cockroachdb/cockroach/pull/164406 +[#164444]: https://github.com/cockroachdb/cockroach/pull/164444 +[#164471]: https://github.com/cockroachdb/cockroach/pull/164471 +[#164557]: https://github.com/cockroachdb/cockroach/pull/164557 +[#164671]: https://github.com/cockroachdb/cockroach/pull/164671 +[#164672]: https://github.com/cockroachdb/cockroach/pull/164672 +[#164739]: https://github.com/cockroachdb/cockroach/pull/164739 +[#164757]: https://github.com/cockroachdb/cockroach/pull/164757 +[#164765]: https://github.com/cockroachdb/cockroach/pull/164765 +[#164827]: https://github.com/cockroachdb/cockroach/pull/164827 +[#164879]: https://github.com/cockroachdb/cockroach/pull/164879 +[#164881]: https://github.com/cockroachdb/cockroach/pull/164881 +[#164909]: https://github.com/cockroachdb/cockroach/pull/164909 +[#164933]: https://github.com/cockroachdb/cockroach/pull/164933 +[#164942]: https://github.com/cockroachdb/cockroach/pull/164942 +[#164943]: https://github.com/cockroachdb/cockroach/pull/164943 +[#164969]: https://github.com/cockroachdb/cockroach/pull/164969 +[#165260]: https://github.com/cockroachdb/cockroach/pull/165260 +[#165367]: https://github.com/cockroachdb/cockroach/pull/165367 +[#165395]: https://github.com/cockroachdb/cockroach/pull/165395 +[#165397]: https://github.com/cockroachdb/cockroach/pull/165397 +[#165457]: https://github.com/cockroachdb/cockroach/pull/165457 +[#165551]: https://github.com/cockroachdb/cockroach/pull/165551 +[#165712]: https://github.com/cockroachdb/cockroach/pull/165712 +[#165725]: 
https://github.com/cockroachdb/cockroach/pull/165725 +[#165727]: https://github.com/cockroachdb/cockroach/pull/165727 +[#165744]: https://github.com/cockroachdb/cockroach/pull/165744 +[#165802]: https://github.com/cockroachdb/cockroach/pull/165802 +[#165804]: https://github.com/cockroachdb/cockroach/pull/165804 +[#165849]: https://github.com/cockroachdb/cockroach/pull/165849 +[#165866]: https://github.com/cockroachdb/cockroach/pull/165866 +[#166132]: https://github.com/cockroachdb/cockroach/pull/166132 +[#166183]: https://github.com/cockroachdb/cockroach/pull/166183 +[#166223]: https://github.com/cockroachdb/cockroach/pull/166223 +[#166325]: https://github.com/cockroachdb/cockroach/pull/166325 +[#166664]: https://github.com/cockroachdb/cockroach/pull/166664 +[#166681]: https://github.com/cockroachdb/cockroach/pull/166681 +[#166705]: https://github.com/cockroachdb/cockroach/pull/166705 +[#166778]: https://github.com/cockroachdb/cockroach/pull/166778 +[#166793]: https://github.com/cockroachdb/cockroach/pull/166793 +[#166810]: https://github.com/cockroachdb/cockroach/pull/166810 +[#166829]: https://github.com/cockroachdb/cockroach/pull/166829 +[#166860]: https://github.com/cockroachdb/cockroach/pull/166860 +[#166920]: https://github.com/cockroachdb/cockroach/pull/166920 +[#167112]: https://github.com/cockroachdb/cockroach/pull/167112 +[#167192]: https://github.com/cockroachdb/cockroach/pull/167192 +[#167377]: https://github.com/cockroachdb/cockroach/pull/167377 +[#167405]: https://github.com/cockroachdb/cockroach/pull/167405 +[#167432]: https://github.com/cockroachdb/cockroach/pull/167432 +[#167484]: https://github.com/cockroachdb/cockroach/pull/167484 +[#167532]: https://github.com/cockroachdb/cockroach/pull/167532 + diff --git a/src/current/css/customstyles.scss b/src/current/css/customstyles.scss index f185178dfb8..90c50bbf948 100755 --- a/src/current/css/customstyles.scss +++ b/src/current/css/customstyles.scss @@ -128,3 +128,31 @@ table { } } } + 
+#feature-highlights table { + width: 100%; + display: table; +} + +// Release notes cluster settings and deprecations table styling +.release-cluster-settings-table table { + display: table; + table-layout: auto; +} + +.release-cluster-settings-table table td:first-child, +.release-cluster-settings-table table th:first-child { + width: 280px; + min-width: 200px; + max-width: 300px; + word-break: normal; + overflow-wrap: anywhere; + hyphens: none; +} + +.release-cluster-settings-table table td:first-child code { + word-break: normal; + overflow-wrap: anywhere; + white-space: normal; + hyphens: none; +} diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 896aef8cf36..fd143217259 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -14,6 +14,106 @@ Get future release notes emailed to you: {% include marketo.html formId=1083 %} +## May 13, 2026 + +

v26.2 Cloud Feature Highlights

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailabilitySelf-hostedBasicStandardAdvanced
+

2-DC Active-Passive Architecture

+

Deploy CockroachDB Cloud Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

+
Preview{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-yes.html %}
+

CockroachDB Cloud CLI Revamp

+

A modernized CockroachDB Cloud CLI with improved commands, better discoverability, and a more intuitive interface for managing clusters, users, and cloud resources from the terminal.

+
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Agent Roach: AI-Powered In-Console Assistant

+

An AI-powered in-console assistant that helps you troubleshoot issues, optimize queries, and manage CockroachDB clusters using natural language, without leaving the Cloud console.

+
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Multi-Factor Authentication for CockroachDB Cloud

+

Enforce multi-factor authentication for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials and strengthening organizational security posture.

+
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Bring Your Own Cloud (BYOC) for AWS, Azure, and GCP

+

Run CockroachDB Cloud clusters entirely within your own AWS, Azure, or GCP account with Bring Your Own Cloud, giving you full control over networking, security, and data residency while retaining fully managed database operations.

+
Preview{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-yes.html %}
+

CockroachDB Cloud MCP Server

+

Connect AI agents and LLM-powered applications to CockroachDB using the Model Context Protocol (MCP), enabling intelligent, database-aware AI workflows without custom integration work.

+
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Fault Tolerance Demo

+

Experience CockroachDB's resilience firsthand by simulating an AZ failure in a live production cluster and watching the cluster auto-recover while traffic remains unimpacted.

+
GA{% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+ +
+ ## Feb 24, 2026 CockroachDB {{ site.data.products.cloud }} {{ site.data.products.advanced }} users can now run a built-in [fault tolerance demo]({% link {{ site.versions["stable"] }}/demo-cockroachdb-resilience.md %}#run-a-guided-demo-in-cockroachdb-cloud) that allows you to monitor query execution during a simulated failure and recovery. The fault tolerance demo is in [Preview]({% link {{ site.versions["stable"] }}/cockroachdb-feature-availability.md %}). diff --git a/src/current/releases/v26.2.md b/src/current/releases/v26.2.md index 18f0f4bdf01..c91c8401ad4 100644 --- a/src/current/releases/v26.2.md +++ b/src/current/releases/v26.2.md @@ -1,7 +1,7 @@ --- title: What's New in v26.2 toc: true -toc_not_nested: true +toc_not_nested: false summary: Additions and changes in CockroachDB version v26.2 since version v26.1 major_version: v26.2 pre_production_preview: false @@ -25,7 +25,9 @@ docs_area: releases {% include releases/whats-new-intro.md major_version=vers %} {% for r in rel %} +{% if r.release_type == "Production" %} {% include releases/{{ page.major_version }}/{{ r.release_name }}.md release=r.release_name %} +{% endif %} {% endfor %} {% else %} From f7b1b59940f74f0214e97387611f360cac945dda Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Thu, 16 Apr 2026 18:19:54 -0400 Subject: [PATCH 03/32] simplify GA release notes; roll up rc1; feature highlight corrections --- src/current/_data/releases.yml | 9 ++- .../new-release-downloads-docker-image.md | 9 +-- .../release-downloads-docker-image.md | 5 -- .../releases/v26.2/cluster-setting-changes.md | 2 + .../_includes/releases/v26.2/v26.2.0.md | 77 ++++++++++--------- .../_includes/releases/whats-new-intro.md | 19 +++-- src/current/releases/cloud.md | 6 +- src/current/releases/v26.2.md | 2 +- 8 files changed, 65 insertions(+), 64 deletions(-) diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml index b4121e80b87..fee9a61ffe1 100644 --- a/src/current/_data/releases.yml +++ 
b/src/current/_data/releases.yml @@ -10920,12 +10920,13 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - previous_release: v26.2.0-beta.3 + previous_release: v26.2.0-rc.1 cloud_only: true - cloud_only_message_short: 'Currently available for CockroachDB Advanced only' + cloud_only_message_short: 'Available only for select CockroachDB Cloud clusters' cloud_only_message: > - This version is currently available only for - CockroachDB Cloud clusters on the Advanced plan. + This version is currently available only for select + CockroachDB Cloud clusters. Binaries for self-hosted clusters will be available + on May 13, 2026. - release_name: v26.2.0-alpha.2 major_version: v26.2 diff --git a/src/current/_includes/releases/new-release-downloads-docker-image.md b/src/current/_includes/releases/new-release-downloads-docker-image.md index b9c6218832d..91246fef1fa 100644 --- a/src/current/_includes/releases/new-release-downloads-docker-image.md +++ b/src/current/_includes/releases/new-release-downloads-docker-image.md @@ -2,7 +2,9 @@ {% assign release = site.data.releases | where_exp: "release", "release.release_name == include.release" | first %} {% assign version = site.data.versions | where_exp: "version", "version.major_version == release.major_version" | first %} -

Downloads

{% comment %} take the version name, force it to be lowercase, and replace all periods with hyphens. {% endcomment %} +{% if release.cloud_only != true %} +

Downloads

+{% endif %} {% if release.release_type == "Testing" %} {% include releases/experimental-test-release.md version=release.release_name %} @@ -99,8 +101,3 @@ docker pull {{ release.docker.docker_image }}:{{ release.release_name }} To view or download the source code for CockroachDB {{ release.release_name }} on Github, visit {{ release.release_name }} source tag. {% endif %} - -{% if release.previous_release %} -

Changelog

-View a detailed changelog on GitHub: [{{ release.previous_release }}...{{ release.release_name }}](https://github.com/cockroachdb/cockroach/compare/{{ release.previous_release }}...{{ release.release_name }}) -{% endif %} diff --git a/src/current/_includes/releases/release-downloads-docker-image.md b/src/current/_includes/releases/release-downloads-docker-image.md index 95a811c3d33..76fbc9a70af 100644 --- a/src/current/_includes/releases/release-downloads-docker-image.md +++ b/src/current/_includes/releases/release-downloads-docker-image.md @@ -95,9 +95,4 @@ To download the Docker image (Intel-only): docker pull {{ release.docker.docker_image }}:{{ release.release_name }} ~~~ - {% if release.previous_release %} -

Changelog

-View a detailed changelog on GitHub: [{{ release.previous_release }}...{{ release.release_name }}](https://github.com/cockroachdb/cockroach/compare/{{ release.previous_release }}...{{ release.release_name }}) - {% endif %} - {% endif %} diff --git a/src/current/_includes/releases/v26.2/cluster-setting-changes.md b/src/current/_includes/releases/v26.2/cluster-setting-changes.md index ce4509b37c0..6c2616a5d82 100644 --- a/src/current/_includes/releases/v26.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.2/cluster-setting-changes.md @@ -34,6 +34,8 @@ Review the following changes **before** upgrading. New default cluster settings | `sql.schema.auto_unlock.enabled` | Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471](https://github.com/cockroachdb/cockroach/pull/166471) | `true` | New setting | None | | `sql.prepared_transactions.unsafe.enabled` | Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. 
[#166855](https://github.com/cockroachdb/cockroach/pull/166855) | `false` | New setting | None | | `changefeed.kafka.max_request_size` | Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740](https://github.com/cockroachdb/cockroach/pull/166740) | `256 MiB` | New setting | None | +| `sql.stats.canary_fraction` | Cluster setting that controls the probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning. Valid range: [0.0-1.0]. [#167944](https://github.com/cockroachdb/cockroach/pull/167944) | `0.0` | New setting | None | +| `canary_stats_mode` | Session variable that controls which table statistics are used for query planning on the current session when `sql.stats.canary_fraction` is greater than `0`: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944](https://github.com/cockroachdb/cockroach/pull/167944) | `auto` | New setting | None | diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index a220dce9796..693960f9188 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -1,29 +1,21 @@ ## v26.2.0 -Release Date: May 13, 2026 +Release Date: April 28, 2026 -CockroachDB v26.2 includes new capabilities to help you migrate, build, and operate more efficiently. 
+{% include releases/new-release-downloads-docker-image.md release=include.release %} + +

Feature Highlights in v26.2

-For a summary of the most significant changes, refer to [Feature Highlights](#v26-2-0-feature-highlights), which contains the following categories: +This section summarizes the most significant user-facing changes in CockroachDB v26.2 in the following categories: - [SQL](#v26-2-0-sql) - [Security](#v26-2-0-security) - [Observability](#v26-2-0-observability) - [Performance](#v26-2-0-performance) -Before [upgrading to CockroachDB v26.2]({% link v26.2/upgrade-cockroach-version.md %}), be sure to also review the following [Upgrade Details](#v26-2-0-upgrade-details): - - - [Backward-incompatible changes](#v26-2-0-backward-incompatible-changes) - - [Features that require upgrade finalization](#v26-2-0-features-that-require-upgrade-finalization) - - [Key cluster setting changes](#v26-2-0-key-cluster-setting-changes) - - [Deprecations](#v26-2-0-deprecations) - - [Known limitations](#v26-2-0-known-limitations) - -{% include releases/new-release-downloads-docker-image.md release=include.release %} - -

Feature Highlights

- -This section summarizes the most significant user-facing changes in CockroachDB v26.2.0. For a complete list of features and changes in v26.2, including bug fixes and performance improvements, refer to the release notes for v26.2 testing releases. You can also search the docs for sections labeled [New in v26.2](https://www.cockroachlabs.com/docs/search?query=New+in+v26.2). +{{site.data.alerts.callout_success}} +You can also search the docs for sections labeled [New in v26.2](https://www.cockroachlabs.com/docs/search?query=New+in+v26.2). +{{site.data.alerts.end}}
@@ -55,7 +47,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

PostgreSQL-compatible fuzzystrmatch functions support

-

CockroachDB now supports PostgreSQL-compatible fuzzystrmatch functions: dmetaphone(), dmetaphone_alt(), and daitch_mokotoff() as built-in. These functions are useful for fuzzy string matching based on phonetic similarity. dmetaphone and dmetaphone_alt return Double Metaphone phonetic codes for a string, and daitch_mokotoff returns an array of Daitch-Mokotoff soundex codes.

+

CockroachDB now supports PostgreSQL-compatible fuzzystrmatch built-in functions: dmetaphone(), dmetaphone_alt(), and daitch_mokotoff(). These functions are useful for fuzzy string matching based on phonetic similarity. dmetaphone and dmetaphone_alt return Double Metaphone phonetic codes for a string, and daitch_mokotoff returns an array of Daitch-Mokotoff soundex codes.

GA {% include icon-yes.html %} @@ -77,7 +69,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Prevent auto-unlock of schema_locked tables

-

A new cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations automatically unlock schema_locked tables. When set to false, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce schema_locked as a hard lock preventing user-initiated DDL. The default is true, preserving existing behavior.

+

A new cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations automatically unlock schema_locked tables. When set to false, DDL statements on schema-locked tables are blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce schema_locked as a hard lock preventing user-initiated DDL. The default is true, preserving existing behavior.

GA {% include icon-yes.html %} @@ -118,7 +110,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Modernizing Database Authentication: CockroachDB Embraces Zero Trust with SPIFFE and SPIRE Support

CockroachDB now supports Subject Alternative Names (SAN) in X.509 certificates, improving compatibility with modern TLS clients and standard certificate management tooling.

- GA + Preview {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -157,7 +149,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Active Session History (ASH): Pinpoint Bottlenecks Across CPU, I/O & Contention

Track CPU, I/O, wait events, and contention for session activity (statements, jobs, etc.), enabling faster diagnosis of performance bottlenecks and precise correlation of activity to resource usage.

- GA + Preview {% include icon-yes.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -205,7 +197,7 @@ This section summarizes the most significant user-facing changes in CockroachDB

Buffered Writes

-

Buffered Writes is now generally available, delivering improved throughput and reduced tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

+

Buffered writes are now generally available, delivering improved throughput and reduced tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

GA {% include icon-yes.html %} @@ -218,39 +210,45 @@ This section summarizes the most significant user-facing changes in CockroachDB
-

Upgrade Details

+

Upgrade Details for v26.2

-Before you upgrade, review these changes and other information about the new major version. +Before you upgrade, review these changes and other information about the new major version: - + - [Backward-incompatible changes](#v26-2-0-backward-incompatible-changes) + - [Key cluster setting changes](#key-cluster-setting-changes) + - [Deprecations](#deprecations) + - [Features that require upgrade finalization](#features-that-require-upgrade-finalization) + - [Known limitations](#known-limitations) -

Backward-incompatible changes

+ -{% include releases/v26.2/backward-incompatible.md %} +#### Backward-incompatible changes - +{% include releases/v26.2/backward-incompatible.md %} -

Key cluster setting changes

+#### Key cluster setting changes {% include releases/v26.2/cluster-setting-changes.md %} - - -

Deprecations

+#### Deprecations {% include releases/v26.2/deprecations.md %} - - -

Features that require upgrade finalization

+#### Features that require upgrade finalization {% include releases/v26.2/upgrade-finalization.md %} -

Known limitations

+#### Known limitations + +This section describes newly identified limitations in CockroachDB v26.2. + +##### Views and routines + +- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) - +- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627)

Security updates

@@ -440,6 +438,7 @@ This section will be updated if functionality is later classified as a Known Lim - Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. [#167432][#167432] - Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. [#167484][#167484] - Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. [#167532][#167532] +- Fixed a bug where CockroachDB might not have respected the table-level parameters `sql_stats_automatic_full_collection_enabled` and `sql_stats_automatic_partial_collection_enabled` and defaulted to using the corresponding cluster settings when deciding whether to perform automatic statistics collection on a table. [#167681][#167681]

Performance improvements

@@ -455,6 +454,7 @@ This section will be updated if functionality is later classified as a Known Lim - The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` [#161816][#161816] - Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#162546][#162546] - The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. [#164212][#164212] +- Statement executions using canary stats will no longer use cached plans, which prevents cache thrashing but causes a slight increase in planning time over statement executions using stable stats. 
[#167503][#167503] [#146250]: https://github.com/cockroachdb/cockroach/pull/146250 @@ -629,4 +629,7 @@ This section will be updated if functionality is later classified as a Known Lim [#167432]: https://github.com/cockroachdb/cockroach/pull/167432 [#167484]: https://github.com/cockroachdb/cockroach/pull/167484 [#167532]: https://github.com/cockroachdb/cockroach/pull/167532 +[#167503]: https://github.com/cockroachdb/cockroach/pull/167503 +[#167681]: https://github.com/cockroachdb/cockroach/pull/167681 +[#167944]: https://github.com/cockroachdb/cockroach/pull/167944 diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index 45dfc21ee86..c5286eaf67c 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -8,6 +8,9 @@ {% assign lts = false %} {% assign install_links = '' %} +{% comment %}Get the production release for this major version to check cloud_only{% endcomment %} +{% assign release = site.data.releases | where_exp: "release", "release.major_version == page.major_version and release.release_type == 'Production'" | first %} + {% comment %}Early in development, a new major-version directory may not yet exist. Adapt some links in this situation.{% endcomment %} @@ -92,24 +95,24 @@ CockroachDB {{ page.major_version }} is in active development, and the following {% if skippable == true %} CockroachDB {{ page.major_version }} is an optional [Innovation release]({% link releases/index.md %}#major-versions). This version can be skipped for CockroachDB {{ site.data.products.advanced }} and {{ site.data.products.core }} clusters. It is unavailable for CockroachDB {{ site.data.products.standard }} and CockroachDB {{ site.data.products.basic }} clusters. 
{% else %} -CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link releases/release-support-policy.md %}#support-types){% endif %} is a required [Regular release]({% link releases/index.md %}#major-versions). +CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link releases/release-support-policy.md %}#support-types){% endif %} is a required [Regular release]({% link releases/index.md %}#major-versions). This page contains a complete list of features and changes in {{ page.major_version }}. {% endif %} -Refer to [Major release types]({% link releases/release-support-policy.md %}#support-types) before installing or upgrading for release timing and support details.{% if no_highlights == false and released == true %} To learn what's new in this release, refer to its [Feature Highlights](#feature-highlights).{% endif %} - -On this page, you can read about changes and find downloads for all production and testing releases of CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link releases/release-support-policy.md %}#support-phases){% endif %} - {% comment %}Only show these bullet points if the version has been released{% endcomment %} {% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} -- For key feature enhancements in {{ page.major_version }} and other upgrade considerations, refer to the notes for {% if include.major_version.release_date != 'N/A' and page.major_version != 'v1.0' and page.major_version != 'v19.2' %}[{{ page.major_version }}.0](#{{ page.major_version | replace: '.', '-' }}-0){% else %}{{ page.major_version }} on this page{% endif %}. +- For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature Highlights in {{ page.major_version }}](#feature-highlights). 
+- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review important [Upgrade Details for {{ page.major_version }}](#upgrade-details). {% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} -- For details about release types, naming, and licensing, refer to the [Releases]({% link releases/index.md %}) page. -- Be sure to also review the [Release Support Policy]({% link releases/release-support-policy.md %}). +- For details about the support window for this release type, review the [Release Support Policy]({% link releases/release-support-policy.md %}). +- For details about all supported releases, the release schedule, and licenses, refer to [CockroachDB Releases Overview]({% link releases/index.md %}). +{% if release.cloud_only != true %} - {{ install_sentence | strip_newlines }} +{% endif %} {% comment %}The strip_newlines is needed here because otherwise Jekyll inserts

tags around the install and upgrade links{% endcomment %} + Get future release notes emailed to you: {% include_cached marketo.html formId=1083 %} diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index fd143217259..aafbb4030b8 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -35,7 +35,7 @@ Get future release notes emailed to you:

2-DC Active-Passive Architecture

-

Deploy CockroachDB Cloud Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

+

Deploy CockroachDB Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

Preview {% include icon-no.html %} @@ -59,7 +59,7 @@ Get future release notes emailed to you:

Agent Roach: AI-Powered In-Console Assistant

An AI-powered in-console assistant that helps you troubleshoot issues, optimize queries, and manage CockroachDB clusters using natural language, without leaving the Cloud console.

- GA + Preview {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -68,7 +68,7 @@ Get future release notes emailed to you:

Multi-Factor Authentication for CockroachDB Cloud

-

Enforce multi-factor authentication for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials and strengthening organizational security posture.

+

Multi-factor authentication is now enforced for all CockroachDB Cloud users. This reduces the risk of unauthorized access from compromised credentials and strengthens organizational security posture.

GA {% include icon-no.html %} diff --git a/src/current/releases/v26.2.md b/src/current/releases/v26.2.md index c91c8401ad4..7d846fc3f08 100644 --- a/src/current/releases/v26.2.md +++ b/src/current/releases/v26.2.md @@ -1,7 +1,7 @@ --- title: What's New in v26.2 toc: true -toc_not_nested: false +toc_not_nested: true summary: Additions and changes in CockroachDB version v26.2 since version v26.1 major_version: v26.2 pre_production_preview: false From fef3c34f6179dbbcd24034814f762a6bb3a4c3fa Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Mon, 20 Apr 2026 17:05:18 -0400 Subject: [PATCH 04/32] simplify and clarify GA release notes --- .../releases/v26.2/backward-incompatible.md | 18 +- .../releases/v26.2/cluster-setting-changes.md | 45 +-- .../releases/v26.2/upgrade-finalization.md | 2 +- .../_includes/releases/v26.2/v26.2.0.md | 297 +++++++++--------- .../_includes/releases/whats-new-intro.md | 4 +- 5 files changed, 171 insertions(+), 195 deletions(-) diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index b7ed3504525..2550a9fcf35 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -1,15 +1,7 @@ -- **Statistics concurrency limit:** Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). [#161806](https://github.com/cockroachdb/cockroach/pull/161806) +This section summarizes changes that can cause applications, scripts, or manual workflows to fail or behave differently than in previous releases. This includes [key cluster setting changes](#key-cluster-setting-changes) and [deprecations](#deprecations). 
- **`TG_ARGV` indexing:** The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. [#161925](https://github.com/cockroachdb/cockroach/pull/161925) -- **Row size guardrails:** Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) - -- **Catalog descriptor caching:** Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
[#159162](https://github.com/cockroachdb/cockroach/pull/159162) - -- **Import elastic control:** The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) - -- **SST batcher elastic control:** The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) - - **DistSQL scan planning:** The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051](https://github.com/cockroachdb/cockroach/pull/160051) - **Empty `topic_name` validation:** Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) @@ -30,20 +22,12 @@ - **View privilege checking:** When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. 
If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- **Index backfill elastic control:** The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) - -- **Changefeed retry backoff:** Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874](https://github.com/cockroachdb/cockroach/pull/164874) - - **`ALTER CHANGEFEED ADD` validation:** Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) - **PCR reader AOST restriction:** Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) -- **Super regions:** The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. 
[#165227](https://github.com/cockroachdb/cockroach/pull/165227) - - **`TEMPORARY` database privilege:** Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) -- **`cockroach encode-uri` command:** The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. [#164561](https://github.com/cockroachdb/cockroach/pull/164561) - - **Statement diagnostics bundles:** Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) - **`crdb_internal` view access checks:** User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) diff --git a/src/current/_includes/releases/v26.2/cluster-setting-changes.md b/src/current/_includes/releases/v26.2/cluster-setting-changes.md index 6c2616a5d82..a43b8532222 100644 --- a/src/current/_includes/releases/v26.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.2/cluster-setting-changes.md @@ -1,41 +1,18 @@ -Review the following changes **before** upgrading. 
New default cluster settings will be used unless you have manually set a value for a setting. This can be confirmed by running the SQL statement (`SELECT * FROM system.settings`) to view the non-default settings. +Review the following changes **before** upgrading. New default values will be used unless you have manually set a cluster setting value. To view the non-default settings on your cluster, run the SQL statement `SELECT * FROM system.settings`.
-| Setting | Description | Default | Change type | Previous versions affected | +| Setting | Description | Previous default | New default | Backported to versions | |---|---|---|---|---| -| `security.provisioning.ldap.enabled` | LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. [#163199](https://github.com/cockroachdb/cockroach/pull/163199) | `false` | New setting | None | -| `security.provisioning.oidc.enabled` | Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#159787](https://github.com/cockroachdb/cockroach/pull/159787) | `false` | New setting | v26.1 | -| `security.client_cert.san_required.enabled` | The new cluster setting `security.client_cert.san_required.enabled` enables Subject Alternative Name (SAN) based authentication for client certificates. When enabled, CockroachDB validates client identities using SAN attributes (URIs, DNS names, or IP addresses) from X.509 certificates instead of or in addition to the certificate's Common Name field.

Key capabilities include:
  • For privileged users (root and node): SAN identities are validated against values configured via the `--root-cert-san` and `--node-cert-san` startup flags, with automatic fallback to Distinguished Name validation when both methods are configured.
  • For database users: SAN identities are extracted from client certificates and mapped to database usernames using Host-Based Authentication (HBA) identity mapping rules, allowing a single certificate with multiple SAN entries to authenticate as different database users based on context.
  • Multiple identity attributes: A single certificate can contain multiple SAN entries (e.g., URI for service identity, DNS for hostname, IP for network location), providing flexible authentication options.
This authentication method works across both SQL client connections and internal RPC communication between cluster nodes, ensuring consistent identity verification throughout the system. Organizations using modern certificate management systems and service identity frameworks can now leverage their existing infrastructure for database authentication without requiring certificate reissuance or CN-based naming conventions. [#162583](https://github.com/cockroachdb/cockroach/pull/162583) | `false` | New setting | None | -| `server.oidc_authentication.tls_insecure_skip_verify.enabled` | Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514](https://github.com/cockroachdb/cockroach/pull/164514) | `false` | New setting | None | -| `changefeed.max_retry_backoff` | Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874](https://github.com/cockroachdb/cockroach/pull/164874) | `30s` (was `10m`) | Changed default | v25.4, v26.1 | -| `changefeed.partition_alg.enabled` | Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. 
[#161265](https://github.com/cockroachdb/cockroach/pull/161265) | `false` | New setting | v25.2, v25.4, v26.1 | -| `sql.instance_info.use_instance_resolver.enabled` | The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). [#163947](https://github.com/cockroachdb/cockroach/pull/163947) | `true` | New setting | v26.1 | -| `bulkio.import.elastic_control.enabled` | The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) | `true` (was `false`) | Changed default | None | -| `bulkio.ingest.sst_batcher_elastic_control.enabled` | The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) | `true` (was `false`) | Changed default | None | -| `bulkio.index_backfill.elastic_control.enabled` | The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) | `true` (was `false`) | Changed default | None | -| `bulkio.import.distributed_merge.mode` | Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. 
When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. [#159330](https://github.com/cockroachdb/cockroach/pull/159330) | `false` | New setting | v26.1 | -| `bulkio.import.row_count_validation.mode` | Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543](https://github.com/cockroachdb/cockroach/pull/163543) | `async` | New setting | None | -| `kv.range_split.load_sample_reset_duration` | The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159499](https://github.com/cockroachdb/cockroach/pull/159499) | `30m` (was `0`) | Changed default | None | -| `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled` | Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. 
[#159436](https://github.com/cockroachdb/cockroach/pull/159436) | `true` | New setting | None | -| `server.sql_tcp_user.timeout` | Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037](https://github.com/cockroachdb/cockroach/pull/164037) | `30s` | New setting | None | -| `server.gc_assist.enabled` | A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555](https://github.com/cockroachdb/cockroach/pull/166555) | `true` | New setting | None | -| `sql.auth.skip_underlying_view_privilege_checks.enabled` | When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) | `false` | New setting | None | -| `sql.stats.automatic_full_concurrency_limit` | Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to the number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). 
[#161806](https://github.com/cockroachdb/cockroach/pull/161806) | number of vCPUs / 2 (was `1`) | Changed default | None | -| `sql.stats.automatic_extremes_concurrency_limit` | Added cluster settings to control the number of concurrent automatic statistics collection jobs: `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. Note that at most one statistics collection job can run on a single table at a time. [#158835](https://github.com/cockroachdb/cockroach/pull/158835) | `128` | New setting | v26.1 | -| `sql.stats.non_indexed_json_histograms.enabled` | Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). [#164477](https://github.com/cockroachdb/cockroach/pull/164477) | `false` | New setting | None | -| `sql.stmt_diagnostics.max_bundles_per_request` | Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) | `10` | New setting | None | -| `sql.catalog.allow_leased_descriptors.enabled` | Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
[#159162](https://github.com/cockroachdb/cockroach/pull/159162) | `true` (was `false`) | Changed default | v26.1 | -| `sql.guardrails.max_row_size_log` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `16 MiB` (was `64 MiB`) | Changed default | None | -| `sql.guardrails.max_row_size_err` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. 
[#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `80 MiB` (was `512 MiB`) | Changed default | None | -| `sql.defaults.super_regions.enabled` | The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227](https://github.com/cockroachdb/cockroach/pull/165227) | `false` | Deprecated | None | -| `obs.ash.log_interval` | Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093](https://github.com/cockroachdb/cockroach/pull/165093) | `10m` | New setting | None | -| `obs.ash.log_top_n` | Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093](https://github.com/cockroachdb/cockroach/pull/165093) | `10` | New setting | None | -| `sql.schema.auto_unlock.enabled` | Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. 
When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471](https://github.com/cockroachdb/cockroach/pull/166471) | `true` | New setting | None | -| `sql.prepared_transactions.unsafe.enabled` | Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855](https://github.com/cockroachdb/cockroach/pull/166855) | `false` | New setting | None | -| `changefeed.kafka.max_request_size` | Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740](https://github.com/cockroachdb/cockroach/pull/166740) | `256 MiB` | New setting | None | -| `sql.stats.canary_fraction` | Cluster setting that controls the probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning. Valid range: [0.0-1.0]. 
[#167944](https://github.com/cockroachdb/cockroach/pull/167944) | `0.0` | New setting | None | -| `canary_stats_mode` | Session variable that controls which table statistics are used for query planning on the current session when `sql.stats.canary_fraction` is greater than `0`: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944](https://github.com/cockroachdb/cockroach/pull/167944) | `auto` | New setting | None | +| `bulkio.import.elastic_control.enabled` | The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) | `false` | `true` | None | +| `bulkio.index_backfill.elastic_control.enabled` | The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) | `false` | `true` | None | +| `bulkio.ingest.sst_batcher_elastic_control.enabled` | The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) | `false` | `true` | None | +| `changefeed.max_retry_backoff` | Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. 
[#164874](https://github.com/cockroachdb/cockroach/pull/164874) | `10m` | `30s` | v25.4, v26.1 | +| `kv.range_split.load_sample_reset_duration` | The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159499](https://github.com/cockroachdb/cockroach/pull/159499) | `0` | `30m` | None | +| `sql.catalog.allow_leased_descriptors.enabled` | Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#159162](https://github.com/cockroachdb/cockroach/pull/159162) | `false` | `true` | v26.1 | +| `sql.guardrails.max_row_size_err` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `512 MiB` | `80 MiB` | None | +| `sql.guardrails.max_row_size_log` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. 
These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `64 MiB` | `16 MiB` | None |
+| `sql.stats.automatic_full_concurrency_limit` | Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to the number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). [#161806](https://github.com/cockroachdb/cockroach/pull/161806) | `1` | number of vCPUs / 2 | None |
diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index 6a20b6d8050..b54b706bf01 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,4 +1,4 @@ -During a major-version upgrade, certain features and performance improvements are not available until the upgrade is finalized. In v26.2, these are: +This section summarizes the features that are not available until you [finalize the v26.2 upgrade]({% link v26.2/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually). - **`security_invoker` option for views**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 693960f9188..b677c9c3240 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -4,14 +4,9 @@ Release Date: April 28, 2026 {% include releases/new-release-downloads-docker-image.md release=include.release %} -

Feature Highlights in v26.2

+### Feature highlights
 
-This section summarizes the most significant user-facing changes in CockroachDB v26.2 in the following categories:
-
- - [SQL](#v26-2-0-sql)
- - [Security](#v26-2-0-security)
- - [Observability](#v26-2-0-observability)
- - [Performance](#v26-2-0-performance)
+This section summarizes the most significant user-facing changes in [SQL](#v26-2-0-sql-highlights), [security](#v26-2-0-security-highlights), [observability](#v26-2-0-observability-highlights), and [performance](#v26-2-0-performance-highlights).
 
 {{site.data.alerts.callout_success}}
 You can also search the docs for sections labeled [New in v26.2](https://www.cockroachlabs.com/docs/search?query=New+in+v26.2).
@@ -19,7 +14,7 @@ You can also search the docs for sections labeled [New in v26.coc
-

SQL

+

SQL highlights

@@ -91,7 +86,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Security

+

Security highlights

@@ -130,7 +125,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Observability

+

Observability highlights

@@ -169,7 +164,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Performance

+

Performance highlights

@@ -210,19 +205,13 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc -

Upgrade Details for v26.2

- -Before you upgrade, review these changes and other information about the new major version: + - - [Backward-incompatible changes](#v26-2-0-backward-incompatible-changes) - - [Key cluster setting changes](#key-cluster-setting-changes) - - [Deprecations](#deprecations) - - [Features that require upgrade finalization](#features-that-require-upgrade-finalization) - - [Known limitations](#known-limitations) +### Features that require upgrade finalization - +{% include releases/v26.2/upgrade-finalization.md %} -#### Backward-incompatible changes +### Backward-incompatible changes {% include releases/v26.2/backward-incompatible.md %} @@ -234,25 +223,21 @@ Before you upgrade, review these changes and other information about the new maj {% include releases/v26.2/deprecations.md %} -#### Features that require upgrade finalization - -{% include releases/v26.2/upgrade-finalization.md %} - -#### Known limitations -This section describes newly identified limitations in CockroachDB v26.2. +

Security updates

-##### Views +- LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. [#163199][#163199] +- The new cluster setting `security.client_cert.san_required.enabled` enables Subject Alternative Name (SAN) based authentication for client certificates. When enabled, CockroachDB validates client identities using SAN attributes (URIs, DNS names, or IP addresses) from X.509 certificates instead of or in addition to the certificate's Common Name field. -- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) + Key capabilities include: -##### User-defined functions and stored procedures + - For privileged users (root and node): SAN identities are validated against values configured via the `--root-cert-san` and `--node-cert-san` startup flags, with automatic fallback to Distinguished Name validation when both methods are configured. -- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) + - For database users: SAN identities are extracted from client certificates and mapped to database usernames using Host-Based Authentication (HBA) identity mapping rules, allowing a single certificate with multiple SAN entries to authenticate as different database users based on context. + - Multiple identity attributes: A single certificate can contain multiple SAN entries (e.g., URI for service identity, DNS for hostname, IP for network location), providing flexible authentication options. -

Security updates

- + This authentication method works across both SQL client connections and internal RPC communication between cluster nodes, ensuring consistent identity verification throughout the system. Organizations using modern certificate management systems and service identity frameworks can now leverage their existing infrastructure for database authentication without requiring certificate reissuance or CN-based naming conventions. [#162583][#162583] - When the `security.provisioning.ldap.enabled` cluster setting is enabled, LDAP-authenticated DB Console logins now update the `estimated_last_login_time` column in the `system.users` table. [#163400][#163400] - When the `security.provisioning.oidc.enabled` cluster setting is enabled, OIDC-authenticated DB Console logins now populate the `estimated_last_login_time` column in `system.users`, allowing administrators to track when OIDC users last accessed the DB Console. [#164129][#164129] - Removed an overly restrictive TLS curve preference that limited FIPS mode to P-256. CockroachDB now uses Go's native FIPS curve selection, improving interoperability with clients that prefer other FIPS curves. [#166793][#166793] @@ -260,75 +245,96 @@ This section describes newly identified limitations in CockroachDB v26.2.

Enterprise edition changes

+- Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#159787][#159787] - LDAP authentication for the DB Console now additionally supports role-based access control (RBAC) through LDAP group membership. To use this feature, an administrator must first create roles in CockroachDB with names that match the Common Names (CN) of their LDAP groups. These roles should then be granted the desired privileges for DB Console access. When a user who is a member of a corresponding LDAP group logs into the DB Console, they will be automatically granted the role and its associated privileges, creating consistent behavior with SQL client connections. [#162302][#162302]

SQL language changes

-- Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236] -- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369] -- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880] +- Added cluster settings to control the number of concurrent automatic statistics collection jobs: + + - `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. + - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. + + Note that at most one statistics collection job can run on a single table at a time. [#158835][#158835] +- Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. 
This feature requires all nodes to be running v26.1 or later. [#159330][#159330] +- Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543][#163543] +- Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] +- Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855][#166855] - Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. 
Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` [#160137][#160137] - If the new `SHOW BACKUP` experience is enabled by setting the `use_backups_with_ids` session variable to true, `SHOW BACKUP` will parse the IDs provided by `SHOW BACKUPS` and display contents for single backups. [#160812][#160812] - If the new `RESTORE` experience is enabled by setting the `use_backups_with_ids` session variable to true, `RESTORE` will parse the IDs provided by `SHOW BACKUPS` and will restore the specified backup without the use of `AS OF SYSTEM TIME`. [#161294][#161294] - `SHOW BACKUP` and `RESTORE` now allow backup IDs even if the `use_backups_with_ids` session variable is not set. Setting the variable only configures whether `LATEST` is resolved using the new or legacy path. [#162329][#162329] - Added the `REVISION START TIME` option to the new `SHOW BACKUPS` experience enabled via the `use_backups_with_ids` session variable. Use the `REVISION START TIME` option to view the revision start times of revision history backups. [#161328][#161328] +- Added the `STRICT` option for locality-aware backups. When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. [#158999][#158999] +- `RESTORE TABLE/DATABASE` now supports the `WITH GRANTS` option, which restores grants on restore targets for users in the restoring cluster. Note that using this option with `new_db_name` will cause the new database to inherit the privileges in the backed-up database. [#164444][#164444] - Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. 
Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. [#159231][#159231] +- Added support for a new statement hint used to change session variable values for the duration of a single statement without application changes. The new hint type can be created using the `information_schema.crdb_set_session_variable_hint` built-in function. The override applies only when executing a statement matching the given fingerprint and does not persist on the session or surrounding transaction. [#164909][#164909] +- Introduced the `information_schema.crdb_delete_statement_hints` built-in function, which accepts 2 kinds of payload: `row_id` (int): the primary key of `system.statement_hints`; `fingerprint` (string). The function returns the number of rows deleted. [#163891][#163891] +- CockroachDB now includes `information_schema.crdb_rewrite_inline_hints` statements in the `schema.sql` file of a statement diagnostics bundle for re-creating all the statement hints bound to the statement. The hint recreation statements are sorted in ascending order of the original hint creation time. [#164164][#164164] +- Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. [#165457][#165457] +- `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. [#165712][#165712] +- The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. [#167192][#167192] - `CREATE OR REPLACE TRIGGER` is now supported. 
If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. [#162633][#162633] - Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. [#161915][#161915] - `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. [#163296][#163296] - `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. The replacement now atomically updates all dependent triggers to execute the new function body. [#163348][#163348] -- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] - Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] +- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] - A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. [#156771][#156771] -- Added the `STRICT` option for locality-aware backups. 
When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. [#158999][#158999] +- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. [#166920][#166920] +- Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236] +- Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. [#165744][#165744] +- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369] - Added support for the `dmetaphone()`, `dmetaphone_alt()`, and `daitch_mokotoff()` built-in functions, completing CockroachDB's implementation of the PostgreSQL `fuzzystrmatch` extension. `dmetaphone` and `dmetaphone_alt` return Double Metaphone phonetic codes for a string, and `daitch_mokotoff` returns an array of Daitch-Mokotoff soundex codes. These functions are useful for fuzzy string matching based on phonetic similarity. 
[#163430][#163430] -- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. [#156963][#156963] -- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. [#160486][#160486] +- Added `to_date(text, text)` and `to_timestamp(text, text)` SQL functions that parse dates and timestamps from formatted strings using PostgreSQL-compatible format patterns. For example, `to_date('2023-03-15', 'YYYY-MM-DD')` returns a date, and `to_timestamp('2023-03-15 14:30:45', 'YYYY-MM-DD HH24:MI:SS')` returns a `timestamptz`. [#164672][#164672] +- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] +- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] +- Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. [#150663][#150663] +- Aggregation function `ST_AsMVT` can now also be used as a window function. [#166860][#166860] +- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] - Queries executed via the vectorized engine now display their progress in the `phase` column of `SHOW QUERIES`. Previously, this feature was only available in the row-by-row engine. 
[#158029][#158029] - CockroachDB now shows execution statistics (like `execution time`) on `EXPLAIN ANALYZE` output for `render` nodes, which often handle built-in functions. [#161509][#161509] - The output of `EXPLAIN [ANALYZE]` in non-`VERBOSE` mode is now more succinct. [#153361][#153361] +- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. [#156963][#156963] +- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. [#160486][#160486] +- Active Session History tables are now accessible via `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history`, in addition to the existing `crdb_internal` tables. This improves discoverability when browsing `information_schema` for available metadata. [#164969][#164969] +- Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. [#165866][#165866] +- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880] +- Exposed the following settings for canary table statistics: + - Cluster setting `sql.stats.canary_fraction`: probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning [0.0-1.0]. 
+ - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944][#167944] - CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. [#164403][#164403] -- `RESTORE TABLE/DATABASE` now supports the `WITH GRANTS` option, which restores grants on restore targets for users in the restoring cluster. Note that using this option with `new_db_name` will cause the new database to inherit the privileges in the backed-up database. [#164444][#164444] -- Added support for a new statement hint used to change session variable values for the duration of a single statement without application changes. The new hint type can be created using the `information_schema.crdb_set_session_variable_hint` built-in function. The override applies only when executing a statement matching the given fingerprint and does not persist on the session or surrounding transaction. [#164909][#164909] -- Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. 
[#150663][#150663] -- Introduced the `information_schema.crdb_delete_statement_hints` built-in function, which accepts 2 kinds of payload: `row_id` (int): the primary key of `system.statement_hints`; `fingerprint` (string). The function returns the number of rows deleted. [#163891][#163891] -- Added `to_date(text, text)` and `to_timestamp(text, text)` SQL functions that parse dates and timestamps from formatted strings using PostgreSQL-compatible format patterns. For example, `to_date('2023-03-15', 'YYYY-MM-DD')` returns a date, and `to_timestamp('2023-03-15 14:30:45', 'YYYY-MM-DD HH24:MI:SS')` returns a `timestamptz`. [#164672][#164672] - Added support for importing Parquet files using the `IMPORT` statement. Parquet files can be imported from cloud storage URLs (`s3://`, `gs://`, `azure://`) or HTTP servers that support range requests (`Accept-Ranges: bytes`). This feature supports column-level compression formats (Snappy, GZIP, ZSTD, Brotli, etc.) as specified in the Parquet file format, but does not support additional file-level compression (e.g., `.parquet.gz` files). Nested Parquet types (lists, maps, structs) are not supported; only flat schemas with primitive types are supported at this time. [#163991][#163991] -- CockroachDB now includes `information_schema.crdb_rewrite_inline_hints` statements in the `schema.sql` file of a statement diagnostics bundle for re-creating all the statement hints bound to the statement. The hint recreation statements are sorted in ascending order of the original hint creation time. [#164164][#164164] - `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. [#161763][#161763] -- Active Session History tables are now accessible via `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history`, in addition to the existing `crdb_internal` tables. 
This improves discoverability when browsing `information_schema` for available metadata. [#164969][#164969] -- Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. [#165457][#165457] -- Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. [#165744][#165744] -- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] -- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] -- `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. [#165712][#165712] -- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] -- Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. 
[#165866][#165866] -- Aggregation function `ST_AsMVT` can now also be used as a window function. [#166860][#166860] -- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. [#166920][#166920] -- The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. [#167192][#167192] - Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405]

Operational changes

+- Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. [#161265][#161265] +- Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037][#164037] +- Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. [#159436][#159436] +- Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093][#165093] +- Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. 
[#164514][#164514] +- A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555][#166555] +- Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740][#166740] - The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] +- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] - Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). 
Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. [#160129][#160129] - Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] -- External connections can now be used with online restore. [#159090][#159090] -- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160798][#160798] -- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. [#160901][#160901] - Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161043][#161043] +- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. 
[#164827][#164827] +- Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] - Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. [#161062][#161062] +- External connections can now be used with online restore. [#159090][#159090] - Backup schedules that utilize the `revision_history` option now apply that option only to incremental backups triggered by that schedule, rather than duplicating the revision history in the full backups as well. [#162105][#162105] -- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] -- Jobs now clear their running status messages upon successful completion. [#163765][#163765] - Changefeed ranges are now more accurately reported as lagging. [#163427][#163427] -- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] -- Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. 
Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725] +- Jobs now clear their running status messages upon successful completion. [#163765][#163765] +- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. [#160901][#160901] - When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. This lets support engineers correlate the same user across multiple log entries without revealing the actual values. [#165804][#165804] -- Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] +- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160798][#160798] +- Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725]

Command-line changes

@@ -336,8 +342,8 @@ This section describes newly identified limitations in CockroachDB v26.2. - The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. [#160538][#160538] - The `cockroach debug tsdump` command now supports ZSTD encoding via `--format=raw --encoding=zstd`. This generates compressed tsdump files that are approximately 85% smaller than raw format. The `tsdump upload` command automatically detects and decompresses ZSTD files, allowing direct upload without manual decompression. [#161998][#161998] - The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#163266][#163266] -- Added a `--list-dbs` flag to `workload init workload_generator` that lists all user databases found in debug logs without initializing tables. This helps users discover which databases are available in the debug zip before running the full init command. [#163930][#163930] - Added the `--exclude-log-severities` flag to `cockroach debug zip` that filters log entries by severity server-side. For example, `--exclude-log-severities=INFO` excludes all `INFO`-level log entries from the collected log files, which can significantly reduce zip file size for large clusters. Valid severity names are `INFO`, `WARNING`, `ERROR`, and `FATAL`. The flag accepts a comma-delimited list or can be specified multiple times. 
[#165802][#165802] +- Added a `--list-dbs` flag to `workload init workload_generator` that lists all user databases found in debug logs without initializing tables. This helps users discover which databases are available in the debug zip before running the full init command. [#163930][#163930]

DB Console changes

@@ -348,115 +354,124 @@ This section describes newly identified limitations in CockroachDB v26.2.

Bug fixes

-- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] -- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159126][#159126] +- The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). [#163947][#163947] +- Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). [#164477][#164477] - CockroachDB could previously encounter internal errors like `column statistics cannot be determined for empty column set` and `invalid union` in some edge cases with `UNION`, `EXCEPT`, and `INTERSECT`. This has now been fixed. [#150706][#150706] - Fixed a bug that could cause a scan over a secondary index to read significantly more KVs than necessary in order to satisfy a limit when the scanned index had more than one column family. [#156672][#156672] -- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem.
When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#158527][#158527] +- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. [#159722][#159722] +- Fixed an error that occurred when using generic query plans that generate a lookup join on indexes containing identity computed columns. [#162036][#162036] +- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. [#163244][#163244] +- Fixed an optimizer limitation that prevented index usage on computed columns when querying through views or subqueries containing JSON fetch expressions (such as `->`, `->>`, `#>`, or `#>>`). Queries that project JSON expressions matching indexed computed column definitions now correctly use indexes instead of performing full table scans, significantly improving performance for JSON workloads. [#163395][#163395] +- Fixed a bug affecting statements within a UDF or stored procedure where the `LIMIT`/`OFFSET` is a reference to an argument of the UDF/SP.
[#163500][#163500] +- Fixed an issue where `ORDER BY` expressions containing subqueries with non-default `NULLS` ordering (e.g., `NULLS LAST` for `ASC`, `NULLS FIRST` for `DESC`) could cause an error during query planning. [#163230][#163230] +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with `ON` filter that is mostly `false`. [#164879][#164879] +- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159126][#159126] - Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. [#158935][#158935] -- Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. [#159176][#159176] -- The `ascii` built-in function now returns `0` when the input is the empty string instead of an error. 
[#159178][#159178] -- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. [#159180][#159180] -- Fixed a bug which could cause prepared statements to fail with the error message `non-const expression` when they contained filters with stable functions. This bug has been present since 25.4.0. [#159201][#159201] -- Fixed a bug in the TPC-C workload where long-duration runs (>= 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. [#159286][#159286] -- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#159354][#159354] -- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159378][#159378] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159403][#159403] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159431][#159431] -- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159527][#159527] - Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. [#159642][#159642] -- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. 
This bug was introduced in v24.3.0. [#159722][#159722] -- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160346][#160346] -- Fixed a deadlock that could occur when a statistics creation task panicked. [#160348][#160348] -- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. [#160499][#160499] -- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. [#160608][#160608] -- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. [#160674][#160674] +- Fixed a bug in which PL/pgSQL UDFs with many `IF` statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. [#162512][#162512] +- Fixed a bug where running `EXPLAIN ANALYZE (DEBUG)` on a query that invokes a UDF with many blocks could cause out-of-memory errors (OOMs). [#166132][#166132] +- Fixed a bug where `ALTER FUNCTION ... RENAME TO` and `ALTER PROCEDURE ... RENAME TO` could create duplicate functions in non-public schemas. [#166681][#166681] +- Fixed a race condition/conflict between concurrent `ALTER FUNCTION ... SET SCHEMA` and `DROP SCHEMA` operations. [#164043][#164043] +- Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. [#159176][#159176] - Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. 
[#160780][#160780] -- An error will now be reported when the database provided as the argument to a `SHOW REGIONS` or `SHOW SUPER REGIONS` statement does not exist. This bug had been present since version v21.1. [#161014][#161014] - Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. [#161083][#161083] -- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. [#161273][#161273] +- `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. [#164557][#164557] +- Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. [#164739][#164739] +- Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159431][#159431] +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] +- Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. 
[#163507][#163507] +- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. [#163885][#163885] +- Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. [#164765][#164765] +- Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. [#164933][#164933] +- Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. [#164757][#164757] +- Fixed a bug where incremental backups taken after downgrading a mixed-version cluster to v25.4 could result in inconsistent backup indexes. [#164301][#164301] +- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. [#166183][#166183] - Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. [#161290][#161290] - Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. 
Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. [#161318][#161318] -- Fixed a bug that prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug had existed since hint injection was introduced in v26.1.0-alpha.2. [#161773][#161773] -- Fixed prepared statements failing with `version mismatch` errors when user-defined types are modified between preparation and execution. Prepared statements now automatically detect UDT changes and re-parse to use current type definitions. [#161827][#161827] -- Previously, CockroachDB could hit an internal error when evaluating built-in functions with `'{}'` as an argument (without explicit type casts, such as on a query like `SELECT cardinality('{}');`). This is now fixed and a regular error is returned instead (matching PostgreSQL behavior). [#161835][#161835] -- Fixed a bug where the index definition shown in `pg_indexes` for hash sharded indexes with `STORING` columns was not valid SQL. The `STORING` clause now appears in the correct position. [#161882][#161882] -- Fixed a bug where `DROP TABLE ... CASCADE` would incorrectly drop tables that had triggers or row-level security (RLS) policies referencing the dropped table. Now only the triggers/policies are dropped, and the tables owning them remain intact. [#161914][#161914] +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159627][#159627] +- Invalid `avro_schema_prefix` is now caught during statement time. 
The prefix must start with `[A-Za-z_]` and subsequently contain only `[A-Za-z0-9_]`, as specified in the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html). [#159869][#159869] +- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#164881][#164881] - Reduced contention when dropping descriptors or running concurrent imports. [#161941][#161941] +- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160346][#160346] - Fixed a bug where multi-statement explicit transactions using `SAVEPOINT` to recover from certain errors (like duplicate key-value violations) could lose writes performed before the savepoint was created, in rare cases when buffered writes were enabled (off by default). This bug was introduced in v25.2. [#161972][#161972] -- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. [#161979][#161979] -- Fixed an error that occurred when using generic query plans that generates a lookup join on indexes containing identity computed columns. [#162036][#162036] -- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159403][#159403] +- Fixed a bug which could cause prepared statements to fail with the error message `non-const expression` when they contained filters with stable functions. This bug has been present since 25.4.0. [#159201][#159201] +- Fixed prepared statements failing with `version mismatch` errors when user-defined types are modified between preparation and execution. 
Prepared statements now automatically detect UDT changes and re-parse to use current type definitions. [#161827][#161827] - Fixed an internal error `could not find format code for column N` that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. [#162115][#162115] +- Fixed a bug where CockroachDB returned "cached plan must not change result type" errors during the `Execute` phase instead of the `Bind` phase of the extended pgwire protocol. This caused compatibility issues with drivers like pgx that expect the error before `BindComplete` is sent, particularly when using batch operations with prepared statements after schema changes. [#164406][#164406] +- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. [#160499][#160499] +- Fixed a bug where the index definition shown in `pg_indexes` for hash sharded indexes with `STORING` columns was not valid SQL. The `STORING` clause now appears in the correct position. [#161882][#161882] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#166325][#166325] - Fixed a bug where statement bundles were missing `CREATE TYPE` statements for user-defined types used as array column types. [#162357][#162357] -- Fixed a bug in which PL/pgSQL UDFs with many `IF` statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. [#162512][#162512] -- Fixed a bug where an error would occur when defining a foreign key on a hash-sharded primary key without explicitly providing the primary key columns. 
[#162608][#162608] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163224][#163224] -- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. [#163244][#163244] -- Fixed an optimizer limitation that prevented index usage on computed columns when querying through views or subqueries containing JSON fetch expressions (such as `->`, `->>`, `#>`, or `#>>`). Queries that project JSON expressions matching indexed computed column definitions now correctly use indexes instead of performing full table scans, significantly improving performance for JSON workloads. [#163395][#163395] -- Statements within a UDF or stored procedure similar to (1) and (2) where the limit/offset is a reference to an argument of the UDF/SP. [#163500][#163500] -- Dropping a region from the system database no longer leaves `REGIONAL BY TABLE` system tables referencing the removed region, preventing descriptor validation errors. [#163503][#163503] -- Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. [#163507][#163507] -- Fixed a bug where `EXPLAIN ANALYZE (DEBUG)` statement bundles did not include triggers, their functions, or tables modified by those triggers. The bundle's `schema.sql` file now contains the `CREATE TRIGGER`, `CREATE FUNCTION`, and `CREATE TABLE` statements needed to fully reproduce the query environment when triggers are involved. 
[#163584][#163584] - Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. [#163883][#163883] -- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. [#163885][#163885] +- Fixed a bug where creating a table with a user-defined type column failed when the user had `USAGE` privilege on the base type but not on its implicit array type. The array type now inherits privileges from the base type, matching PostgreSQL behavior. [#164471][#164471] +- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. [#166223][#166223] +- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#159354][#159354] +- Fixed a bug where `DROP TABLE ... CASCADE` would incorrectly drop tables that had triggers or row-level security (RLS) policies referencing the dropped table. Now only the triggers/policies are dropped, and the tables owning them remain intact. [#161914][#161914] +- Fixed a bug where `EXPLAIN ANALYZE (DEBUG)` statement bundles did not include triggers, their functions, or tables modified by those triggers. The bundle's `schema.sql` file now contains the `CREATE TRIGGER`, `CREATE FUNCTION`, and `CREATE TABLE` statements needed to fully reproduce the query environment when triggers are involved. [#163584][#163584] - Fixed a bug where dropped columns appeared in `pg_catalog.pg_attribute` with the `atttypid` column equal to 2283 (`anyelement`). Now this column will be 0 for dropped columns. 
This matches PostgreSQL behavior, where `atttypid=0` is used for dropped columns. [#163950][#163950] -- Fixed a race condition/conflict between concurrent `ALTER FUNCTION ... SET SCHEMA` and `DROP SCHEMA` operations. [#164043][#164043] +- Fixed a bug where temporary tables created in one session could fail to appear in `pg_catalog` queries from another session because the parent temporary schema could not be resolved by ID. [#165395][#165395] +- The `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history` views now include the `app_name` column, matching the underlying `crdb_internal` tables. [#165367][#165367] +- An error will now be reported when the database provided as the argument to a `SHOW REGIONS` or `SHOW SUPER REGIONS` statement does not exist. This bug had been present since version v21.1. [#161014][#161014] +- Dropping a region from the system database no longer leaves `REGIONAL BY TABLE` system tables referencing the removed region, preventing descriptor validation errors. [#163503][#163503] - Fixed a bug where super region zone configurations did not constrain all replicas to regions within the super region. [#164285][#164285] -- Fixed a bug where CockroachDB returned "cached plan must not change result type" errors during the `Execute` phase instead of the `Bind` phase of the extended pgwire protocol. This caused compatibility issues with drivers like pgx that expect the error before `BindComplete` is sent, particularly when using batch operations with prepared statements after schema changes. [#164406][#164406] -- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159627][#159627] -- Invalid `avro_schema_prefix` is now caught during statement time. 
The prefix must start with `[A-Za-z_]` and subsequently contain only `[A-Za-z0-9_]`, as specified in the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html). [#159869][#159869] -- JWT authentication now returns a clear error when HTTP requests to fetch JWKS or OpenID configuration return non-`2xx` status codes, instead of silently passing the response body to the JSON parser. [#158294][#158294] -- Fixed an issue where `ORDER BY` expressions containing subqueries with non-default `NULLS` ordering (e.g., `NULLS LAST` for `ASC`, `NULLS FIRST` for `DESC`) could cause an error during query planning. [#163230][#163230] -- Fixed a bug where incremental backups taken after downgrading a mixed-version cluster to v25.4 could result in inconsistent backup indexes. [#164301][#164301] -- Fixed a bug where creating a table with a user-defined type column failed when the user had `USAGE` privilege on the base type but not on its implicit array type. The array type now inherits privileges from the base type, matching PostgreSQL behavior. [#164471][#164471] -- `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. [#164557][#164557] -- Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. [#164671][#164671] -- Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. 
[#164739][#164739] -- Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. [#164757][#164757] -- Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. [#164765][#164765] -- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with `ON` filter that is mostly `false`. [#164879][#164879] -- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#164881][#164881] -- Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. [#164933][#164933] -- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. [#164942][#164942] - Fixed a bug that had previously allowed the primary and secondary to be in separate super regions. [#164943][#164943] +- Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. 
[#167484][#167484] +- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159378][#159378] +- The PCR job now switches into the cutover phase more promptly after a failover is requested, terminating the replication phase more quickly and more reliably when components of the ingestion process are hung due to network errors. [#166778][#166778] +- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#158527][#158527] +- Fixed a deadlock that could occur when a statistics creation task panicked. [#160348][#160348] - Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165260][#165260] -- The `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history` views now include the `app_name` column, matching the underlying `crdb_internal` tables. [#165367][#165367] -- Fixed a bug where temporary tables created in one session could fail to appear in `pg_catalog` queries from another session because the parent temporary schema could not be resolved by ID. 
[#165395][#165395] +- Fixed a bug where CockroachDB might not have respected the table-level parameters `sql_stats_automatic_full_collection_enabled` and `sql_stats_automatic_partial_collection_enabled` and defaulted to using the corresponding cluster settings when deciding whether to perform automatic statistics collection on a table. [#167681][#167681] +- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. [#160674][#160674] +- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. [#161979][#161979] +- Fixed a bug where an error would occur when defining a foreign key on a hash-sharded primary key without explicitly providing the primary key columns. [#162608][#162608] +- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. [#164942][#164942] - Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. [#165551][#165551] -- Fixed a bug where running `EXPLAIN ANALYZE (DEBUG)` on a query that invokes a UDF with many blocks could cause out-of-memory errors (OOMs). [#166132][#166132] -- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. [#166183][#166183] -- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. 
[#166223][#166223] -- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#166325][#166325] -- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. [#166705][#166705] +- Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. [#167532][#167532] +- JWT authentication now returns a clear error when HTTP requests to fetch JWKS or OpenID configuration return non-`2xx` status codes, instead of silently passing the response body to the JSON parser. [#158294][#158294] - Fixed a data race that could cause certificate expiration metrics (`security.certificate.expiration.node-client`, `security.certificate.expiration.client-tenant`, `security.certificate.expiration.ca-client-tenant` and their TTL counterparts) to not update after certificate rotation via `SIGHUP`. [#166664][#166664] -- Fixed a bug where `ALTER FUNCTION ... RENAME TO` and `ALTER PROCEDURE ... RENAME TO` could create duplicate functions in non-public schemas. [#166681][#166681] -- The PCR job now switches into the cutover phase more promptly after a failover is requested, terminating the replication phase more quickly and more reliably when components of the ingestion process are hung due to network errors. [#166778][#166778] +- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. 
[#161273][#161273] +- Fixed a bug that prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug had existed since hint injection was introduced in v26.1.0-alpha.2. [#161773][#161773] +- The `ascii` built-in function now returns `0` when the input is the empty string instead of an error. [#159178][#159178] +- Previously, CockroachDB could hit an internal error when evaluating built-in functions with `'{}'` as an argument (without explicit type casts, such as on a query like `SELECT cardinality('{}');`). This is now fixed and a regular error is returned instead (matching PostgreSQL behavior). [#161835][#161835] +- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. [#159180][#159180] +- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159527][#159527] +- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. [#160608][#160608] +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163224][#163224] +- Fixed a bug in the TPC-C workload where long-duration runs (>= 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. [#159286][#159286] +- Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. 
[#164671][#164671] - Fixed a bug where descriptor version fetching could be incorrectly throttled by the elastic CPU limiter, potentially leading to increased query latency or timeouts under high CPU load. [#166810][#166810] -- Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] +- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. [#166705][#166705] - Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. [#167377][#167377] - Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. [#167432][#167432] -- Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. [#167484][#167484] -- Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. [#167532][#167532] -- Fixed a bug where CockroachDB might not have respected the table-level parameters `sql_stats_automatic_full_collection_enabled` and `sql_stats_automatic_partial_collection_enabled` and defaulted to using the corresponding cluster settings when deciding whether to perform automatic statistics collection on a table. [#167681][#167681]

Performance improvements

- Database- and table-level backups no longer fetch all object descriptors from disk in order to resolve the backup targets. Now only the objects that are referenced by the targeted objects will be fetched. This improves performance when there are many tables in the cluster. [#157790][#157790] -- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. [#159205][#159205] +- The optimizer now better optimizes query plans of statements within UDFs and stored procedures that have `IN` subqueries. [#160503][#160503] +- The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` [#161816][#161816] +- The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. [#164212][#164212] - Improved changefeed performance when filtering unwatched column families and offline tables by replacing expensive error chain traversal with direct status enum comparisons. [#159745][#159745] -- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. [#160121][#160121] +- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#162546][#162546] - Queries that have comparison expressions with the `levenshtein` built-in are now up to 30% faster. 
[#160394][#160394] -- The optimizer now better optimizes query plans of statements within UDFs and stored procedures that have `IN` subqueries. [#160503][#160503] +- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. [#160121][#160121] - Significantly reduced WAL write latency when using encryption at rest by properly recycling WAL files instead of deleting and recreating them. [#160784][#160784] - Optimized the logic that applies zone config constraints so it no longer fetches all descriptors in the cluster during background constraint reconciliation. [#160966][#160966] -- The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` [#161816][#161816] -- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#162546][#162546] -- The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. [#164212][#164212] +- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. [#159205][#159205] - Statement executions using canary stats will no longer use cached plans, which prevents cache thrashing but causes a slight increase in planning time over statement executions using stable stats. [#167503][#167503] +### Known limitations + +This section describes newly identified limitations in CockroachDB v26.2. 
+ +- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) +- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) + [#146250]: https://github.com/cockroachdb/cockroach/pull/146250 [#150663]: https://github.com/cockroachdb/cockroach/pull/150663 [#150706]: https://github.com/cockroachdb/cockroach/pull/150706 diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index c5286eaf67c..5f46571ba1f 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -101,8 +101,8 @@ CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link release {% comment %}Only show these bullet points if the version has been released{% endcomment %} {% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} -- For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature Highlights in {{ page.major_version }}](#feature-highlights). -- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review important [Upgrade Details for {{ page.major_version }}](#upgrade-details). +- For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature highlights](#feature-highlights). 
+- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes) and newly identified [known limitations](#known-limitations). {% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} - For details about the support window for this release type, review the [Release Support Policy]({% link releases/release-support-policy.md %}). From bcba65df06d29ed63bfe2396cb9853516a0b1066 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 11:28:35 -0400 Subject: [PATCH 05/32] cleanup and corrections --- .../releases/v26.2/backward-incompatible.md | 2 +- .../_includes/releases/v26.2/v26.2.0.md | 23 +++++++++++++++++-- src/current/releases/cloud.md | 4 ++-- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index 2550a9fcf35..bb2c2962580 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -20,7 +20,7 @@ This section summarizes changes that can cause applications, scripts, or manual - **`incremental_location` option:** Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416](https://github.com/cockroachdb/cockroach/pull/160416) -- **View privilege checking:** When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. 
If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- **View owner privilege checking:** When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) - **`ALTER CHANGEFEED ADD` validation:** Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index b677c9c3240..371526b92e8 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -258,7 +258,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc Note that at most one statistics collection job can run on a single table at a time. [#158835][#158835] - Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. 
This feature requires all nodes to be running v26.1 or later. [#159330][#159330] -- Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543][#163543] - Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] - Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855][#166855] - Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. 
Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` [#160137][#160137] @@ -323,6 +322,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] - Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. [#160129][#160129] - Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] +- RPC connection metrics now include a `protocol` label. The following metrics are affected: `rpc.connection.avg_round_trip_latency`, `rpc.connection.failures`, `rpc.connection.healthy`, `rpc.connection.healthy_nanos`, `rpc.connection.heartbeats`, `rpc.connection.tcp_rtt`, `rpc.connection.tcp_rtt_var`, `rpc.connection.unhealthy`, `rpc.connection.unhealthy_nanos`, and `rpc.connection.inactive`. In v26.2, the label value is always `grpc`. 
For example: `rpc_connection_healthy{node_id="1",remote_node_id="0",remote_addr="localhost:26258",class="system",protocol="grpc"} 1` [#162528][#162528] - Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161043][#161043] - Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] - Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] @@ -376,6 +376,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. [#159176][#159176] - Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. [#160780][#160780] - Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. 
[#161083][#161083] +- Fixed a bug that caused `ALTER INDEX ... PARTITION BY` statements to fail on a nonexistent index even if `IF EXISTS` was used. [#163378][#163378] - `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. [#164557][#164557] - Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. [#164739][#164739] - Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] @@ -483,6 +484,7 @@ This section describes newly identified limitations in CockroachDB v26.2. [#158029]: https://github.com/cockroachdb/cockroach/pull/158029 [#158294]: https://github.com/cockroachdb/cockroach/pull/158294 [#158527]: https://github.com/cockroachdb/cockroach/pull/158527 +[#158835]: https://github.com/cockroachdb/cockroach/pull/158835 [#158935]: https://github.com/cockroachdb/cockroach/pull/158935 [#158999]: https://github.com/cockroachdb/cockroach/pull/158999 [#159090]: https://github.com/cockroachdb/cockroach/pull/159090 @@ -494,15 +496,18 @@ This section describes newly identified limitations in CockroachDB v26.2. 
[#159205]: https://github.com/cockroachdb/cockroach/pull/159205 [#159231]: https://github.com/cockroachdb/cockroach/pull/159231 [#159286]: https://github.com/cockroachdb/cockroach/pull/159286 +[#159330]: https://github.com/cockroachdb/cockroach/pull/159330 [#159354]: https://github.com/cockroachdb/cockroach/pull/159354 [#159378]: https://github.com/cockroachdb/cockroach/pull/159378 [#159403]: https://github.com/cockroachdb/cockroach/pull/159403 [#159431]: https://github.com/cockroachdb/cockroach/pull/159431 +[#159436]: https://github.com/cockroachdb/cockroach/pull/159436 [#159527]: https://github.com/cockroachdb/cockroach/pull/159527 [#159627]: https://github.com/cockroachdb/cockroach/pull/159627 [#159642]: https://github.com/cockroachdb/cockroach/pull/159642 [#159722]: https://github.com/cockroachdb/cockroach/pull/159722 [#159745]: https://github.com/cockroachdb/cockroach/pull/159745 +[#159787]: https://github.com/cockroachdb/cockroach/pull/159787 [#159869]: https://github.com/cockroachdb/cockroach/pull/159869 [#160121]: https://github.com/cockroachdb/cockroach/pull/160121 [#160129]: https://github.com/cockroachdb/cockroach/pull/160129 @@ -529,6 +534,7 @@ This section describes newly identified limitations in CockroachDB v26.2. [#161050]: https://github.com/cockroachdb/cockroach/pull/161050 [#161062]: https://github.com/cockroachdb/cockroach/pull/161062 [#161083]: https://github.com/cockroachdb/cockroach/pull/161083 +[#161265]: https://github.com/cockroachdb/cockroach/pull/161265 [#161273]: https://github.com/cockroachdb/cockroach/pull/161273 [#161290]: https://github.com/cockroachdb/cockroach/pull/161290 [#161294]: https://github.com/cockroachdb/cockroach/pull/161294 @@ -558,15 +564,19 @@ This section describes newly identified limitations in CockroachDB v26.2. 
[#162329]: https://github.com/cockroachdb/cockroach/pull/162329 [#162357]: https://github.com/cockroachdb/cockroach/pull/162357 [#162512]: https://github.com/cockroachdb/cockroach/pull/162512 +[#162528]: https://github.com/cockroachdb/cockroach/pull/162528 [#162546]: https://github.com/cockroachdb/cockroach/pull/162546 +[#162583]: https://github.com/cockroachdb/cockroach/pull/162583 [#162608]: https://github.com/cockroachdb/cockroach/pull/162608 [#162633]: https://github.com/cockroachdb/cockroach/pull/162633 +[#163199]: https://github.com/cockroachdb/cockroach/pull/163199 [#163224]: https://github.com/cockroachdb/cockroach/pull/163224 [#163230]: https://github.com/cockroachdb/cockroach/pull/163230 [#163244]: https://github.com/cockroachdb/cockroach/pull/163244 [#163266]: https://github.com/cockroachdb/cockroach/pull/163266 [#163296]: https://github.com/cockroachdb/cockroach/pull/163296 [#163348]: https://github.com/cockroachdb/cockroach/pull/163348 +[#163378]: https://github.com/cockroachdb/cockroach/pull/163378 [#163395]: https://github.com/cockroachdb/cockroach/pull/163395 [#163400]: https://github.com/cockroachdb/cockroach/pull/163400 [#163427]: https://github.com/cockroachdb/cockroach/pull/163427 @@ -581,8 +591,10 @@ This section describes newly identified limitations in CockroachDB v26.2. 
[#163885]: https://github.com/cockroachdb/cockroach/pull/163885 [#163891]: https://github.com/cockroachdb/cockroach/pull/163891 [#163930]: https://github.com/cockroachdb/cockroach/pull/163930 +[#163947]: https://github.com/cockroachdb/cockroach/pull/163947 [#163950]: https://github.com/cockroachdb/cockroach/pull/163950 [#163991]: https://github.com/cockroachdb/cockroach/pull/163991 +[#164037]: https://github.com/cockroachdb/cockroach/pull/164037 [#164043]: https://github.com/cockroachdb/cockroach/pull/164043 [#164129]: https://github.com/cockroachdb/cockroach/pull/164129 [#164164]: https://github.com/cockroachdb/cockroach/pull/164164 @@ -596,6 +608,8 @@ This section describes newly identified limitations in CockroachDB v26.2. [#164406]: https://github.com/cockroachdb/cockroach/pull/164406 [#164444]: https://github.com/cockroachdb/cockroach/pull/164444 [#164471]: https://github.com/cockroachdb/cockroach/pull/164471 +[#164477]: https://github.com/cockroachdb/cockroach/pull/164477 +[#164514]: https://github.com/cockroachdb/cockroach/pull/164514 [#164557]: https://github.com/cockroachdb/cockroach/pull/164557 [#164671]: https://github.com/cockroachdb/cockroach/pull/164671 [#164672]: https://github.com/cockroachdb/cockroach/pull/164672 @@ -610,6 +624,7 @@ This section describes newly identified limitations in CockroachDB v26.2. [#164942]: https://github.com/cockroachdb/cockroach/pull/164942 [#164943]: https://github.com/cockroachdb/cockroach/pull/164943 [#164969]: https://github.com/cockroachdb/cockroach/pull/164969 +[#165093]: https://github.com/cockroachdb/cockroach/pull/165093 [#165260]: https://github.com/cockroachdb/cockroach/pull/165260 [#165367]: https://github.com/cockroachdb/cockroach/pull/165367 [#165395]: https://github.com/cockroachdb/cockroach/pull/165395 @@ -628,13 +643,17 @@ This section describes newly identified limitations in CockroachDB v26.2. 
[#166183]: https://github.com/cockroachdb/cockroach/pull/166183 [#166223]: https://github.com/cockroachdb/cockroach/pull/166223 [#166325]: https://github.com/cockroachdb/cockroach/pull/166325 +[#166471]: https://github.com/cockroachdb/cockroach/pull/166471 +[#166555]: https://github.com/cockroachdb/cockroach/pull/166555 [#166664]: https://github.com/cockroachdb/cockroach/pull/166664 [#166681]: https://github.com/cockroachdb/cockroach/pull/166681 [#166705]: https://github.com/cockroachdb/cockroach/pull/166705 +[#166740]: https://github.com/cockroachdb/cockroach/pull/166740 [#166778]: https://github.com/cockroachdb/cockroach/pull/166778 [#166793]: https://github.com/cockroachdb/cockroach/pull/166793 [#166810]: https://github.com/cockroachdb/cockroach/pull/166810 [#166829]: https://github.com/cockroachdb/cockroach/pull/166829 +[#166855]: https://github.com/cockroachdb/cockroach/pull/166855 [#166860]: https://github.com/cockroachdb/cockroach/pull/166860 [#166920]: https://github.com/cockroachdb/cockroach/pull/166920 [#167112]: https://github.com/cockroachdb/cockroach/pull/167112 @@ -643,8 +662,8 @@ This section describes newly identified limitations in CockroachDB v26.2. 
[#167405]: https://github.com/cockroachdb/cockroach/pull/167405 [#167432]: https://github.com/cockroachdb/cockroach/pull/167432 [#167484]: https://github.com/cockroachdb/cockroach/pull/167484 -[#167532]: https://github.com/cockroachdb/cockroach/pull/167532 [#167503]: https://github.com/cockroachdb/cockroach/pull/167503 +[#167532]: https://github.com/cockroachdb/cockroach/pull/167532 [#167681]: https://github.com/cockroachdb/cockroach/pull/167681 [#167944]: https://github.com/cockroachdb/cockroach/pull/167944 diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index aafbb4030b8..86320a5a0c1 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -14,9 +14,9 @@ Get future release notes emailed to you: {% include marketo.html formId=1083 %} -## May 13, 2026 +## April 28, 2026 -

v26.2 Cloud Feature Highlights

+

v26.2 feature highlights

From 9e290946e9679ede33b32e546eb3381d64339a13 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 12:17:04 -0400 Subject: [PATCH 06/32] copyedit feature highlights --- .../_includes/releases/v26.2/v26.2.0.md | 39 +++++++------------ src/current/releases/cloud.md | 35 ++++++----------- 2 files changed, 26 insertions(+), 48 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 371526b92e8..3a526be7de9 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -30,8 +30,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
@@ -63,8 +63,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -102,8 +102,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -113,8 +113,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -141,8 +141,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -150,17 +150,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - - - - - - - -
-

SQL Triggers

-

SQL triggers are now generally available in CockroachDB. A trigger executes a function when one or more specified SQL operations are performed on a table. Triggers respond to data changes by adding logic within the database, rather than in an application. They can be used to modify data before it is inserted, maintain data consistency across rows or tables, or record an update to a row.

+

SQL triggers

+

SQL triggers are now generally available. CockroachDB supports PostgreSQL-compatible BEFORE and AFTER triggers that activate on INSERT, UPDATE, or DELETE operations.

GA {% include icon-yes.html %}
-

Prevent auto-unlock of schema_locked tables

-

A new cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations automatically unlock schema_locked tables. When set to false, DDL statements on schema-locked tables are blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce schema_locked as a hard lock preventing user-initiated DDL. The default is true, preserving existing behavior.

+

Schema lock enforcement

+

The cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations automatically unlock schema_locked tables. When set to false, DDL statements on schema-locked tables are blocked unless the table is manually unlocked first. This allows users of Logical Data Replication (LDR) to enforce schema_locked as a hard lock preventing user-initiated DDL. The default is true.

GA {% include icon-yes.html %}
-

Modernizing Database Authentication: CockroachDB Embraces Zero Trust with SPIFFE and SPIRE Support

-

CockroachDB now supports Subject Alternative Names (SAN) in X.509 certificates, improving compatibility with modern TLS clients and standard certificate management tooling.

+

Certificate-based authentication using X.509 Subject field

+

CockroachDB now supports mapping SQL user roles to distinguished name attributes in the Subject field of X.509 certificates, including OU, UID, and CN. The cluster setting security.client_cert.san_required.enabled optionally allows mapping to Subject Alternative Name (SAN) fields instead. This enables authentication using your existing Certificate Authority infrastructure without requiring CommonName-based restrictions.

Preview {% include icon-yes.html %}
-

Post-Quantum Cryptography Readiness

-

CockroachDB now supports post-quantum cryptographic algorithms, preparing database deployments for the security requirements of the quantum computing era and future-proofing encryption at rest and in transit.

+

Post-quantum cryptography support

+

CockroachDB now supports post-quantum cryptographic algorithms for TLS connections. This applies to both client-to-node and inter-node communication.

Preview {% include icon-yes.html %}
-

Active Session History (ASH): Pinpoint Bottlenecks Across CPU, I/O & Contention

-

Track CPU, I/O, wait events, and contention for session activity (statements, jobs, etc.), enabling faster diagnosis of performance bottlenecks and precise correlation of activity to resource usage.

+

Active Session History

+

Active Session History (ASH) tracks CPU usage, I/O activity, wait events, and contention for session activity including SQL statements and background jobs. Samples are captured at regular intervals, enabling faster diagnosis of performance bottlenecks by correlating session activity with resource consumption.

Preview {% include icon-yes.html %}{% include icon-yes.html %} {% include icon-yes.html %}
-

OpenTelemetry Export Support

-

Export CockroachDB telemetry—logs, metrics, and traces—to your preferred observability stack via OpenTelemetry-compatible sinks, reducing dependency on proprietary collectors.

-
GA{% include icon-yes.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}
@@ -180,8 +169,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc -

Leader Leases

-

CockroachDB now maintains more stable leadership across nodes by reducing unnecessary lease transfers, resulting in more consistent query response times and fewer latency spikes—so your applications stay fast and predictable even as the cluster scales.

+

Leader leases

+

Leader leases are now generally available. This feature maintains more stable leadership across nodes by reducing unnecessary lease transfers, resulting in more consistent query response times and fewer latency spikes.

GA {% include icon-yes.html %} @@ -191,8 +180,8 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc -

Buffered Writes

-

Buffered writes are now generally available, delivering improved throughput and reduced tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

+

Buffered writes

+

Buffered writes are now generally available. This feature improves throughput and reduces tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

GA {% include icon-yes.html %} diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 86320a5a0c1..1f7acb54e2e 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -34,8 +34,8 @@ Get future release notes emailed to you: -

2-DC Active-Passive Architecture

-

Deploy CockroachDB Advanced clusters across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures with near-zero downtime failover.

+

Active-passive deployment across two data centers

+

CockroachDB Advanced clusters can be deployed across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures.

Preview {% include icon-no.html %} @@ -45,8 +45,8 @@ Get future release notes emailed to you: -

CockroachDB Cloud CLI Revamp

-

A modernized CockroachDB Cloud CLI with improved commands, better discoverability, and a more intuitive interface for managing clusters, users, and cloud resources from the terminal.

+

CockroachDB Cloud CLI

+

The CockroachDB Cloud CLI has been redesigned with updated commands for managing clusters, users, and cloud resources from the terminal.

GA {% include icon-no.html %} @@ -56,19 +56,8 @@ Get future release notes emailed to you: -

Agent Roach: AI-Powered In-Console Assistant

-

An AI-powered in-console assistant that helps you troubleshoot issues, optimize queries, and manage CockroachDB clusters using natural language, without leaving the Cloud console.

- - Preview - {% include icon-no.html %} - {% include icon-yes.html %} - {% include icon-yes.html %} - {% include icon-yes.html %} - - - -

Multi-Factor Authentication for CockroachDB Cloud

-

Multi-factor authentication is now enforced for all CockroachDB Cloud users. This reduces the risk of unauthorized access from compromised credentials and strengthens organizational security posture.

+

Multi-factor authentication

+

Multi-factor authentication is now enforced for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials.

GA {% include icon-no.html %} @@ -78,8 +67,8 @@ Get future release notes emailed to you: -

Bring Your Own Cloud (BYOC) for AWS, Azure, and GCP

-

Run CockroachDB Cloud clusters entirely within your own AWS, Azure, or GCP account with Bring Your Own Cloud, giving you full control over networking, security, and data residency while retaining fully managed database operations.

+

Bring your own cloud (BYOC)

+

CockroachDB Cloud clusters can run within your own AWS, Azure, or GCP account. This gives you control over networking, security, and data residency while retaining managed database operations.

Preview {% include icon-no.html %} @@ -89,8 +78,8 @@ Get future release notes emailed to you: -

CockroachDB Cloud MCP Server

-

Connect AI agents and LLM-powered applications to CockroachDB using the Model Context Protocol (MCP), enabling intelligent, database-aware AI workflows without custom integration work.

+

Model Context Protocol (MCP) server

+

The CockroachDB Cloud MCP server allows AI agents and LLM-powered applications to connect to CockroachDB using the Model Context Protocol (MCP).

GA {% include icon-no.html %} @@ -100,8 +89,8 @@ Get future release notes emailed to you: -

Fault Tolerance Demo

-

Experience CockroachDB's resilience firsthand by simulating an AZ failure in a live production cluster and watching the cluster auto-recover, and traffic stay unimpacted.

+

Fault tolerance demo

+

CockroachDB Cloud includes a built-in demo that simulates an availability zone failure in a live cluster. You can watch the cluster automatically recover as traffic continues uninterrupted.

GA {% include icon-no.html %} From eb79e01b0f0e2196e50da9c3c4714ffde0f953a7 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 12:33:54 -0400 Subject: [PATCH 07/32] clean up notes --- .../releases/v26.2/backward-incompatible.md | 34 +++++++++---------- .../releases/v26.2/upgrade-finalization.md | 16 ++++----- .../_includes/releases/v26.2/v26.2.0.md | 2 +- src/current/releases/cloud.md | 4 +-- 4 files changed, 27 insertions(+), 29 deletions(-) diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index bb2c2962580..6a757cca727 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -1,36 +1,36 @@ This section summarizes changes that can cause applications, scripts, or manual workflows to fail or behave differently than in previous releases. This includes [key cluster setting changes](#key-cluster-setting-changes) and [deprecations](#deprecations). -- **`TG_ARGV` indexing:** The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. [#161925](https://github.com/cockroachdb/cockroach/pull/161925) +- The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. 
[#161925](https://github.com/cockroachdb/cockroach/pull/161925) -- **DistSQL scan planning:** The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051](https://github.com/cockroachdb/cockroach/pull/160051) +- The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051](https://github.com/cockroachdb/cockroach/pull/160051) -- **Empty `topic_name` validation:** Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) +- Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) -- **TTL job ownership:** TTL jobs are now owned by the schedule owner instead of the `node` user. This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226](https://github.com/cockroachdb/cockroach/pull/161226) +- TTL jobs are now owned by the schedule owner instead of the `node` user. 
This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226](https://github.com/cockroachdb/cockroach/pull/161226) -- **Inline hints privilege:** Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) +- Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) -- **Statement Details page URL:** The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558](https://github.com/cockroachdb/cockroach/pull/159558) +- The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558](https://github.com/cockroachdb/cockroach/pull/159558) -- **Admission control metrics units:** Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. 
[#160956](https://github.com/cockroachdb/cockroach/pull/160956) +- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. [#160956](https://github.com/cockroachdb/cockroach/pull/160956) -- **Builtin function rename:** Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) +- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) -- **`incremental_location` option:** Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189](https://github.com/cockroachdb/cockroach/pull/159189) +- Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189](https://github.com/cockroachdb/cockroach/pull/159189) -- **`incremental_location` option:** Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416](https://github.com/cockroachdb/cockroach/pull/160416) +- Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. 
[#160416](https://github.com/cockroachdb/cockroach/pull/160416) -- **View owner privilege checking:** When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- **`ALTER CHANGEFEED ADD` validation:** Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) +- Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) -- **PCR reader AOST restriction:** Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. 
This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) +- Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) -- **`TEMPORARY` database privilege:** Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) +- Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) -- **Statement diagnostics bundles:** Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) +- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. 
[#166159](https://github.com/cockroachdb/cockroach/pull/166159) -- **`crdb_internal` view access checks:** User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) +- User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) -- **`REFRESH MATERIALIZED VIEW` RLS:** `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419](https://github.com/cockroachdb/cockroach/pull/167419) +- `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419](https://github.com/cockroachdb/cockroach/pull/167419) diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index b54b706bf01..e67384b7695 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,17 +1,17 @@ This section summarizes the features that are not available until you [finalize the v26.2 upgrade]({% link v26.2/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually). -- **`security_invoker` option for views**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. 
When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) +- {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) -- **`ALTER TABLE ENABLE/DISABLE TRIGGER` syntax**: {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) +- {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. 
[#161924](https://github.com/cockroachdb/cockroach/pull/161924) -- **`skip_unique_checks` storage parameter**: {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) +- {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) -- **`DROP CONSTRAINT` on unique indexes**: {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) +- {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. 
The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) -- **Canary stats in `EXPLAIN` output**: {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) +- {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) -- **View owner privilege checking**: {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. 
Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- **`INSPECT` uniqueness validation for `REGIONAL BY ROW` tables**: {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) +- {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) -- **`IMPORT` row count validation with `INSPECT`**: {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543](https://github.com/cockroachdb/cockroach/pull/163543) +- {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. 
The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543](https://github.com/cockroachdb/cockroach/pull/163543) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 3a526be7de9..88f72f619ff 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -74,7 +74,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc -

Hash-sharded index with prefix columns

+

Hash-sharded indexes with prefix columns

Hash-sharded indexes now support computing the shard value from a subset of index key columns rather than all of them. This gives you finer control over how data is distributed across shards and significantly improves query performance when filtering on only a prefix of the indexed columns.

GA diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 1f7acb54e2e..242ba31cb6c 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -16,8 +16,6 @@ Get future release notes emailed to you: ## April 28, 2026 -

v26.2 feature highlights

-
@@ -90,7 +88,7 @@ Get future release notes emailed to you: From 67a46e73b287c5e1a37096e6ccd4acd26d28fb88 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 13:45:59 -0400 Subject: [PATCH 08/32] amend upgrade finalization notes --- .../releases/v26.2/upgrade-finalization.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index e67384b7695..14579afd6f2 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,17 +1,17 @@ This section summarizes the features that are not available until you [finalize the v26.2 upgrade]({% link v26.2/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually). -- {% comment %}TODO: Verify with @shadiGh{% endcomment %}Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) +- Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) -- {% comment %}TODO: Verify with @rafiss{% endcomment %}Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. 
This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) +- Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) -- {% comment %}TODO: Verify with @DrewKimball{% endcomment %}Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) +- Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. 
[#163378](https://github.com/cockroachdb/cockroach/pull/163378) -- {% comment %}TODO: Verify with @rafiss{% endcomment %}`ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) +- `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) - {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) -- {% comment %}TODO: Verify with @shadiGh{% endcomment %}When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- When selecting from a view, the view owner's privileges on the underlying tables are now checked. 
Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- {% comment %}TODO: Verify with @bgbg{% endcomment %}During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) +- During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) -- {% comment %}TODO: Verify with @bgbg{% endcomment %}Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543](https://github.com/cockroachdb/cockroach/pull/163543) +- The `bulkio.import.row_count_validation.mode` cluster setting controls whether row count validation runs after `IMPORT` operations. When enabled, a background `INSPECT` job validates that the imported row count matches expectations after an `IMPORT` completes. The `IMPORT` result includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. Valid values are `off` (default), `async`, and `sync`. 
[#168403](https://github.com/cockroachdb/cockroach/pull/168403) From 6fe0423a11e120d40ed2250872362f7f037dbc85 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 13:51:52 -0400 Subject: [PATCH 09/32] fix typos --- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- src/current/releases/cloud.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 88f72f619ff..99bcb76a4f7 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -372,7 +372,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159431][#159431] - Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] - Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. [#163507][#163507] -- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. [#163885][#163885] +- Fixed a bug where running changefeeds with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a cluster upgrade. [#163885][#163885] - Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. 
[#164765][#164765] - Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. [#164933][#164933] - Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. [#164757][#164757] diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 242ba31cb6c..25979ba53a8 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -88,7 +88,7 @@ Get future release notes emailed to you: From 06ddabdf9ef3959bf77206e61df3266044f69c2d Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 14:07:05 -0400 Subject: [PATCH 10/32] fix link --- src/current/cockroachcloud/migrations-page.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/current/cockroachcloud/migrations-page.md b/src/current/cockroachcloud/migrations-page.md index 9e9462285bd..3a4b0fd4541 100644 --- a/src/current/cockroachcloud/migrations-page.md +++ b/src/current/cockroachcloud/migrations-page.md @@ -220,7 +220,7 @@ After updating the schema, click [**Retry Migration**](#retry-the-migration). 
If | Column | Description | |-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Description | One of the following suggestion types:
  • **Sequences:** A statement that uses a sequence to define a primary key column. [Using a sequence for a primary key column is not recommended.]({% link {{version_prefix}}create-sequence.md %}#considerations)
  • **Missing Primary Key:** A statement that does not define an explicit primary key for a table. [Defining an explicit primary key on every table is recommended.]({% link {{version_prefix}}schema-design-table.md %}#select-primary-key-columns)
  • **Index Set On Timestamp Related Column:** A statement that creates an index on a [`TIMESTAMP`/`TIMESTAMPTZ`]({% link {{version_prefix}}timestamp.md %}) column. [Indexing on sequential keys can negatively affect performance.]({% link {{version_prefix}}schema-design-indexes.md %}#best-practices)
| +| Description | One of the following suggestion types:
  • **Sequences:** A statement that uses a sequence to define a primary key column. [Using a sequence for a primary key column is not recommended.]({% link {{version_prefix}}create-sequence.md %}#considerations)
  • **Missing Primary Key:** A statement that does not define an explicit primary key for a table. [Defining an explicit primary key on every table is recommended.]({% link {{version_prefix}}performance-best-practices-overview.md %}#unique-id-best-practices)
  • **Index Set On Timestamp Related Column:** A statement that creates an index on a [`TIMESTAMP`/`TIMESTAMPTZ`]({% link {{version_prefix}}timestamp.md %}) column. [Indexing on sequential keys can negatively affect performance.]({% link {{version_prefix}}schema-design-indexes.md %}#best-practices)
| | Complexity | The estimated difficulty of addressing the suggestion. | | Instances | The number of times the suggestion occurs on the provided schema. Click the `+` icon on the row to view up to 20 individual statements where this occurs. | | Actions | The option to **Acknowledge** all instances of the suggestion. This is not required for schema migration. | From 4175b5b2a0fcc3ae65ee0f434aee9484a29232a7 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 14:52:05 -0400 Subject: [PATCH 11/32] fix broken links --- src/current/_includes/releases/v26.2/v26.2.0.md | 12 ++++++------ src/current/_includes/releases/whats-new-intro.md | 2 +- src/current/v26.2/fips.md | 4 +++- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 99bcb76a4f7..bfe1cb5ff5d 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -14,7 +14,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

SQL highlights

+

SQL highlights

Fault tolerance demo

-

CockroachDB Cloud includes a built-in demo that simulates an availability zone failure in a live cluster. You can watch the cluster automatically recover as traffic continues uninterrupted.

+

The built-in [fault tolerance demo]({% link {{ site.versions["stable"] }}/demo-cockroachdb-resilience.md %}#run-a-guided-demo-in-cockroachdb-cloud) is now generally available. This demo simulates an availability zone failure in a live cluster, allowing you to watch the cluster automatically recover as traffic continues uninterrupted.

GA {% include icon-no.html %}

Fault tolerance demo

-

The built-in [fault tolerance demo]({% link {{ site.versions["stable"] }}/demo-cockroachdb-resilience.md %}#run-a-guided-demo-in-cockroachdb-cloud) is now generally available. This demo simulates an availability zone failure in a live cluster, allowing you to watch the cluster automatically recover as traffic continues uninterrupted.

+

The built-in fault tolerance demo is now generally available. This demo simulates an availability zone failure in a live cluster, allowing you to watch the cluster automatically recover as traffic continues uninterrupted.

GA {% include icon-no.html %}
@@ -86,7 +86,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Security highlights

+

Security highlights

@@ -125,7 +125,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Observability highlights

+

Observability highlights

@@ -153,7 +153,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc
-

Performance highlights

+

Performance highlights

@@ -204,11 +204,11 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc {% include releases/v26.2/backward-incompatible.md %} -#### Key cluster setting changes +

Key cluster setting changes

{% include releases/v26.2/cluster-setting-changes.md %} -#### Deprecations +

Deprecations

{% include releases/v26.2/deprecations.md %} diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index 5f46571ba1f..189f0971660 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -102,7 +102,7 @@ CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link release {% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} - For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature highlights](#feature-highlights). -- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes) and newly identified [known limitations](#known-limitations). +- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes), including [key cluster setting changes](#{{ page.major_version | replace: ".", "-" }}-0-key-cluster-setting-changes) and [deprecations](#{{ page.major_version | replace: ".", "-" }}-0-deprecations). {% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} - For details about the support window for this release type, review the [Release Support Policy]({% link releases/release-support-policy.md %}). diff --git a/src/current/v26.2/fips.md b/src/current/v26.2/fips.md index f1d6cf8c51d..ed89a5f8f50 100644 --- a/src/current/v26.2/fips.md +++ b/src/current/v26.2/fips.md @@ -107,6 +107,7 @@ If you do not want to use the FIPS-ready CockroachDB Docker image directly, you To download FIPS-ready CockroachDB runtimes, use the following links. 
{% for s in sections %} + {% if s == "Production" %} {% assign releases = fips_releases | where_exp: "releases", "releases.release_type == s" | sort: "release_date" | reverse %} {% comment %} Fetch all releases for that major version based on release type (Production/Testing). {% endcomment %} @@ -141,7 +142,7 @@ To download FIPS-ready CockroachDB runtimes, use the following links. {% comment %} Add "Latest" class to release if it's the latest release. {% endcomment %}
{% comment %}Version{% endcomment %} - {{ r.release_name }} {% comment %} Add link to each release r. {% endcomment %} + {{ r.release_name }} {% if r.release_name == latest_hotfix.release_name %} Latest {% comment %} Add "Latest" badge to release if it's the latest release. {% endcomment %} {% endif %} @@ -182,6 +183,7 @@ To download FIPS-ready CockroachDB runtimes, use the following links. {% endfor %} {% comment %}Releases {% endcomment %}
{% endif %} + {% endif %} {% endfor %}{% comment %}Sections {%endcomment %} {% else %} No FIPS-ready runtimes are available at this time. Please check again later. From 04c1d53c0c261132e79711770ad898b1e338c7a9 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Tue, 21 Apr 2026 15:27:12 -0400 Subject: [PATCH 12/32] fix links --- src/current/_includes/releases/v26.2/backward-incompatible.md | 2 +- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- src/current/_includes/releases/whats-new-intro.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index 6a757cca727..5fea0ebf9f8 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -1,4 +1,4 @@ -This section summarizes changes that can cause applications, scripts, or manual workflows to fail or behave differently than in previous releases. This includes [key cluster setting changes](#key-cluster-setting-changes) and [deprecations](#deprecations). +This section summarizes changes that can cause applications, scripts, or manual workflows to fail or behave differently than in previous releases. This includes [key cluster setting changes](#v26-2-0-cluster-settings) and [deprecations](#v26-2-0-deprecations). - The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. 
[#161925](https://github.com/cockroachdb/cockroach/pull/161925) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index bfe1cb5ff5d..b33cf9a9d1b 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -204,7 +204,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc {% include releases/v26.2/backward-incompatible.md %} -

Key cluster setting changes

+

Key cluster setting changes

{% include releases/v26.2/cluster-setting-changes.md %} diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index 189f0971660..d85fa78a466 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -102,7 +102,7 @@ CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link release {% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} - For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature highlights](#feature-highlights). -- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes), including [key cluster setting changes](#{{ page.major_version | replace: ".", "-" }}-0-key-cluster-setting-changes) and [deprecations](#{{ page.major_version | replace: ".", "-" }}-0-deprecations). +- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes), including [key cluster setting changes](#{{ page.major_version | replace: ".", "-" }}-0-cluster-settings) and [deprecations](#{{ page.major_version | replace: ".", "-" }}-0-deprecations). {% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} - For details about the support window for this release type, review the [Release Support Policy]({% link releases/release-support-policy.md %}). 
From 989a4cbd581f8dc3a3f48bd7b329cd306abe6e46 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Thu, 23 Apr 2026 11:25:40 -0400 Subject: [PATCH 13/32] update backward-incompatible changes, Cloud-first messaging --- src/current/_data/releases.yml | 2 +- .../releases/v26.2/backward-incompatible.md | 40 +++++++++++-------- .../_includes/releases/v26.2/deprecations.md | 2 +- .../_includes/releases/v26.2/v26.2.0.md | 6 +-- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml index fee9a61ffe1..b7376ab1439 100644 --- a/src/current/_data/releases.yml +++ b/src/current/_data/releases.yml @@ -10926,7 +10926,7 @@ cloud_only_message: > This version is currently available only for select CockroachDB Cloud clusters. Binaries for self-hosted clusters will be available - on May 13, 2026. + approximately 2 weeks after Cloud availability. - release_name: v26.2.0-alpha.2 major_version: v26.2 diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index 5fea0ebf9f8..935a0650d8d 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -2,35 +2,43 @@ This section summarizes changes that can cause applications, scripts, or manual - The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. [#161925](https://github.com/cockroachdb/cockroach/pull/161925) -- The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. 
This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051](https://github.com/cockroachdb/cockroach/pull/160051) - -- Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) +- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- TTL jobs are now owned by the schedule owner instead of the `node` user. This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226](https://github.com/cockroachdb/cockroach/pull/161226) +- `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419](https://github.com/cockroachdb/cockroach/pull/167419) -- Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. 
[#160716](https://github.com/cockroachdb/cockroach/pull/160716) +- User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) -- The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558](https://github.com/cockroachdb/cockroach/pull/159558) +- Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189](https://github.com/cockroachdb/cockroach/pull/159189) -- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. [#160956](https://github.com/cockroachdb/cockroach/pull/160956) +- Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416](https://github.com/cockroachdb/cockroach/pull/160416) -- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) +- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. 
[#166920](https://github.com/cockroachdb/cockroach/pull/166920) -- Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189](https://github.com/cockroachdb/cockroach/pull/159189) +- Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) -- Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416](https://github.com/cockroachdb/cockroach/pull/160416) +- Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) -- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. 
[#160798](https://github.com/cockroachdb/cockroach/pull/160798) - Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. [#164433](https://github.com/cockroachdb/cockroach/pull/164433) -- Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382](https://github.com/cockroachdb/cockroach/pull/165382) +- Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225](https://github.com/cockroachdb/cockroach/pull/164225) -- Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992](https://github.com/cockroachdb/cockroach/pull/165992) +- The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. [#160538](https://github.com/cockroachdb/cockroach/pull/160538) -- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. 
Set the cluster setting to `1` to restore single-bundle behavior. [#166159](https://github.com/cockroachdb/cockroach/pull/166159) +- TTL jobs are now owned by the schedule owner instead of the `node` user. This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226](https://github.com/cockroachdb/cockroach/pull/161226) -- User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023](https://github.com/cockroachdb/cockroach/pull/167023) +- The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051](https://github.com/cockroachdb/cockroach/pull/160051) -- `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419](https://github.com/cockroachdb/cockroach/pull/167419) +- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834](https://github.com/cockroachdb/cockroach/pull/163834) + +- RPC connection metrics now include a `protocol` label. 
The following metrics are affected: `rpc.connection.avg_round_trip_latency`, `rpc.connection.failures`, `rpc.connection.healthy`, `rpc.connection.healthy_nanos`, `rpc.connection.heartbeats`, `rpc.connection.tcp_rtt`, `rpc.connection.tcp_rtt_var`, `rpc.connection.unhealthy`, `rpc.connection.unhealthy_nanos`, and `rpc.connection.inactive`. In v26.2, the label value is always `grpc`. For example: `rpc_connection_healthy{node_id="1",remote_node_id="0",remote_addr="localhost:26258",class="system",protocol="grpc"} 1` [#162528](https://github.com/cockroachdb/cockroach/pull/162528) + +- Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) + +- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716](https://github.com/cockroachdb/cockroach/pull/160716) + +- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. [#160956](https://github.com/cockroachdb/cockroach/pull/160956) + +- The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. 
[#159558](https://github.com/cockroachdb/cockroach/pull/159558) diff --git a/src/current/_includes/releases/v26.2/deprecations.md b/src/current/_includes/releases/v26.2/deprecations.md index 50e084a26a8..beaa353fba0 100644 --- a/src/current/_includes/releases/v26.2/deprecations.md +++ b/src/current/_includes/releases/v26.2/deprecations.md @@ -2,8 +2,8 @@ | Deprecated | Description | |---|---| +| `cockroach encode-uri` command | The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. [#164561](https://github.com/cockroachdb/cockroach/pull/164561) | | `enable_inspect_command` session variable | `INSPECT` is now a generally available (GA) feature. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. [#159659](https://github.com/cockroachdb/cockroach/pull/159659) | | `enable_super_regions` session variable and `sql.defaults.super_regions.enabled` cluster setting | The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227](https://github.com/cockroachdb/cockroach/pull/165227) | -| `cockroach encode-uri` command | The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. 
[#164561](https://github.com/cockroachdb/cockroach/pull/164561) |
diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index b33cf9a9d1b..137cfc11bb6 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -270,7 +270,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] - Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] - A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. [#156771][#156771] -- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. [#166920][#166920] - Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236] - Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. 
[#165744][#165744] - CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369] @@ -307,11 +306,10 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514][#164514] - A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555][#166555] - Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740][#166740] +- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159][#166159] - The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. 
It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] -- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] - Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. [#160129][#160129] - Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] -- RPC connection metrics now include a `protocol` label. The following metrics are affected: `rpc.connection.avg_round_trip_latency`, `rpc.connection.failures`, `rpc.connection.healthy`, `rpc.connection.healthy_nanos`, `rpc.connection.heartbeats`, `rpc.connection.tcp_rtt`, `rpc.connection.tcp_rtt_var`, `rpc.connection.unhealthy`, `rpc.connection.unhealthy_nanos`, and `rpc.connection.inactive`. In v26.2, the label value is always `grpc`. 
For example: `rpc_connection_healthy{node_id="1",remote_node_id="0",remote_addr="localhost:26258",class="system",protocol="grpc"} 1` [#162528][#162528] - Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161043][#161043] - Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] - Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] @@ -322,13 +320,11 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Jobs now clear their running status messages upon successful completion. [#163765][#163765] - Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. [#160901][#160901] - When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. 
This lets support engineers correlate the same user across multiple log entries without revealing the actual values. [#165804][#165804] -- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160798][#160798] - Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725]

Command-line changes

-- The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. [#160538][#160538] - The `cockroach debug tsdump` command now supports ZSTD encoding via `--format=raw --encoding=zstd`. This generates compressed tsdump files that are approximately 85% smaller than raw format. The `tsdump upload` command automatically detects and decompresses ZSTD files, allowing direct upload without manual decompression. [#161998][#161998] - The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#163266][#163266] - Added the `--exclude-log-severities` flag to `cockroach debug zip` that filters log entries by severity server-side. For example, `--exclude-log-severities=INFO` excludes all `INFO`-level log entries from the collected log files, which can significantly reduce zip file size for large clusters. Valid severity names are `INFO`, `WARNING`, `ERROR`, and `FATAL`. The flag accepts a comma-delimited list or can be specified multiple times. 
[#165802][#165802] From 1dd2c8604d12e12485ee9da329afeef8d6635af1 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Thu, 23 Apr 2026 13:36:28 -0400 Subject: [PATCH 14/32] reorder release notes by potential impact --- .../releases/v26.2/upgrade-finalization.md | 10 +-- .../_includes/releases/v26.2/v26.2.0.md | 83 ++++++++++--------- 2 files changed, 51 insertions(+), 42 deletions(-) diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index 14579afd6f2..b5d83cab1de 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -1,17 +1,17 @@ This section summarizes the features that are not available until you [finalize the v26.2 upgrade]({% link v26.2/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually). +- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) + - Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. 
[#164184](https://github.com/cockroachdb/cockroach/pull/164184) - Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) -- Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) - - `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) -- {% comment %}TODO: Verify with @ZhouXing19{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) +- The `bulkio.import.row_count_validation.mode` cluster setting controls whether row count validation runs after `IMPORT` operations. 
When enabled, a background `INSPECT` job validates that the imported row count matches expectations after an `IMPORT` completes. The `IMPORT` result includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. Valid values are `off` (default), `async`, and `sync`. [#168403](https://github.com/cockroachdb/cockroach/pull/168403) -- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664](https://github.com/cockroachdb/cockroach/pull/164664) +- Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) - During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) -- The `bulkio.import.row_count_validation.mode` cluster setting controls whether row count validation runs after `IMPORT` operations. When enabled, a background `INSPECT` job validates that the imported row count matches expectations after an `IMPORT` completes. 
The `IMPORT` result includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. Valid values are `off` (default), `async`, and `sync`. [#168403](https://github.com/cockroachdb/cockroach/pull/168403) +- {% comment %}TODO: Verify with @michae2{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 137cfc11bb6..05d70ba5764 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -240,15 +240,12 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

SQL language changes

-- Added cluster settings to control the number of concurrent automatic statistics collection jobs: - - - `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. - - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. - - Note that at most one statistics collection job can run on a single table at a time. [#158835][#158835] -- Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. [#159330][#159330] -- Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] -- Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855][#166855] +- `CREATE OR REPLACE TRIGGER` is now supported. 
If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. [#162633][#162633] +- Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. [#161915][#161915] +- `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. [#163296][#163296] +- `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. The replacement now atomically updates all dependent triggers to execute the new function body. [#163348][#163348] +- Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] +- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] - Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. 
Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` [#160137][#160137] - If the new `SHOW BACKUP` experience is enabled by setting the `use_backups_with_ids` session variable to true, `SHOW BACKUP` will parse the IDs provided by `SHOW BACKUPS` and display contents for single backups. [#160812][#160812] - If the new `RESTORE` experience is enabled by setting the `use_backups_with_ids` session variable to true, `RESTORE` will parse the IDs provided by `SHOW BACKUPS` and will restore the specified backup without the use of `AS OF SYSTEM TIME`. [#161294][#161294] @@ -263,62 +260,65 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. [#165457][#165457] - `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. [#165712][#165712] - The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. [#167192][#167192] -- `CREATE OR REPLACE TRIGGER` is now supported. If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. [#162633][#162633] -- Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. 
[#161915][#161915] -- `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. [#163296][#163296] -- `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. The replacement now atomically updates all dependent triggers to execute the new function body. [#163348][#163348] -- Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] -- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] -- A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. [#156771][#156771] +- Added support for importing Parquet files using the `IMPORT` statement. Parquet files can be imported from cloud storage URLs (`s3://`, `gs://`, `azure://`) or HTTP servers that support range requests (`Accept-Ranges: bytes`). This feature supports column-level compression formats (Snappy, GZIP, ZSTD, Brotli, etc.) as specified in the Parquet file format, but does not support additional file-level compression (e.g., `.parquet.gz` files). Nested Parquet types (lists, maps, structs) are not supported; only flat schemas with primitive types are supported at this time. [#163991][#163991] +- Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. 
When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. [#159330][#159330] +- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] - Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236] - Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. [#165744][#165744] -- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. 
[#164369][#164369] - Added support for the `dmetaphone()`, `dmetaphone_alt()`, and `daitch_mokotoff()` built-in functions, completing CockroachDB's implementation of the PostgreSQL `fuzzystrmatch` extension. `dmetaphone` and `dmetaphone_alt` return Double Metaphone phonetic codes for a string, and `daitch_mokotoff` returns an array of Daitch-Mokotoff soundex codes. These functions are useful for fuzzy string matching based on phonetic similarity. [#163430][#163430] - Added `to_date(text, text)` and `to_timestamp(text, text)` SQL functions that parse dates and timestamps from formatted strings using PostgreSQL-compatible format patterns. For example, `to_date('2023-03-15', 'YYYY-MM-DD')` returns a date, and `to_timestamp('2023-03-15 14:30:45', 'YYYY-MM-DD HH24:MI:SS')` returns a `timestamptz`. [#164672][#164672] -- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] -- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] +- CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. [#164403][#164403] - Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. 
[#150663][#150663] - Aggregation function `ST_AsMVT` can now also be used as a window function. [#166860][#166860] -- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] +- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369] +- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] +- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] +- Added cluster settings to control the number of concurrent automatic statistics collection jobs: + + - `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. + - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. + + Note that at most one statistics collection job can run on a single table at a time. 
[#158835][#158835] +- Exposed the following settings for canary table statistics: + - Cluster setting `sql.stats.canary_fraction`: probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning [0.0-1.0]. + - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944][#167944] +- Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] +- `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. [#161763][#161763] +- Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405] +- A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. 
[#156771][#156771] +- Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855][#166855] - Queries executed via the vectorized engine now display their progress in the `phase` column of `SHOW QUERIES`. Previously, this feature was only available in the row-by-row engine. [#158029][#158029] - CockroachDB now shows execution statistics (like `execution time`) on `EXPLAIN ANALYZE` output for `render` nodes, which often handle built-in functions. [#161509][#161509] - The output of `EXPLAIN [ANALYZE]` in non-`VERBOSE` mode is now more succinct. [#153361][#153361] +- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880] - `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. [#156963][#156963] - The `information_schema.crdb_datums_to_bytes` built-in function is now documented. [#160486][#160486] - Active Session History tables are now accessible via `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history`, in addition to the existing `crdb_internal` tables. This improves discoverability when browsing `information_schema` for available metadata. [#164969][#164969] - Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. 
The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. [#165866][#165866] -- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880] -- Exposed the following settings for canary table statistics: - - Cluster setting `sql.stats.canary_fraction`: probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning [0.0-1.0]. - - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944][#167944] -- CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. [#164403][#164403] -- Added support for importing Parquet files using the `IMPORT` statement. Parquet files can be imported from cloud storage URLs (`s3://`, `gs://`, `azure://`) or HTTP servers that support range requests (`Accept-Ranges: bytes`). This feature supports column-level compression formats (Snappy, GZIP, ZSTD, Brotli, etc.) as specified in the Parquet file format, but does not support additional file-level compression (e.g., `.parquet.gz` files). 
Nested Parquet types (lists, maps, structs) are not supported; only flat schemas with primitive types are supported at this time. [#163991][#163991] -- `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. [#161763][#161763] -- Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405]

Operational changes

-- Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. [#161265][#161265] +- The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] +- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159][#166159] +- Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. [#161062][#161062] - Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. 
This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037][#164037] - Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. [#159436][#159436] - Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093][#165093] -- Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514][#164514] - A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555][#166555] +- Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514][#164514] +- Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' 
WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. [#161265][#161265] - Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740][#166740] -- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159][#166159] -- The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] +- Changefeed ranges are now more accurately reported as lagging. 
[#163427][#163427] +- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] - Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. [#160129][#160129] - Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] - Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. 
[#161043][#161043] -- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] - Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] -- Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. [#161062][#161062] - External connections can now be used with online restore. [#159090][#159090] - Backup schedules that utilize the `revision_history` option now apply that option only to incremental backups triggered by that schedule, rather than duplicating the revision history in the full backups as well. [#162105][#162105] -- Changefeed ranges are now more accurately reported as lagging. [#163427][#163427] -- Jobs now clear their running status messages upon successful completion. [#163765][#163765] - Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. [#160901][#160901] +- Jobs now clear their running status messages upon successful completion. 
[#163765][#163765] - When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. This lets support engineers correlate the same user across multiple log entries without revealing the actual values. [#165804][#165804] - Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725] @@ -339,6 +339,16 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Bug fixes

+- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. [#161318][#161318] +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159627][#159627] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#166325][#166325] +- Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. [#167377][#167377] +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165260][#165260] +- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. [#160674][#160674] +- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. [#166705][#166705] +- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. 
[#160499][#160499] +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159431][#159431] +- Fixed a deadlock that could occur when a statistics creation task panicked. [#160348][#160348] - The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). [#163947][#163947] - Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). [#164477][#164477] - CockroachDB could previously encounter internal errors like `column statistics cannot be determined for empty column set` and `invalid union` in some edge cases with `UNION`, `EXCEPT`, and `INTERSECT`. This has now been fixed. [#150706][#150706] @@ -365,7 +375,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. [#164557][#164557] - Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. [#164739][#164739] - Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller.
[#159431][#159431] - Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] - Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. [#163507][#163507] - Fixed a bug where running changefeeds with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a cluster upgrade. [#163885][#163885] From 497910a786cfb349c460e5a3e196d233fa40cf9e Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Thu, 23 Apr 2026 18:37:32 -0400 Subject: [PATCH 15/32] address docs review comments --- .../releases/v26.2/backward-incompatible.md | 2 ++ .../releases/v26.2/cluster-setting-changes.md | 4 +-- .../releases/v26.2/upgrade-finalization.md | 12 ++++--- .../_includes/releases/v26.2/v26.2.0.md | 36 +++++++++++-------- src/current/releases/cloud.md | 33 +++++------------ 5 files changed, 43 insertions(+), 44 deletions(-) diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index 935a0650d8d..c2e743eb309 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -42,3 +42,5 @@ This section summarizes changes that can cause applications, scripts, or manual - The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558](https://github.com/cockroachdb/cockroach/pull/159558) +- Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). 
[#164037](https://github.com/cockroachdb/cockroach/pull/164037) + diff --git a/src/current/_includes/releases/v26.2/cluster-setting-changes.md b/src/current/_includes/releases/v26.2/cluster-setting-changes.md index a43b8532222..d52305d0071 100644 --- a/src/current/_includes/releases/v26.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.2/cluster-setting-changes.md @@ -7,8 +7,8 @@ Review the following changes **before** upgrading. New default values will be us | `bulkio.import.elastic_control.enabled` | The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867](https://github.com/cockroachdb/cockroach/pull/163867) | `false` | `true` | None | | `bulkio.index_backfill.elastic_control.enabled` | The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866](https://github.com/cockroachdb/cockroach/pull/163866) | `false` | `true` | None | | `bulkio.ingest.sst_batcher_elastic_control.enabled` | The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868](https://github.com/cockroachdb/cockroach/pull/163868) | `false` | `true` | None | -| `changefeed.max_retry_backoff` | Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874](https://github.com/cockroachdb/cockroach/pull/164874) | `10m` | `30s` | v25.4, v26.1 | -| `kv.range_split.load_sample_reset_duration` | The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. 
This should improve load-based splitting in rare edge cases. [#159499](https://github.com/cockroachdb/cockroach/pull/159499) | `0` | `30m` | None | +| `changefeed.max_retry_backoff` | Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874](https://github.com/cockroachdb/cockroach/pull/164874) | `10m` | `30s` | v25.4, v26.1 | +| `kv.range_split.load_sample_reset_duration` | The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159499](https://github.com/cockroachdb/cockroach/pull/159499) | `0` | `30m` | v26.1 | | `sql.catalog.allow_leased_descriptors.enabled` | Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#159162](https://github.com/cockroachdb/cockroach/pull/159162) | `false` | `true` | v26.1 | | `sql.guardrails.max_row_size_err` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. 
[#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `512 MiB` | `80 MiB` | None | | `sql.guardrails.max_row_size_log` | Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468](https://github.com/cockroachdb/cockroach/pull/164468) | `64 MiB` | `16 MiB` | None | diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index b5d83cab1de..3c76a0dae44 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -2,16 +2,20 @@ This section summarizes the features that are not available until you [finalize - When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. 
[#164664](https://github.com/cockroachdb/cockroach/pull/164664) -- Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) +- Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) - Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) - `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) -- The `bulkio.import.row_count_validation.mode` cluster setting controls whether row count validation runs after `IMPORT` operations. When enabled, a background `INSPECT` job validates that the imported row count matches expectations after an `IMPORT` completes. The `IMPORT` result includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately.
Valid values are `off` (default), `async`, and `sync`. [#168403](https://github.com/cockroachdb/cockroach/pull/168403) +- Added the `sql.stats.canary_fraction` cluster setting to enable canary table statistics. This setting controls the probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning `[0.0-1.0]`. [#167944](https://github.com/cockroachdb/cockroach/pull/167944) + +- Added the `canary_stats_mode` session variable. When `sql.stats.canary_fraction` is greater than `0`, `canary_stats_mode` controls which table statistics are used for query planning on the current session. Valid values are: `on` (always uses the newest canary stats immediately when they are collected), `off` (delays using new stats until they outlive the canary window), and `auto` (selects probabilistically based on the canary fraction). Has no effect when `sql.stats.canary_fraction` is `0`. [#167944](https://github.com/cockroachdb/cockroach/pull/167944) + +- `EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) + +- The `bulkio.import.row_count_validation.mode` cluster setting controls whether row count validation runs after `IMPORT` operations. When enabled, a background `INSPECT` job validates that the imported row count matches expectations after an `IMPORT` completes. The `IMPORT` result includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. Valid values are `off` (default), `async`, and `sync`.
[#168403](https://github.com/cockroachdb/cockroach/pull/168403) - Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378](https://github.com/cockroachdb/cockroach/pull/163378) - During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449](https://github.com/cockroachdb/cockroach/pull/164449) - -- {% comment %}TODO: Verify with @michae2{% endcomment %}`EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129](https://github.com/cockroachdb/cockroach/pull/166129) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 05d70ba5764..651af0961fd 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -42,7 +42,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

PostgreSQL-compatible fuzzystrmatch functions support

-

CockroachDB now supports PostgreSQL-compatible fuzzystrmatch built-in functions: dmetaphone(), dmetaphone_alt(), and daitch_mokotoff(). These functions are useful for fuzzy string matching based on phonetic similarity. dmetaphone and dmetaphone_alt return Double Metaphone phonetic codes for a string, and daitch_mokotoff returns an array of Daitch-Mokotoff soundex codes.

+

CockroachDB now supports PostgreSQL-compatible fuzzystrmatch built-in functions: soundex(), difference(), levenshtein(), levenshtein_less_equal(), metaphone(), dmetaphone(), dmetaphone_alt(), and daitch_mokotoff(). These functions are useful for fuzzy string matching based on phonetic similarity.

GA {% include icon-yes.html %} @@ -53,7 +53,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

PostgreSQL-compatible TCP keepalive session variables

-

CockroachDB now supports PostgreSQL-compatible TCP keepalive session variables tcp_keepalives_idle, tcp_keepalives_interval, tcp_keepalives_count, and tcp_user_timeout.

+

CockroachDB now supports PostgreSQL-compatible TCP keepalive session variables tcp_keepalives_idle, tcp_keepalives_interval, tcp_keepalives_count, and tcp_user_timeout.

GA {% include icon-yes.html %} @@ -64,7 +64,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Schema lock enforcement

-

The cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations automatically unlock schema_locked tables. When set to false, DDL statements on schema-locked tables are blocked unless manually unlocked. This allows users of Logical Data Replication (LDR) to enforce schema_locked as a hard lock preventing user-initiated DDL. The default is true.

+

The cluster setting sql.schema.auto_unlock.enabled controls whether DDL operations attempt to automatically unlock and re-lock schema_locked tables. When set to false, DDL statements on schema-locked tables are blocked unless manually unlocked. This allows users of Logical Data Replication (LDR) to enforce schema_locked as a hard lock preventing user-initiated DDL. Some schema changes still require manual unlock even when this setting is true. The default is true.

GA {% include icon-yes.html %} @@ -75,7 +75,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Hash-sharded indexes with prefix columns

-

Hash-sharded indexes now support computing the shard value from a subset of index key columns rather than all of them. This gives you finer control over how data is distributed across shards and significantly improves query performance when filtering on only a prefix of the indexed columns.

+

Hash-sharded indexes now support computing the shard value from a subset of index key columns rather than all of them. This gives you finer control over how data is distributed across shards and significantly improves query performance when filtering on only a prefix of the indexed columns.

GA {% include icon-yes.html %} @@ -103,7 +103,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Certificate-based authentication using X.509 Subject field

-

CockroachDB now supports mapping SQL user roles to distinguished name attributes in the Subject field of X.509 certificates, including OU, UID, and CN. The cluster setting security.client_cert.san_required.enabled optionally allows mapping to Subject Alternative Name (SAN) fields instead. This enables authentication using your existing Certificate Authority infrastructure without requiring CommonName-based restrictions.

+

CockroachDB now supports mapping SQL user roles to distinguished name attributes in the Subject field of X.509 certificates, including OU, UID, and CN. The cluster setting security.client_cert.san_required.enabled optionally allows mapping to Subject Alternative Name (SAN) fields instead. This enables authentication using your existing Certificate Authority infrastructure without requiring CommonName-based restrictions.

Preview {% include icon-yes.html %} @@ -114,7 +114,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Post-quantum cryptography support

-

CockroachDB now supports post-quantum cryptographic algorithms for TLS connections. This applies to both client-to-node and inter-node communication.

+

CockroachDB now supports post-quantum cryptographic algorithms for TLS 1.3 connections. This applies to both client-to-node and inter-node communication.

Preview {% include icon-yes.html %} @@ -142,7 +142,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Active Session History

-

Active Session History (ASH) tracks CPU usage, I/O activity, wait events, and contention for session activity including SQL statements and background jobs. Samples are captured at regular intervals, enabling faster diagnosis of performance bottlenecks by correlating session activity with resource consumption.

+

Active Session History (ASH) tracks CPU usage, I/O activity, wait events, and contention for session activity including SQL statements and background jobs. Samples are captured at regular intervals, enabling faster diagnosis of performance bottlenecks by correlating session activity with resource consumption.

Preview {% include icon-yes.html %} @@ -170,7 +170,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Leader leases

-

Leader leases are now generally available. This feature maintains more stable leadership across nodes by reducing unnecessary lease transfers, resulting in more consistent query response times and fewer latency spikes.

+

Leader leases are now generally available. This feature maintains more stable leadership across nodes by reducing unnecessary lease transfers, resulting in more consistent query response times and fewer latency spikes.

GA {% include icon-yes.html %} @@ -181,7 +181,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Buffered writes

-

Buffered writes are now generally available. This feature improves throughput and reducing tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

+

Buffered writes are now generally available. This feature improves throughput and reduces tail latency under heavy write workloads by batching writes efficiently before flushing to disk.

GA {% include icon-yes.html %} @@ -216,7 +216,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Security updates

- LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. [#163199][#163199] -- The new cluster setting `security.client_cert.san_required.enabled` enables Subject Alternative Name (SAN) based authentication for client certificates. When enabled, CockroachDB validates client identities using SAN attributes (URIs, DNS names, or IP addresses) from X.509 certificates instead of or in addition to the certificate's Common Name field. +- The new cluster setting `security.client_cert.san_required.enabled` enables Subject Alternative Name (SAN) based authentication for client certificates. When enabled, CockroachDB validates client identities using SAN attributes (URIs, DNS names, or IP addresses) from X.509 certificates instead of or in addition to the certificate's Common Name field. Key capabilities include: @@ -234,6 +234,10 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Enterprise edition changes

+{{site.data.alerts.callout_info}} +For details on license types, refer to Licensing FAQs. +{{site.data.alerts.end}} + - Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#159787][#159787] - LDAP authentication for the DB Console now additionally supports role-based access control (RBAC) through LDAP group membership. To use this feature, an administrator must first create roles in CockroachDB with names that match the Common Names (CN) of their LDAP groups. These roles should then be granted the desired privileges for DB Console access. When a user who is a member of a corresponding LDAP group logs into the DB Console, they will be automatically granted the role and its associated privileges, creating consistent behavior with SQL client connections. [#162302][#162302] @@ -279,9 +283,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. Note that at most one statistics collection job can run on a single table at a time. [#158835][#158835] -- Exposed the following settings for canary table statistics: - - Cluster setting `sql.stats.canary_fraction`: probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning [0.0-1.0]. 
- - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944][#167944] - Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] - `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. [#161763][#161763] - Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405] @@ -302,7 +303,6 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc - The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. 
[#161050][#161050] - Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159][#166159] - Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. [#161062][#161062] -- Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037][#164037] - Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. [#159436][#159436] - Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093][#165093] - A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). 
[#166555][#166555] @@ -464,9 +464,17 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc This section describes newly identified limitations in CockroachDB v26.2. +#### Hint injections + - Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) - Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) +#### Active Session History + +- ASH is not recommended for nodes with 64 or more vCPUs, due to degraded performance on those nodes. [#168289](https://github.com/cockroachdb/cockroach/issues/168289) +- On Basic and Standard CockroachDB {{ site.data.products.cloud }} clusters, ASH samples only cover work running on the [SQL]({% link {{ page.version.version }}/architecture/sql-layer.md %}) pod. KV-level work ([storage]({% link {{ page.version.version }}/architecture/storage-layer.md %}) I/O, [lock waits]({% link {{ page.version.version }}/troubleshoot-lock-contention.md %}), [replication]({% link {{ page.version.version }}/architecture/replication-layer.md %}), etc.) is not visible in ASH samples. +- KV work triggered during [COMMIT]({% link {{ page.version.version }}/commit-transaction.md %}) (for example, [intent resolution]({% link {{ page.version.version }}/architecture/transaction-layer.md %}), [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) proposals deferred from earlier statements in an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %})) is attributed to the last [statement's fingerprint]({% link {{ page.version.version }}/ui-statements-page.md %}), not the statement that originally caused the work. 
[#165864](https://github.com/cockroachdb/cockroach/issues/165864) + [#146250]: https://github.com/cockroachdb/cockroach/pull/146250 [#150663]: https://github.com/cockroachdb/cockroach/pull/150663 [#150706]: https://github.com/cockroachdb/cockroach/pull/150706 diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 25979ba53a8..1b525508dd8 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -30,21 +30,10 @@ Get future release notes emailed to you: - - -

Active-passive deployment across two data centers

-

CockroachDB Advanced clusters can be deployed across two geographically separated data centers in an active-passive configuration, providing resilience against data center failures.

- - Preview - {% include icon-no.html %} - {% include icon-no.html %} - {% include icon-no.html %} - {% include icon-yes.html %} -

CockroachDB Cloud CLI

-

The CockroachDB Cloud CLI has been redesigned with updated commands for managing clusters, users, and cloud resources from the terminal.

+

The CockroachDB Cloud CLI has been redesigned with updated commands for managing clusters, users, and cloud resources from the terminal.

GA {% include icon-no.html %} @@ -54,10 +43,10 @@ Get future release notes emailed to you: -

Multi-factor authentication

-

Multi-factor authentication is now enforced for all CockroachDB Cloud users, reducing the risk of unauthorized access from compromised credentials.

+

Multi-factor authentication enforcement for the Cloud Console

+

Multi-factor authentication can now be enforced for CockroachDB Cloud users who access the Console via username and password, reducing the risk of unauthorized access from compromised credentials.

- GA + Preview {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -65,8 +54,8 @@ Get future release notes emailed to you: -

Bring your own cloud (BYOC)

-

CockroachDB Cloud clusters can run within your own AWS, Azure, or GCP account. This gives you control over networking, security, and data residency while retaining managed database operations.

+

Bring Your Own Cloud (BYOC)

+

You can deploy CockroachDB Cloud clusters within your own AWS, Azure, or GCP account. This gives you access to your existing cloud savings and control over networking, security, and data residency while retaining the managed database operations of CockroachDB Cloud.

Preview {% include icon-no.html %} @@ -77,7 +66,7 @@ Get future release notes emailed to you:

Model Context Protocol (MCP) server

-

The CockroachDB Cloud MCP server allows AI agents and LLM-powered applications to connect to CockroachDB using the Model Context Protocol (MCP).

+

The CockroachDB Cloud MCP server allows your AI agents and LLM-powered applications to connect to CockroachDB using the Model Context Protocol (MCP).

GA {% include icon-no.html %} @@ -88,9 +77,9 @@ Get future release notes emailed to you:

Fault tolerance demo

-

The built-in fault tolerance demo is now generally available. This demo simulates an availability zone failure in a live cluster, allowing you to watch the cluster automatically recover as traffic continues uninterrupted.

+

Run a built-in fault tolerance demo in the CockroachDB Cloud Console that allows you to monitor query execution during a simulated failure and recovery.

- GA + Preview {% include icon-no.html %} {% include icon-yes.html %} {% include icon-yes.html %} @@ -101,10 +90,6 @@ Get future release notes emailed to you:
-## Feb 24, 2026 - -CockroachDB {{ site.data.products.cloud }} {{ site.data.products.advanced }} users can now run a built-in [fault tolerance demo]({% link {{ site.versions["stable"] }}/demo-cockroachdb-resilience.md %}#run-a-guided-demo-in-cockroachdb-cloud) that allows you to monitor query execution during a simulated failure and recovery. The fault tolerance demo is in [Preview]({% link {{ site.versions["stable"] }}/cockroachdb-feature-availability.md %}). - ## Aug 5, 2025 Console users with the [Billing Coordinator role]({% link cockroachcloud/authorization.md %}#billing-coordinator) can now [export invoices]({% link cockroachcloud/billing-management.md %}#export-invoices) in a PDF format, rendering billing information into a traditional invoice format for ease of distribution. From 95df897578b9bca19914b7a6aa539846008f90a4 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Thu, 23 Apr 2026 19:00:43 -0400 Subject: [PATCH 16/32] Retrigger CI From 43ceca1ee9ec0bd265b9684c8e96de7aec1aa25c Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 11:17:56 -0400 Subject: [PATCH 17/32] update release date --- src/current/_data/releases.yml | 2 +- src/current/_data/versions.csv | 2 +- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- src/current/releases/cloud.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml index b7376ab1439..bcebbba9056 100644 --- a/src/current/_data/releases.yml +++ b/src/current/_data/releases.yml @@ -10897,7 +10897,7 @@ - release_name: v26.2.0 major_version: v26.2 - release_date: '2026-04-28' + release_date: '2026-04-27' release_type: Production go_version: go1.25.5 sha: TBD diff --git a/src/current/_data/versions.csv b/src/current/_data/versions.csv index 190d4cd72e3..276288281f5 100644 --- a/src/current/_data/versions.csv +++ b/src/current/_data/versions.csv @@ -21,4 +21,4 @@ 
v25.2,2025-05-09,2026-05-12,2026-11-12,25.2.9,25.2.10,2025-12-17,2026-12-17,2027 v25.3,2025-08-04,2026-02-04,N/A,N/A,N/A,N/A,N/A,N/A,v25.2,release-25.3,2029-08-04 v25.4,2025-11-03,2026-11-03,2027-05-03,N/A,N/A,N/A,N/A,N/A,v25.3,release-25.4,2029-11-03 v26.1,2026-02-02,2026-08-02,N/A,N/A,N/A,N/A,N/A,N/A,v25.4,release-26.1,2030-02-02 -v26.2,2026-04-28,2027-04-28,2027-10-28,N/A,N/A,N/A,N/A,N/A,v26.1,release-26.2,2030-04-28 +v26.2,2026-04-27,2027-04-27,2027-10-27,N/A,N/A,N/A,N/A,N/A,v26.1,release-26.2,2030-04-27 diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 651af0961fd..0234c9e0026 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -1,6 +1,6 @@ ## v26.2.0 -Release Date: April 28, 2026 +Release Date: April 27, 2026 {% include releases/new-release-downloads-docker-image.md release=include.release %} diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 1b525508dd8..8698905c14d 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -14,7 +14,7 @@ Get future release notes emailed to you: {% include marketo.html formId=1083 %} -## April 28, 2026 +## April 27, 2026
From 49a036c21d4ddbabf148340dbaa2207d9cae283c Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 11:20:37 -0400 Subject: [PATCH 18/32] remove MFA highlight --- src/current/releases/cloud.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/current/releases/cloud.md b/src/current/releases/cloud.md index 8698905c14d..ceb7faf1cb3 100644 --- a/src/current/releases/cloud.md +++ b/src/current/releases/cloud.md @@ -41,17 +41,6 @@ Get future release notes emailed to you: {% include icon-yes.html %} {% include icon-yes.html %} - - -

Multi-factor authentication enforcement for the Cloud Console

-

Multi-factor authentication can now be enforced for CockroachDB Cloud users who access the Console via username and password, reducing the risk of unauthorized access from compromised credentials.

- - Preview - {% include icon-no.html %} - {% include icon-yes.html %} - {% include icon-yes.html %} - {% include icon-yes.html %} -

Bring Your Own Cloud (BYOC)

From ff0e5e54ea5f93043990a62c9bd45a89c87c8adf Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 11:40:54 -0400 Subject: [PATCH 19/32] update releases table; fix links and dupes --- src/current/_includes/releases/v26.2/v26.2.0.md | 7 ++----- src/current/releases/index.md | 3 ++- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 0234c9e0026..031c0ff87a4 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -384,8 +384,6 @@ For details on license types, refer to = 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. [#159286][#159286] - Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. [#164671][#164671] - Fixed a bug where descriptor version fetching could be incorrectly throttled by the elastic CPU limiter, potentially leading to increased query latency or timeouts under high CPU load. [#166810][#166810] -- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. [#166705][#166705] - Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. 
[#167377][#167377] - Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. [#167432][#167432] @@ -472,8 +469,8 @@ This section describes newly identified limitations in CockroachDB v26.2. #### Active Session History - ASH is not recommended for nodes with 64 or more vCPUs, due to degraded performance on those nodes. [#168289](https://github.com/cockroachdb/cockroach/issues/168289) -- On Basic and Standard CockroachDB {{ site.data.products.cloud }} clusters, ASH samples only cover work running on the [SQL]({% link {{ page.version.version }}/architecture/sql-layer.md %}) pod. KV-level work ([storage]({% link {{ page.version.version }}/architecture/storage-layer.md %}) I/O, [lock waits]({% link {{ page.version.version }}/troubleshoot-lock-contention.md %}), [replication]({% link {{ page.version.version }}/architecture/replication-layer.md %}), etc.) is not visible in ASH samples. -- KV work triggered during [COMMIT]({% link {{ page.version.version }}/commit-transaction.md %}) (for example, [intent resolution]({% link {{ page.version.version }}/architecture/transaction-layer.md %}), [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) proposals deferred from earlier statements in an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %})) is attributed to the last [statement's fingerprint]({% link {{ page.version.version }}/ui-statements-page.md %}), not the statement that originally caused the work. [#165864](https://github.com/cockroachdb/cockroach/issues/165864) +- On Basic and Standard CockroachDB {{ site.data.products.cloud }} clusters, ASH samples only cover work running on the [SQL]({% link v26.2/architecture/sql-layer.md %}) pod. 
KV-level work ([storage]({% link v26.2/architecture/storage-layer.md %}) I/O, [lock waits]({% link v26.2/troubleshoot-lock-contention.md %}), [replication]({% link v26.2/architecture/replication-layer.md %}), etc.) is not visible in ASH samples. +- KV work triggered during [COMMIT]({% link v26.2/commit-transaction.md %}) (for example, [intent resolution]({% link v26.2/architecture/transaction-layer.md %}), [Raft]({% link v26.2/architecture/replication-layer.md %}#raft) proposals deferred from earlier statements in an [explicit transaction]({% link v26.2/begin-transaction.md %})) is attributed to the last [statement's fingerprint]({% link v26.2/ui-statements-page.md %}), not the statement that originally caused the work. [#165864](https://github.com/cockroachdb/cockroach/issues/165864) [#146250]: https://github.com/cockroachdb/cockroach/pull/146250 [#150663]: https://github.com/cockroachdb/cockroach/pull/150663 diff --git a/src/current/releases/index.md b/src/current/releases/index.md index 0dfd3317042..8a31cf0c11a 100644 --- a/src/current/releases/index.md +++ b/src/current/releases/index.md @@ -67,6 +67,7 @@ For more information, read the [CockroachDB {{ site.data.products.cloud }} upgra | Version | Release Type | GA date | | :---: | :---: | :---: | +| [v26.2]({% link releases/v26.2.md %}) | Regular | 2026-04-27 | | [v26.1]({% link releases/v26.1.md %}) | Innovation | 2026-02-02 | | [v25.4]({% link releases/v25.4.md %}) | Regular | 2025-11-03 | | [v25.2]({% link releases/v25.2.md %}) | Regular | 2025-05-12 | @@ -80,7 +81,7 @@ The following releases and their descriptions represent proposed plans that are | Version | Release Type | Expected GA date | | :---: | :---: | :---: | -| v26.2 | Regular | 2026 Q2 | +| v26.3 | Innovation | 2026 Q3 | ## Archived releases From a39edf7c9fede11c7e430bdc2740c750bc0bf8bd Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 13:21:06 -0400 Subject: [PATCH 20/32] fix links --- 
src/current/_includes/v26.2/ui/statement-details.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/v26.2/ui/statement-details.md b/src/current/_includes/v26.2/ui/statement-details.md index d4811e71ecf..56485fa48a9 100644 --- a/src/current/_includes/v26.2/ui/statement-details.md +++ b/src/current/_includes/v26.2/ui/statement-details.md @@ -34,8 +34,8 @@ The **Overview** section also displays the SQL statement fingerprint statistics |**KV CPU Time** | The average KV CPU time spent executing the statement within the specified time interval. This represents [KV]({{ link_prefix }}architecture/overview.html#layers) work that is on the critical path of serving the query. It excludes time spent on asynchronous replication and in the [storage layer]({{ link_prefix }}architecture/storage-layer.html). | |**Admission Wait Time** | Average time spent waiting in [admission control]({{ link_prefix }}admission-control.html) queues within the specified time interval. | |**Client Wait Time** | The time spent waiting for the client to send the statement while holding the transaction open. A high wait time indicates that you should revisit the entire transaction and [batch your statements]({{ link_prefix }}transactions.html#batched-statements). | -|**Canary vs Stable Statement Times** | Comparison of the time taken by the [planner]({{ link_prefix }}architecture/sql-layer.html#sql-parser-planner-executor) to create an execution plan and for CockroachDB to execute statements when using [canary statistics]({% link {{ page.version.version }}/canary-statistics.md %}) and when using stable statistics. This chart only appears when canary statistics are enabled. | -|**Canary vs Stable Plan Distribution** | Comparison of which execution plans were used during each time period when using [canary statistics]({% link {{ page.version.version }}/canary-statistics.md %}) and when using stable statistics. 
This chart only appears when canary statistics are enabled. | +|**Canary vs Stable Statement Times** | Comparison of the time taken by the [planner]({{ link_prefix }}architecture/sql-layer.html#sql-parser-planner-executor) to create an execution plan and for CockroachDB to execute statements when using [canary statistics]({{ link_prefix }}canary-statistics.html) and when using stable statistics. This chart only appears when canary statistics are enabled. | +|**Canary vs Stable Plan Distribution** | Comparison of which execution plans were used during each time period when using [canary statistics]({{ link_prefix }}canary-statistics.html) and when using stable statistics. This chart only appears when canary statistics are enabled. | The following screenshot shows the statement fingerprint of the query described in [Use the right index]({{ link_prefix }}apply-statement-performance-rules.html#rule-2-use-the-right-index): From fe999d8728bc5583a6524f5331fc6ec7ba09ac23 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 13:36:08 -0400 Subject: [PATCH 21/32] link known limitations in intro --- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- src/current/_includes/releases/whats-new-intro.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 031c0ff87a4..5b239727ee7 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -457,7 +457,7 @@ For details on license types, refer to Known limitations This section describes newly identified limitations in CockroachDB v26.2. 
diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index d85fa78a466..227631ca405 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -102,7 +102,7 @@ CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link release {% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} - For a summary of the most significant changes in {{ page.major_version }}, refer to [Feature highlights](#feature-highlights). -- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes), including [key cluster setting changes](#{{ page.major_version | replace: ".", "-" }}-0-cluster-settings) and [deprecations](#{{ page.major_version | replace: ".", "-" }}-0-deprecations). +- Before [upgrading to CockroachDB {{ page.major_version }}]({% link {{ page.major_version }}/upgrade-cockroach-version.md %}), review the [backward-incompatible changes](#{{ page.major_version | replace: ".", "-" }}-0-backward-incompatible-changes), including [key cluster setting changes](#{{ page.major_version | replace: ".", "-" }}-0-cluster-settings) and [deprecations](#{{ page.major_version | replace: ".", "-" }}-0-deprecations); as well as newly identified [known limitations](#{{ page.major_version | replace: ".", "-" }}-0-known-limitations). {% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} - For details about the support window for this release type, review the [Release Support Policy]({% link releases/release-support-policy.md %}).
From 99c666d36d24427b81f57df908a02ebe8811365a Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 13:58:01 -0400 Subject: [PATCH 22/32] retroactively fix link --- src/current/_includes/releases/v25.4/v25.4.0.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v25.4/v25.4.0.md b/src/current/_includes/releases/v25.4/v25.4.0.md index aac3d5d33c4..c21fe212546 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0.md +++ b/src/current/_includes/releases/v25.4/v25.4.0.md @@ -17,7 +17,7 @@ Before [upgrading to CockroachDB v25.4]({% link v25.4/upgrade-cockroach-version. - [Features that require upgrade finalization](#v25-4-0-features-that-require-upgrade-finalization) - [Key cluster setting changes](#v25-4-0-key-cluster-setting-changes) - [Deprecations] (#v25-4-0-deprecations) - - [Known limitations](#known-limitations) + - [Known limitations](#25-4-0-known-limitations) {% include releases/new-release-downloads-docker-image.md release=include.release %} @@ -270,7 +270,7 @@ Before you upgrade, review these changes and other information about the new maj {% include releases/v25.4/deprecations.md %} -

Known limitations

+

Known limitations

This section describes newly identified limitations in CockroachDB v25.4. From b2fd07cd6de6cf45d03b7ae11712454d99d28a24 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 16:49:28 -0400 Subject: [PATCH 23/32] update known limitations; fix link again --- .../_includes/releases/v25.4/v25.4.0.md | 2 +- .../_includes/releases/v26.2/v26.2.0.md | 20 +++++++++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/current/_includes/releases/v25.4/v25.4.0.md b/src/current/_includes/releases/v25.4/v25.4.0.md index c21fe212546..1c242387535 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0.md +++ b/src/current/_includes/releases/v25.4/v25.4.0.md @@ -17,7 +17,7 @@ Before [upgrading to CockroachDB v25.4]({% link v25.4/upgrade-cockroach-version. - [Features that require upgrade finalization](#v25-4-0-features-that-require-upgrade-finalization) - [Key cluster setting changes](#v25-4-0-key-cluster-setting-changes) - [Deprecations] (#v25-4-0-deprecations) - - [Known limitations](#25-4-0-known-limitations) + - [Known limitations](#v25-4-0-known-limitations) {% include releases/new-release-downloads-docker-image.md release=include.release %} diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 5b239727ee7..700231c2026 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -461,10 +461,26 @@ For details on license types, refer to
Disaster Recovery highlights + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailabilitySelf-hostedBasicStandardAdvanced
+

Faster disaster recovery with improved restore performance

+

Restore operations are now up to 4× faster using WITH EXPERIMENTAL COPY, significantly reducing recovery time objectives (RTO) for large-scale disaster recovery scenarios. This capability is in Preview for self-hosted clusters. For details, refer to Run faster restores.

+
Preview{% include icon-yes.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}
+

Simplified backup management using backup IDs

+

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window. This capability is in Preview for self-hosted clusters. For details, refer to Query backups more efficiently.

+
Preview{% include icon-yes.html %}{% include icon-no.html %}{% include icon-no.html %}{% include icon-no.html %}
+

Performance highlights

From 11286b557e7cb232899a5021930925f86eae1e50 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 21:50:06 -0400 Subject: [PATCH 27/32] fix links --- src/current/_includes/releases/v26.2/v26.2.0.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 2fcdf54b986..3e8ddeb1c77 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -170,7 +170,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -181,7 +181,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc From 71ab160348b48eb5c0a035ea073aa1e4365e15b5 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 21:51:40 -0400 Subject: [PATCH 28/32] amend feature highlights intro --- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 3e8ddeb1c77..a5cbd3bca08 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -6,7 +6,7 @@ Release Date: April 27, 2026 ### Feature highlights -This section summarizes the most significant user-facing changes in [SQL](#sql-highlights), [security](#security-highlights), [observability](#observability-highlights), and [performance](#performance-highlights). +This section summarizes the most significant user-facing changes in [SQL](#sql-highlights), [security](#security-highlights), [observability](#observability-highlights), [disaster recovery](#disaster-recovery-highlights), and [performance](#performance-highlights). {{site.data.alerts.callout_success}} You can also search the docs for sections labeled [New in v26.2](https://www.cockroachlabs.com/docs/search?query=New+in+v26.2). 
From 5171830ba9eebd6a3a7e3b8ea161238b9fe96e8a Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 21:54:22 -0400 Subject: [PATCH 29/32] copyedit feature highlights --- src/current/_includes/releases/v26.2/v26.2.0.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index a5cbd3bca08..689e9e5547e 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -170,7 +170,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc @@ -181,7 +181,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc From 627d058404f68fddd0ef7d83b5f33f679f32fca2 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 22:05:27 -0400 Subject: [PATCH 30/32] correct formatting --- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 689e9e5547e..6c12407371e 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -181,7 +181,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc From b4186cdb13ddee9ec9a6f543012d945600102611 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 22:13:07 -0400 Subject: [PATCH 31/32] remove stale reference to incremental backups --- .../cockroachcloud/take-and-restore-self-managed-backups.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/current/cockroachcloud/take-and-restore-self-managed-backups.md b/src/current/cockroachcloud/take-and-restore-self-managed-backups.md index 426ef4085a8..f1d09cd1736 100644 --- a/src/current/cockroachcloud/take-and-restore-self-managed-backups.md +++ 
b/src/current/cockroachcloud/take-and-restore-self-managed-backups.md @@ -115,8 +115,6 @@ BACKUP INTO {'subdirectory'} IN 'external://backup_s3' AS OF SYSTEM TIME '-10s'; If you intend to take a **full** backup, we recommend running `BACKUP INTO {collectionURI}` without specifying a subdirectory. {{site.data.alerts.end}} -To explicitly control where you store your incremental backups, use the [`incremental_location`]({% link {{site.current_cloud_version}}/backup.md %}#options) option. For more detail, see [this example]({% link {{site.current_cloud_version}}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) demonstrating the `incremental_location` option. - ### Scheduled backup This example [creates a schedule]({% link {{site.current_cloud_version}}/create-schedule-for-backup.md %}) for a cluster backup with revision history that is taken every day at midnight: From 06aab428b4a782f3dbae5c95545caea0f7dbeee4 Mon Sep 17 00:00:00 2001 From: Ryan Kuo Date: Fri, 24 Apr 2026 22:25:45 -0400 Subject: [PATCH 32/32] add feature links --- src/current/_includes/releases/v26.2/upgrade-finalization.md | 2 +- src/current/_includes/releases/v26.2/v26.2.0.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/_includes/releases/v26.2/upgrade-finalization.md b/src/current/_includes/releases/v26.2/upgrade-finalization.md index 3c76a0dae44..0289596009a 100644 --- a/src/current/_includes/releases/v26.2/upgrade-finalization.md +++ b/src/current/_includes/releases/v26.2/upgrade-finalization.md @@ -4,7 +4,7 @@ This section summarizes the features that are not available until you [finalize - Views now support the PostgreSQL-compatible security_invoker option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. 
The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184](https://github.com/cockroachdb/cockroach/pull/164184) -- Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) +- Added support for ALTER TABLE ENABLE TRIGGER and ALTER TABLE DISABLE TRIGGER syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924](https://github.com/cockroachdb/cockroach/pull/161924) - `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345](https://github.com/cockroachdb/cockroach/pull/162345) diff --git a/src/current/_includes/releases/v26.2/v26.2.0.md b/src/current/_includes/releases/v26.2/v26.2.0.md index 6c12407371e..f1a930ba99e 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0.md +++ b/src/current/_includes/releases/v26.2/v26.2.0.md @@ -31,7 +31,7 @@ You can also search the docs for sections labeled [New in v26.2](https://www.coc

Faster disaster recovery with improved restore performance

-

Restore operations are now up to 4× faster using WITH EXPERIMENTAL COPY, significantly reducing recovery time objectives (RTO) for large-scale disaster recovery scenarios. This capability is in Preview for self-hosted clusters. For details, refer to Run faster restores.

+

Restore operations are now up to 4× faster using WITH EXPERIMENTAL COPY, significantly reducing recovery time objectives (RTO) for large-scale disaster recovery scenarios. This capability is in Preview for self-hosted clusters. For details, refer to Run faster restores.

Preview {% include icon-yes.html %}

Simplified backup management using backup IDs

-

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window. This capability is in Preview for self-hosted clusters. For details, refer to Query backups more efficiently.

+

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window. This capability is in Preview for self-hosted clusters. For details, refer to Query backups more efficiently.

Preview {% include icon-yes.html %}

Faster disaster recovery with improved restore performance

-

Restore operations are now up to 4× faster using WITH EXPERIMENTAL COPY, significantly reducing recovery time objectives (RTO) for large-scale disaster recovery scenarios. This capability is in Preview for self-hosted clusters. For details, refer to Run faster restores.

+

Restore operations are now up to four times faster using WITH EXPERIMENTAL COPY, significantly reducing recovery time objectives (RTO) for large-scale disaster recovery scenarios.

Preview {% include icon-yes.html %}

Simplified backup management using backup IDs

-

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window. This capability is in Preview for self-hosted clusters. For details, refer to Query backups more efficiently.

+

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window.

Preview {% include icon-yes.html %}

Simplified backup management using backup IDs

-

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM <backup_id> IN <collection>, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window.

+

SHOW BACKUPS now returns unique backup IDs that can be used directly in RESTORE operations with FROM {backup_id} IN {collection}, eliminating the need for subdirectory paths or AS OF SYSTEM TIME clauses. Server-side time filtering with NEWER THAN and OLDER THAN makes it easier to find backups within a specific recovery window.

Preview {% include icon-yes.html %}

SQL triggers

-

SQL triggers are now generally available. CockroachDB supports PostgreSQL-compatible BEFORE and AFTER triggers that activate on INSERT, UPDATE, or DELETE operations.

+

SQL triggers are now generally available. CockroachDB supports PostgreSQL-compatible BEFORE and AFTER triggers that activate on INSERT, UPDATE, or DELETE operations.

GA {% include icon-yes.html %}