diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc index 25ce9e34c5..03e753fbdf 100644 --- a/modules/ROOT/nav.adoc +++ b/modules/ROOT/nav.adoc @@ -82,6 +82,8 @@ include::third-party:partial$nav.adoc[] **** xref:learn:clusters-and-availability/xdcr-filtering.adoc[XDCR Advanced Filtering] **** xref:learn:clusters-and-availability/xdcr-conflict-resolution.adoc[XDCR Conflict Resolution] **** xref:learn:clusters-and-availability/xdcr-with-scopes-and-collections.adoc[XDCR with Scopes and Collections] + **** xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning] + **** xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc[XDCR Active-Active with Sync Gateway] *** xref:learn:clusters-and-availability/groups.adoc[Server Group Awareness] * xref:learn:security/security-overview.adoc[Security] ** xref:learn:security/authentication.adoc[Authentication] @@ -230,11 +232,15 @@ include::third-party:partial$nav.adoc[] **** xref:install:upgrade-docker-cluster-online-full-capacity.adoc[] *** xref:install:upgrade-ipv6-cluster.adoc[IPv6 Cluster-Upgrade] ** xref:install:upgrade-feature-availability.adoc[Feature Availability During Upgrade] -* Migration - ** xref:install:migrating-application-data.adoc[Migrating Data] +* xref:install:couchbase-migrations.adoc[Data Migration] + ** xref:install:migrating-application-data.adoc[] ** xref:install:migration.adoc[Enabling Timestamp-based Conflict Resolution for Migrated Data] - // ** xref:install:migrate-couchdb.adoc[Migrating from Apache CouchDB] - // ** xref:install:migrate-mysql.adoc[Migrating from Relational Databases] + +//// + ** xref:install:migrate-couchdb.adoc[Migrating from Apache CouchDB] + ** xref:install:migrate-mysql.adoc[Migrating from Relational Databases] +//// + @@ -243,6 +249,7 @@ include::third-party:partial$nav.adoc[] * xref:elasticsearch-connector::index.adoc[Elasticsearch Plug-in] * xref:kafka-connector::index.adoc[Kafka Connector] * xref:spark-connector::index.adoc[Spark Connector] +* xref:power-bi-connector::index.adoc[Power BI Connector] * xref:tableau-connector::index.adoc[Tableau Connector] * xref:connectors:odbc-jdbc-drivers.adoc[Couchbase ODBC and JDBC Drivers] @@ -311,10 +318,13 @@ include::cli:partial$cbcli/nav.adoc[] *** xref:cli:cbstats/cbstats-workload.adoc[workload] ** xref:cli:cbsummary.adoc[cbsummary] ** xref:cli:cbtools/cbtransfer.adoc[cbtransfer] + ** xref:cli:cbmigrate-tool.adoc[cbmigrate] ** xref:cli:cbtools/cbworkloadgen.adoc[cbworkloadgen] ** xref:cli:couchbase-server.adoc[couchbase-server] + ** xref:cli:finderr.adoc[finderr] ** xref:cli:mctimings.adoc[mctimings] ** xref:cli:mcstat.adoc[mcstat] + ** xref:cli:mctestauth.adoc[mctestauth] * xref:rest-api:rest-intro.adoc[REST API Reference] ** xref:rest-api:rest-cluster-intro.adoc[Nodes and Clusters API] @@ -415,7 +425,7 @@ include::cli:partial$cbcli/nav.adoc[] *** xref:rest-api:rest-servergroup-delete.adoc[Deleting Groups] ** xref:rest-api:rest-xdcr-intro.adoc[XDCR API] - *** xref:rest-api:rest-xdcr-create-ref.adoc[Creating a Reference] + *** xref:rest-api:rest-xdcr-create-ref.adoc[Creating or Editing a Reference] *** xref:rest-api:rest-xdcr-connection-precheck.adoc[Checking Connections] *** xref:rest-api:rest-xdcr-get-ref.adoc[Getting a Reference] *** xref:rest-api:rest-xdcr-create-replication.adoc[Creating a Replication] @@ -457,7 +467,7 @@ include::cli:partial$cbcli/nav.adoc[] **** xref:rest-api:rotate-data-key.adoc[Rotate the Data Key] ** xref:n1ql:n1ql-rest-api/intro.adoc[] - *** 
xref:n1ql:n1ql-rest-api/index.adoc[] + *** xref:n1ql-rest-query:index.adoc[] **** xref:n1ql:n1ql-rest-api/examplesrest.adoc[] ***** xref:n1ql:n1ql-rest-api/exsuccessful.adoc[] ***** xref:n1ql:n1ql-rest-api/expositional.adoc[] @@ -469,14 +479,14 @@ include::cli:partial$cbcli/nav.adoc[] ***** xref:n1ql:n1ql-rest-api/exunsupportedhttp.adoc[] ***** xref:n1ql:n1ql-rest-api/exauthhttp.adoc[] ***** xref:n1ql:n1ql-rest-api/exauthrequest.adoc[] - *** xref:n1ql:n1ql-rest-api/admin.adoc[Query Admin REST API] - *** xref:rest-api:rest-cluster-query-settings.adoc[Query Settings REST API] - *** xref:n1ql:n1ql-rest-api/functions.adoc[Query Functions REST API] + *** xref:n1ql-rest-admin:index.adoc[] + *** xref:n1ql-rest-settings:index.adoc[] + *** xref:n1ql-rest-functions:index.adoc[] ** xref:rest-api:rest-index-service.adoc[Index Service API] *** xref:rest-api:get-settings-indexes.adoc[Retrieve GSI Settings] *** xref:rest-api:post-settings-indexes.adoc[Set GSI Settings] - *** xref:rest-api:rest-index-stats.adoc[Index Statistics API] + *** xref:index-rest-stats:index.adoc[Index Statistics API] *** xref:rest-api:rest-modify-index-batch-size.adoc[Modify Index Batch Size] ** xref:rest-api:backup-rest-api.adoc[Backup Service API] @@ -500,22 +510,22 @@ include::cli:partial$cbcli/nav.adoc[] *** xref:rest-api:backup-node-threads.adoc[] ** xref:rest-api:rest-fts.adoc[Search Service API] - *** xref:rest-api:rest-fts-node.adoc[Node Configuration] - *** xref:rest-api:rest-fts-indexing.adoc[Indexes] - *** xref:rest-api:rest-fts-advanced.adoc[Advanced] - *** xref:rest-api:rest-fts-statistics.adoc[Search Statistics] - *** xref:rest-api:rest-fts-query.adoc[Active Queries] - *** xref:rest-api:rest-fts-partition-file-transfer.adoc[Rebalance Based on File Transfer] + *** xref:fts-rest-nodes:index.adoc[Node Configuration] + *** xref:fts-rest-indexing:index.adoc[Indexes] + *** xref:fts-rest-advanced:index.adoc[Advanced] + *** xref:fts-rest-stats:index.adoc[Search Statistics] + *** xref:fts-rest-query:index.adoc[Active Queries] + *** xref:fts-rest-manage:index.adoc[Search Manager Options] - ** xref:eventing:eventing-api.adoc[Eventing Service API] + ** xref:eventing-rest-api:index.adoc[Eventing Service API] ** xref:analytics:rest-analytics.adoc[Analytics Service API] - *** xref:analytics:rest-service.adoc[Analytics Service REST API] - *** xref:analytics:rest-admin.adoc[Analytics Admin REST API] - *** xref:analytics:rest-config.adoc[Analytics Config REST API] - *** xref:analytics:rest-settings.adoc[Analytics Settings REST API] - *** xref:analytics:rest-links.adoc[Analytics Links REST API] - *** xref:analytics:rest-library.adoc[Analytics Library REST API] + *** xref:analytics-rest-service:index.adoc[Analytics Service REST API] + *** xref:analytics-rest-admin:index.adoc[Analytics Admin REST API] + *** xref:analytics-rest-config:index.adoc[Analytics Config REST API] + *** xref:analytics-rest-settings:index.adoc[Analytics Settings REST API] + *** xref:analytics-rest-links:index.adoc[Analytics Links REST API] + *** xref:analytics-rest-library:index.adoc[Analytics Library REST API] * xref:xdcr-reference:xdcr-reference-intro.adoc[XDCR Reference] ** xref:xdcr-reference:xdcr-advanced-settings.adoc[Advanced Settings] @@ -530,14 +540,19 @@ include::cli:partial$cbcli/nav.adoc[] * xref:rebalance-reference:rebalance-reference.adoc[Rebalance Reference] * xref:metrics-reference:metrics-reference.adoc[Metrics Reference] ** xref:metrics-reference:data-service-metrics.adoc[Data Service Metrics] + *** 
xref:metrics-reference:data-service-metrics-cross-reference.adoc[Data Service Metrics Cross Reference]
 ** xref:metrics-reference:query-service-metrics.adoc[Query Service Metrics]
+ *** xref:metrics-reference:query-service-metrics-cross-reference.adoc[Query Service Metrics Cross Reference]
 ** xref:metrics-reference:index-service-metrics.adoc[Index Service Metrics]
+ *** xref:metrics-reference:index-service-metrics-cross-reference.adoc[Index Service Metrics Cross Reference]
 ** xref:metrics-reference:search-service-metrics.adoc[Search Service Metrics]
 ** xref:metrics-reference:eventing-service-metrics.adoc[Eventing Service Metrics]
 ** xref:metrics-reference:analytics-service-metrics.adoc[Analytics Service Metrics]
+ *** xref:metrics-reference:analytics-service-metrics-cross-reference.adoc[Analytics Service Metrics Cross Reference]
 ** xref:metrics-reference:backup-service-metrics.adoc[Backup Service Metrics]
 ** xref:metrics-reference:ns-server-metrics.adoc[Cluster Manager Metrics]
 ** xref:metrics-reference:xdcr-metrics.adoc[XDCR Metrics]
+ *** xref:metrics-reference:xdcr-metrics-cross-reference.adoc[XDCR Metrics Cross Reference]
 * xref:learn:views/views-intro.adoc[Views Reference]
 ** xref:learn:views/views-basics.adoc[View Concepts]
 *** xref:learn:views/views-streaming.adoc[Stream-based Views]
diff --git a/modules/backup-restore/pages/enterprise-backup-restore.adoc b/modules/backup-restore/pages/enterprise-backup-restore.adoc
index 715447552a..3c511dd40d 100644
--- a/modules/backup-restore/pages/enterprise-backup-restore.adoc
+++ b/modules/backup-restore/pages/enterprise-backup-restore.adoc
@@ -10,11 +10,17 @@ The `cbbackupmgr` tool backs up and restores data, scripts, configurations, and
 It allows large data sets to be managed with extremely high performance.
 Use of AWS S3 storage is supported.
 
-Only Full Administrators can use `cbbackupmgr`; which is available in Couchbase Server _Enterprise Edition_ only.
-Note that `cbbackupmgr` is _not_ backward compatible with backups created by means of `cbbackup`.
+Only Full Administrators can use `cbbackupmgr`, which is available for both Couchbase Server _Enterprise Edition_ and Couchbase Server _Community Edition_.
 
-Note that in Couchbase Enterprise Server 7.2 and after, `cbbackupmgr` is available in a tools package that must be downloaded.
+[NOTE]
+====
+`cbbackupmgr` is _not_ backward compatible with backups created by means of `cbbackup`.
+
+In Couchbase Enterprise Server 7.2 and after, `cbbackupmgr` is available in a Server tools package that must be downloaded.
 See xref:cli:cli-intro.adoc#server-tools-packages[Server Tools Packages].
+====
+
 
 === Planning for Disaster Recovery
@@ -212,4 +218,17 @@ Unless otherwise specified, backup and restore apply both to _local_ and to _clou
 |===
 
-* For local backup only -- _not_ for cloud.
++*+ For local backup only -- _not_ for cloud.
+
+.Restoring metadata and users
+****
+* When restoring metadata to a newer Server version,
+if the feature that the metadata applies to no longer exists in that version, then the metadata may not be restorable.
+
+* If the user roles no longer exist in the version that you wish to restore to, then an error will be logged for the target user.
+
+* In general, if you can upgrade _directly_ to the new version, then you should be able to restore the users.
+If you cannot upgrade directly, then restoring users may cause errors, for example, if some of the user roles no longer exist in the newer Server version.
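+
+A restore of this kind is just a regular `cbbackupmgr restore` run against the target cluster.
+A minimal sketch, in which the archive path, repository name, and credentials are placeholders:
+
+[source,console]
+----
+# Placeholder archive path, repository name, and credentials.
+cbbackupmgr restore --archive /data/backups --repo weekly \
+  --cluster couchbase://127.0.0.1 \
+  --username Administrator --password password
+----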
+
+****
diff --git a/modules/cli/pages/cbmigrate-tool.adoc b/modules/cli/pages/cbmigrate-tool.adoc
new file mode 100644
index 0000000000..07b9211714
--- /dev/null
+++ b/modules/cli/pages/cbmigrate-tool.adoc
@@ -0,0 +1,310 @@
+= cbmigrate
+:description: pass:q[Use the `cbmigrate` command-line tool to migrate your data from other platforms.]
+:page-topic-type: reference
+
+[abstract]
+{description}
+
+== Description
+
+The `cbmigrate` tool migrates your existing data from the following platforms:
+
+* https://www.mongodb.com[MongoDB]
+* https://aws.amazon.com/dynamodb/[DynamoDB]
+* https://huggingface.co[Hugging Face]
+
+== Installation
+
+. Download the latest version of the `cbmigrate` package from its https://github.com/couchbaselabs/cbmigrate/releases[GitHub repository].
+. Unpack the downloaded package to its own directory.
+. Execute the tool by running the following from the command line:
++
+[source,console]
+----
+$ ./cbmigrate [command] [flags]
+----
+
+
+[#syntax]
+== Syntax
+
+[source,console]
+----
+$ cbmigrate [--version] [--help HELP]
+----
+
+[source,console]
+----
+$ cbmigrate [command] [flags]
+----
+
+=== Command options
+
+`cbmigrate` takes one of three optional commands.
+Depending on the command used, the `cbmigrate` tool also accepts a range of flags supplying additional information required for its execution.
+
+[tabs]
+====
+
+MongoDB::
++
+--
+.Command options
+[cols="20a, 40a"]
+|===
+|Command |Flags
+
+|`mongo`
+
+Migrate the data from a https://www.mongodb.com[MongoDB] installation to Couchbase Server.
+|
+
+`--mongodb-uri` string::
+The MongoDB connection string.
+
+`--mongodb-database` string::
+The name of the database that you wish to migrate.
+
+`--mongodb-collection`::
+The name of the collection within the database you are migrating.
+
+// tag::cb-parameter-section[]
+
+
+// tag::cb-user-name-password-section[]
+`--cb-username` string::
+The username granting access to the target cluster.
+
+`--cb-password` string::
+The password (attached to `--cb-username`) for accessing the target cluster.
+// end::cb-user-name-password-section[]
+
+`--cb-cluster` string::
+The URL of the target cluster node for the import.
+
+// tag::cb-bucket-scope-collection-section[]
+`--cb-bucket` string::
+The name of the target bucket.
+
+`--cb-scope` string::
+The target scope for the migration.
+
+`--cb-collection` string::
+The target collection name for the import.
+// end::cb-bucket-scope-collection-section[]
+
+`--cb-generate-key` string::
+Specifies a key expression used for generating a unique key for each imported document.
+It allows for the creation of document keys by combining static text,
+field values (denoted by `%fieldname%`), and custom generators (like `+#UUID#+`).
+For example, using a combination of static text, field names, and custom generators,
+you can generate a unique key of the form: "key::%name%::#UUID#"
++
+(*Default*: `"%_id%"`)
+
+`--cb-cacert` string::
+Specifies a CA certificate that will be used to verify the identity of the server being connected to.
+Either this flag or the `--cb-no-ssl-verify` flag must be specified when using an SSL encrypted connection.
+
+`--cb-no-ssl-verify`::
+Skips the SSL verification phase.
+Specifying this flag will allow a connection using SSL encryption but will not verify the identity of the server you connect to.
++
+WARNING: You are vulnerable to a man-in-the-middle attack if you use this flag.
++
+Either this flag or the `--cb-cacert` flag must be specified when using an SSL encrypted connection.
+
+`--cb-client-cert` string::
+The path to a client certificate used to authenticate when connecting to a cluster.
+May be supplied with `--cb-client-key` as an alternative to the `--cb-username` and `--cb-password` flags.
+
+`--cb-client-cert-password`::
+The password for the certificate provided to the `--cb-client-cert` flag.
+When using this flag, the certificate/key pair is expected to be in the PKCS#12 format.
+
+`--cb-client-key` string::
+The path to the client private key whose public key is contained in the certificate provided to the `--cb-client-cert` flag.
+May be supplied with `--cb-client-cert` as an alternative to the `--cb-username` and `--cb-password` flags.
+
+`--cb-client-key-password` string::
+The password for the key provided to the `--cb-client-key` flag.
+When using this flag, the key is expected to be in the PKCS#8 format.
+
+`--cb-buffer-size` int::
+An integer value denoting the size of the memory buffer used during the import.
+(*Default*: 10000)
+
+`--cb-batch-size` int::
+The number of documents processed as a batch during the import.
+(*Default*: 200)
+
+`--copy-indexes`::
+Copy indexes for the collection.
+(*Default*: true)
+
+`--hash-document-key` string::
+Hash the Couchbase document key.
+Can be `sha256` or `sha512`.
+
+`--keep-primary-key`::
+Keep the non-composite primary key in the document.
+By default, if the key is a non-composite primary key, it is deleted.
+
+
+`--help`::
+Help for the MongoDB migration parameters and flags.
+
+`--debug`::
+Enable debug output.
+
+// end::cb-parameter-section[]
+
+|===
+
+--
+
+DynamoDB::
++
+--
+.Command options
+[cols="20a, 40a"]
+|===
+|Command |Flags
+
+|`dynamodb`
+
+Migrate the data from a https://aws.amazon.com/dynamodb/[DynamoDB] installation to Couchbase Server.
+|
+
+`--aws-access-key-id` string::
+Your AWS access key ID.
+
+`--aws-ca-bundle` string::
+The CA certificate bundle to use when verifying SSL certificates.
+Overrides config/env settings.
+
+`--aws-endpoint-url` string::
+Override the AWS default endpoint URL with the given URL.
+
+`--aws-no-verify-ssl`::
+By default, `cbmigrate` uses SSL when communicating with AWS services.
+For each SSL connection, `cbmigrate` will verify SSL certificates.
+This option overrides the default behavior of verifying SSL certificates.
+
+`--aws-profile` string::
+Use a specific AWS profile from your credentials file.
+
+`--aws-region` string::
+The region to use.
+Overrides config/env settings.
+
+`--aws-secret-access-key` string::
+The AWS secret access key.
+
+`--dynamodb-limit` int::
+Specifies the maximum number of items to retrieve per page during a scan operation.
+Use this option to control the amount of data fetched in a single request, helping to manage memory usage and API call rates during scanning.
+
+`--dynamodb-segments` int::
+Specifies the total number of segments to divide the DynamoDB table into for parallel scanning.
+Each segment is scanned independently, allowing multiple threads or processes to work concurrently for faster data retrieval.
+Use this option to optimize performance for large tables.
+By default, the entire table is scanned sequentially without segmentation.
+(*Default*: 1)
+
+`--dynamodb-table-name` string::
+The name of the table containing the requested item.
+You can also provide the Amazon Resource Name (ARN) of the table in this parameter.
+
+include::cbmigrate-tool.adoc[tag="cb-parameter-section"]
+
+|===
+
+--
+
+Hugging Face::
++
+--
+.Command options
+[cols="20a, 40a"]
+|===
+|Command |Flags
+
+|`hugging-face`
+
+Migrate the data from https://huggingface.co[Hugging Face] datasets to Couchbase Server.
+
+|
+
+`--path` string::
+The path or name of the dataset. (Required)
+
+`--name`::
+Configuration name of the dataset. (Optional)
+
+`--data-files` string::
+Path(s) to the source data file(s). (Optional)
+
+`--split` string::
+The split of the data to load. (Optional)
+
+`--cache-dir` string::
+The cache directory to store the datasets. (Optional)
+
+`--download-config` string::
+Specific download configuration parameters. (Optional)
+
+`--download-mode` reuse_dataset_if_exists \| force_redownload::
+Specifies whether to reuse existing downloaded data or force a fresh download.
+(Optional)
+
+
+`--verification-mode` no_checks \| basic_checks \| all_checks::
+Sets the level of verification during the migration.
+(Optional)
+
+`--keep-in-memory`::
+Use this flag to keep the migrated dataset in memory.
+
+`--save-infos`::
+Save the dataset information. (Default: false)
+
+`--revision` string::
+The version of the dataset script to load. (Optional)
+
+`--token` string::
+Authentication token for private datasets. (Optional)
+
+`--no-streaming`::
+Disable streaming mode for dataset loading. (Default: false)
+
+`--num-proc` int::
+Number of processes to use for the migration. (Optional)
+
+`--storage-options` string::
+Storage options for remote filesystems. (Optional)
+
+`--trust-remote-code`::
+Allow loading arbitrary code from the dataset repository. (Optional)
+
+`--id-fields` string::
+Comma-separated list of field names to use as the document ID.
+
+`--cb-url` string::
+The URL for the target Couchbase cluster
+(e.g., `couchbase://localhost`).
+
+include::cbmigrate-tool.adoc[tag="cb-user-name-password-section"]
+
+include::cbmigrate-tool.adoc[tag="cb-bucket-scope-collection-section"]
+
+`--cb-batch-size` int::
+The number of documents to insert per batch. (Default: 1000)
+
+`--help`::
+Show the help screen for the Hugging Face migration.
+
+|===
+--
+====
+
+
+
+
diff --git a/modules/cli/pages/cbstats/cbstats-dcp.adoc b/modules/cli/pages/cbstats/cbstats-dcp.adoc
index 920d6044df..911172b7f6 100644
--- a/modules/cli/pages/cbstats/cbstats-dcp.adoc
+++ b/modules/cli/pages/cbstats/cbstats-dcp.adoc
@@ -7,7 +7,7 @@
 
 == Syntax
 
-Request syuntax:
+Request syntax:
 
 ....
cbstats HOST:11210 dcp @@ -42,10 +42,10 @@ For example, if your client is named `slave1`, the identifier for the DCP statis | True if the DCP stream is reserved | `supports_ack` -| True if the connection use flow control +| True if the connection uses flow control | `total_acked_bytes` -| The amount of bytes that the consumer has acknowledged +| The number of bytes that the consumer has acknowledged | `type` | The connection type (producer, consumer, or notifier) @@ -58,10 +58,10 @@ Consumer connection per-stream statistics:: | Name | Description | `buffer_bytes` -| The amount of unprocessed bytes +| The number of unprocessed bytes | `buffer_items` -| The amount of unprocessed items +| The number of unprocessed items | `end_seqno` | The sequence number where this stream should end @@ -91,14 +91,14 @@ Consumer connection per-stream statistics:: | The vBucket UUID used to create this stream |=== -Producer and notifier connection statistics:: +Producer and notifier connection statistics (stream-level statistics):: + [cols="20,67"] |=== | Name | Description | `backfilled` -| The amount of items sent from disk +| The number of items sent from disk | `cur_snapshot_end` | The end sequence number of the current snapshot being received @@ -122,7 +122,7 @@ Producer and notifier connection statistics:: | The last sequence number sent by this stream | `memory` -| The amount of items sent from memory +| The number of items sent from memory | `opaque` | The unique stream identifier @@ -143,13 +143,13 @@ Producer and notifier connection statistics:: | The vBucket UUID used in the stream request |=== -Producer and notifier connection statistics:: +Producer and notifier connection statistics (producer-level statistics):: + |=== | Name | Description | `bytes_sent` -| The amount of unacknowledged bytes sent to the consumer. +| The number of unacknowledged bytes sent to the consumer. | `connected` | True if this client is connected. @@ -161,16 +161,16 @@ Producer and notifier connection statistics:: | True if the connection uses flow control. | `items_remaining` -| The amount of items remaining to be sent. +| The number of items remaining to be sent. | `items_sent` -| The amount of items already sent to the consumer. +| The number of items already sent to the consumer. | `last_sent_time` | The last time items have been sent. | `noop_enabled` -| Indicates whether this connection sends noop's . +| Indicates whether this connection sends noops. | `noop_wait` | Indicates whether this connection is waiting for a noop response from the consumer. @@ -194,7 +194,7 @@ Producer and notifier connection statistics:: | The connection type (producer, consumer, or notifier). | `unacked_bytes` -| The amount of bytes the consumer has not acknowledged. +| The number of bytes the consumer has not acknowledged. |=== == Options diff --git a/modules/cli/pages/cbstats/cbstats-reset.adoc b/modules/cli/pages/cbstats/cbstats-reset.adoc index 53dbd25754..f8251d8fb1 100644 --- a/modules/cli/pages/cbstats/cbstats-reset.adoc +++ b/modules/cli/pages/cbstats/cbstats-reset.adoc @@ -1,5 +1,5 @@ = reset -:description: Resets the following reset and reset histogram statistics. +:description: For dev and test use only. Resets the following reset and reset histogram statistics. :page-topic-type: reference [abstract] @@ -15,7 +15,9 @@ cbstats [host]:11210 reset [options] == Description -This command resets the following `reset` and `reset histogram` statistics. +For dev and test use only. 
This command resets the following `reset` and `reset histogram` statistics.
+
+NOTE: Using the `reset` command in production can cause problems, including the inability to diagnose issues due to missing statistics.
+The `reset` command is not a single atomic operation: threads may keep updating some statistics while the reset operation clears others.
+This can cause overflows in calculated values and other statistics used by the cluster.
 
 .Reset stats
 |===
diff --git a/modules/cli/pages/cbstats/cbstats-warmup.adoc b/modules/cli/pages/cbstats/cbstats-warmup.adoc
index 229f9a664b..7a7831ec41 100644
--- a/modules/cli/pages/cbstats/cbstats-warmup.adoc
+++ b/modules/cli/pages/cbstats/cbstats-warmup.adoc
@@ -10,7 +10,7 @@
 
 The basic syntax is:
 
 ----
-cbstats [host]:[dataport] -b [bucket_name] -p [bucket_password] raw warmup
+cbstats [host]:[dataport] -b [bucket_name] -p [bucket_password] warmup
 ----
 
 == Description
@@ -53,7 +53,7 @@ Look for values: loading keys, loading access log, and done.
 Default: unknown
 
 | ep_warmup_estimate_time
-| Thne estimated time in microseconds to do warmup.
+| The time taken, measured in milliseconds, to discover the estimated number of keys that may be warmed up.
 | Integer.
 
 | ep_warmup_estimated_value_count
@@ -61,7 +61,7 @@ Default: unknown
 | Integer.
 Default: unknown
 
-| ep_warmup_keys_count
+| ep_warmup_key_count
 | Number of keys warmed up.
 | Integer
@@ -119,7 +119,10 @@ The following are the command options:
 
 *Request*
 
 ----
-cbstats 10.5.2.117:11210 warmup
+cbstats localhost:11210 warmup \
+-u Administrator \
+-p password \
+-b travel-sample
 ----
 
 *Response*
@@ -127,18 +130,18 @@ Example response:
 
 ----
- ep_warmup: enabled
- ep_warmup_dups: 0
- ep_warmup_estimate_time: 57546
- ep_warmup_estimated_key_count: 0
- ep_warmup_estimated_value_count: unknown
- ep_warmup_key_count: 0
- ep_warmup_keys_time: 529022
- ep_warmup_min_items_threshold: 100
- ep_warmup_min_memory_threshold: 100
- ep_warmup_oom: 0
- ep_warmup_state: done
- ep_warmup_thread: complete
- ep_warmup_time: 529192
- ep_warmup_value_count: 0
+ep_warmup: enabled
+ep_warmup_dups: 0
+ep_warmup_estimate_time: 36013
+ep_warmup_estimated_key_count: 63310
+ep_warmup_estimated_value_count: 63310
+ep_warmup_key_count: 63310
+ep_warmup_keys_time: 523406
+ep_warmup_min_item_threshold: 100
+ep_warmup_min_memory_threshold: 100
+ep_warmup_oom: 0
+ep_warmup_state: done
+ep_warmup_thread: complete
+ep_warmup_time: 584419
+ep_warmup_value_count: 63310
 ----
 
diff --git a/modules/cli/pages/cli-intro.adoc b/modules/cli/pages/cli-intro.adoc
index efac7f54fe..b6c756e3ec 100644
--- a/modules/cli/pages/cli-intro.adoc
+++ b/modules/cli/pages/cli-intro.adoc
@@ -1,6 +1,6 @@
 = CLI Reference
 :description: The command-line interface (CLI) tools let you manage and monitor your Couchbase Server installation including clusters, servers, vBuckets, and XDCR.
-:tools-ver: 7.6.2
+:tools-ver: 7.6.8
 
 [abstract]
 {description}
@@ -39,31 +39,35 @@ You can collect diagnostics through the command-line interface by using the xref
 
 [#server-tools-packages]
 == Server Tools Packages
 
-For convenience, Couchbase provides a tools package that contains the following utilities:
+For convenience, Couchbase provides the Server developer tools package and the Server admin tools package.
-* xref:tools:cbimport.adoc[`cbimport`]
+[#server-developer-tools-package]
+=== Server Developer Tools Package
+
+The Server developer tools package lets you install the following Enterprise Edition Server utilities on systems where you have not installed Couchbase Server:
+
+* xref:backup-restore:cbbackupmgr.adoc[`cbbackupmgr`]
 * xref:tools:cbexport.adoc[`cbexport`]
+* xref:tools:cbimport.adoc[`cbimport`]
 * xref:cli:cbq-tool.adoc[`cbq`]
-* xref:backup-restore:cbbackupmgr.adoc[`cbbackupmgr`]
 
-This package lets you install these tools on systems where you have not installed Couchbase Server.
 Download the command line tools package for your platform from the following links:
 
-* Linux: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-linux_x86_64.tar.gz[]
-* Linux aarch64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-linux_aarch64.tar.gz[]
-* macOS: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-macos_x86_64.zip[]
-* macOS arm64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-macos_arm64.zip[]
-* Windows: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-windows_amd64.zip[]
+* Linux: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-linux_x86_64.tar.gz[]
+* Linux aarch64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-linux_aarch64.tar.gz[]
+* macOS: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-macos_x86_64.zip[]
+* macOS arm64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-macos_arm64.zip[]
+* Windows: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-windows_amd64.zip[]
 
 Unzip or untar the packages, and the binaries are ready to run.
 For example:
 
 [source,console,subs="attributes+"]
 ----
-wget https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-tools_{tools-ver}-linux_x86_64.tar.gz
+wget https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-dev-tools-{tools-ver}-linux_x86_64.tar.gz
 
-tar -xf couchbase-server-tools_{tools-ver}-linux_x86_64.tar.gz
+tar -xf couchbase-server-dev-tools-{tools-ver}-linux_x86_64.tar.gz
 ----
 
 Each package also contains a `README` file and a copy of the software license.
 
 NOTE: On Windows, you must have a recent version of the Microsoft Visual {cpp} Redistributable runtime libraries installed.
 If you do not have these libraries installed, download them from https://docs.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170[Microsoft Visual {cpp} Redistributable latest supported downloads].
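+
+After unpacking, it is worth confirming that the binaries run before pointing them at a live cluster.
+A minimal sketch (the extraction directory, cluster address, and credentials below are placeholders):
+
+[source,console]
+----
+cd couchbase-server-dev-tools    # placeholder extraction directory
+./cbbackupmgr --version          # each utility can report its version
+./cbq -e http://localhost:8091 -u Administrator -p password
+----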
+=== Server Admin Tools Package
+
+The Server admin tools package lets you install the following Server utilities on systems where you have not installed Couchbase Server:
+
+* xref:backup-restore:cbbackupmgr.adoc[`cbbackupmgr`]
+* https://docs.couchbase.com/sdk-api/couchbase-c-client/md_doc_2cbc.html[`cbc`]
+* xref:tools:cbdatarecovery.adoc[`cbdatarecovery`]
+* xref:tools:cbexport.adoc[`cbexport`]
+* xref:tools:cbimport.adoc[`cbimport`]
+* xref:cli:cbq-tool.adoc[`cbq`]
+* xref:cli:cbstats-intro.adoc[`cbstats`]
+* xref:cli:cbcli/couchbase-cli.adoc[`couchbase-cli`]
+* xref:cli:mcstat.adoc[`mcstat`]
+* xref:cli:mctimings.adoc[`mctimings`]
+* xref:cli:mctestauth.adoc[`mctestauth`]
+
+Download the command line tools package for your platform from the following links:
+
+* Linux: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-linux_x86_64.tar.gz[]
+* Linux aarch64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-linux_aarch64.tar.gz[]
+* macOS: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-macos_x86_64.zip[]
+* macOS arm64: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-macos_arm64.zip[]
+* Windows: https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-windows_amd64.zip[]
+
+Unzip or untar the packages, and the binaries are ready to run.
+For example:
+
+[source,console,subs="attributes+"]
+----
+wget https://packages.couchbase.com/releases/{tools-ver}/couchbase-server-admin-tools-{tools-ver}-linux_x86_64.tar.gz
+
+tar -xf couchbase-server-admin-tools-{tools-ver}-linux_x86_64.tar.gz
+----
+
+Each package also contains a `README` file and a copy of the software license.
+
+NOTE: On Windows, you must have a recent version of the Microsoft Visual {cpp} Redistributable runtime libraries installed.
+If you do not have these libraries installed, download them from https://docs.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist[Microsoft Visual {cpp} Redistributable latest supported downloads].
+
+
 [#version-compatibility]
 == Version Compatibility
 
 The versions of the utilities installed by the tools package are the same as the corresponding Couchbase Server installation package.
-The {tools-ver} `cbimport`, `cbexport`, `cbbackupmgr`, and `cbq` utilities are compatible with the following Couchbase Server versions:
+For the Server developer tools package, the {tools-ver} versions of the `cbimport`, `cbexport`, `cbbackupmgr`, and `cbq` utilities are compatible with the following Couchbase Server versions:
 
-* 7.6.0, 7.6.1
+* 7.6.0, 7.6.1, 7.6.2, 7.6.3
 * 7.2.x
 * 7.1.x
 * 7.0.x
+
+You can download and use the latest version of the utilities (`cbimport`, `cbexport`, `cbbackupmgr`, and `cbq`) with earlier Couchbase Server versions.
+
+For the Server admin tools package, note the following:
+
+* Use the `--version` option to get the version of a utility. However, for the `cbc` utility, use the `cbc version` command instead. See the example below.
+* `couchbase-cli` is a utility for administering the Couchbase cluster. Make sure that the versions of `couchbase-cli` and the Couchbase cluster match.
+* For all utilities, a feature that exists only in a later Server version is not supported by an earlier version of the utility.
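+
+The version checks above might look like the following (run from the directory where you unpacked the admin tools package):
+
+[source,console]
+----
+./couchbase-cli --version   # should match the version of the cluster it administers
+./cbbackupmgr --version
+./cbc version               # cbc reports its version through a subcommand
+----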
diff --git a/modules/cli/pages/finderr.adoc b/modules/cli/pages/finderr.adoc
new file mode 100644
index 0000000000..c5a5090041
--- /dev/null
+++ b/modules/cli/pages/finderr.adoc
@@ -0,0 +1,221 @@
+= finderr
+:description: pass:q[The [.cmd]`finderr` tool returns the full details of any Query service or cbq shell error.]
+:page-status: Couchbase Server 7.6.4
+:page-topic-type: reference
+
+[abstract]
+{description}
+
+== Syntax
+
+The basic syntax is:
+
+----
+finderr
+----
+
+== Description
+
+ifeval::['{page-component-version}' == '7.6']
+This tool is only available in Couchbase Server 7.6.4 and later.
+endif::[]
+
+Depending on your platform, this tool is at the following locations:
+
+[cols="1,3"]
+|===
+| Operating system | Location
+
+| Linux
+| [.path]`/opt/couchbase/bin/`
+
+| Windows
+| [.path]`C:\Program Files\Couchbase\Server\bin\`
+
+| Mac OS X
+| [.path]`/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin/`
+|===
+
+== Options
+
+The tool takes a single argument, which must be one of the following:
+
+number::
+A number representing an error code.
+In this case, the tool returns the full details of the error matching the error code.
+
+string::
+A string.
+In this case, the tool searches for the target string in all of the error message fields except for `USER ERROR`, and returns the full details of any errors that match the string.
+
+regex::
+A regular expression.
+In this case, the tool searches for the regular expression in all of the error message fields except for `USER ERROR`, and returns the full details of any errors that match the pattern.
+
+== Output
+
+If the tool finds a single error that matches the find argument, it outputs the full details of the error.
+
+If the tool finds multiple errors that match the find argument, it outputs a list showing the code and description of each matching error.
+You can use the tool again, passing the code or description as an argument, to get the full details of any of these errors.
+
+Full error details include some or all of the following fields.
+
+[options="header", cols="50a,147a"]
+|===
+|Name|Description
+
+|**APPLIES TO**
+|One of the following:
+
+* `cbq-shell`: The error applies to the cbq shell.
+* `Server`: The error applies to the server.
+
+|**CODE**
+|A number representing the error.
+
+|**DESCRIPTION**
+|Message describing why the error occurred.
+
+|**REASON**
+|List of possible causes of the error.
+
+|**USER ACTION**
+|List of possible steps a user can take to mitigate the error.
+
+|**USER ERROR**
+|One of the following:
+
+* `Yes`: The error was caused by the user.
+* `No`: The error was caused by other services, or was internal to the server.
+* `Maybe`: A combination of both.
+|===
+
+NOTE: The error details also include a `SYMBOL` field, which contains a representation string for the error.
+This field is for internal use only, and is not shown in the output.
+However, the tool does search this field when the find argument is a string or a regular expression.
+
+== Examples
+
+[[finderr-ex1]]
+.Find error details by code number
+====
+.Command
+[source,sh]
+----
+./finderr 5011
+----
+
+.Output
+----
+CODE
+ 5011 (error)
+
+
+DESCRIPTION
+ Abort: «reason»
+
+
+REASON
+ The SQL++ abort() function was called in the statement.
+ e.g. SELECT abort('An example cause')
+
+
+USER ERROR
+ Yes
+
+
+APPLIES TO
+ Server
+----
+====
+
+[[finderr-ex2]]
+.Find error details by matching a string
+====
+.Command
+[source,sh]
+----
+./finderr "A semantic error is present in the statement."
+----
+
+.Output
+----
+CODE
+ 3100 (error)
+
+
+DESCRIPTION
+ A semantic error is present in the statement.
+
+
+REASON
+ The statement includes portions that violate semantic constraints.
+
+
+USER ACTION
+ The cause will contain more detail on the violation; revise the statement and re-submit.
+
+
+USER ERROR
+ Yes
+
+
+APPLIES TO
+ Server
+----
+====
+
+[[finderr-ex3]]
+.Find multiple errors by matching a string
+====
+.Command
+[source,sh]
+----
+./finderr "semantic"
+----
+
+.Output
+----
+Matching errors
+ 3100 A semantic error is present in the statement.
+ 3220 «name» window function «clause» «reason»
+ 3300 recursive_with semantics: «cause»
+----
+====
+
+[[finderr-ex4]]
+.Find multiple errors by matching a regular expression
+====
+.Command
+[source,sh]
+----
+./finderr "[UI][NP]SERT"
+----
+
+.Output
+----
+Matching errors
+ 3150 MERGE with ON KEY clause cannot have document key specification in INSERT action.
+ 3160 MERGE with ON clause must have document key specification in INSERT action
+ 3180 MERGE with ON KEY clause cannot have USE INDEX hint specified on target.
+ 5006 Out of key validation space.
+ 5050 No INSERT key for «document»
+ 5060 No INSERT value for «document»
+ 5070 Cannot INSERT non-string key «key» of type «type»
+ 5071 Cannot INSERT non-OBJECT options «options» of type «type»
+ 5072 No UPSERT key for «value»
+ 5073 Cannot act on the same key multiple times in an UPSERT statement
+ 5075 No UPSERT value for «value»
+ 5078 Cannot UPSERT non-string key «key» of type «type».
+ 5079 Cannot UPSERT non-OBJECT options «value» of type «type».
+ 5330 Multiple INSERT of the same document (document key «key») in a MERGE statement
+ 12036 Error in INSERT of key: «key»
+ 15005 No keys to insert «details»
+----
+====
+
+== See Also
+
+* The {sqlpp} xref:n1ql:n1ql-language-reference/metafun.adoc#finderr[FINDERR()] function
+* xref:n1ql:n1ql-language-reference/n1ql-error-codes.adoc[]
diff --git a/modules/cli/pages/mcstat.adoc b/modules/cli/pages/mcstat.adoc
index 3a77ecccd7..829c04781b 100644
--- a/modules/cli/pages/mcstat.adoc
+++ b/modules/cli/pages/mcstat.adoc
@@ -1,15 +1,11 @@
 = mcstat
-:description: pass:q[The `mcstat` tool provides memory-related information for a specified bucket, or for all buckets on a cluster.]
+:description: pass:q[The mcstat tool provides detailed information for a node, specified bucket, or for all buckets on a cluster.]
 :page-topic-type: reference
 :page-aliases: cli:cbstats/cbstats-allocator
 
 [abstract]
 {description}
 
-== Description
-
-The `mcstat` tool provides memory-related information for a specified bucket, or for all buckets on a cluster.
-
 The tool is located as follows:
@@ -40,9 +36,7 @@ The `options` are as follows:
 | Options | Description
 
 | `-h[=statkey]`or `--help[=statkey]`
-| Show the help message and exit.
-If `=statkey` is not specified, the common options for the command are listed.
-If `=statkey` _is_ specified, the available _statkeys_ for the command are listed instead.
+| Outputs possible `statkey` values with descriptions and indications of the stat's scope and required privileges.
 
 | `-h` or `--hostname`, with the parameter `hostname:port` (for IPv4), or `[address]:port` (for IPv6)
 | The name of the host (and optionally, the port number) to connect to.
diff --git a/modules/cli/pages/mctestauth.adoc b/modules/cli/pages/mctestauth.adoc
new file mode 100644
index 0000000000..0c5b56b4c7
--- /dev/null
+++ b/modules/cli/pages/mctestauth.adoc
@@ -0,0 +1,93 @@
+= mctestauth
+:description: pass:q[The mctestauth tool allows you to troubleshoot authentication issues in data services.]
+:page-topic-type: reference
+
+[abstract]
+{description}
+
+== Description
+
+The `mctestauth` tool allows you to troubleshoot authentication issues in data services.
+Data services use the https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer[Simple Authentication and Security Layer] (SASL) framework for authentication and provide access to the underlying mechanisms, such as SCRAM-SHA512 or PLAIN.
+
+The tool is located as follows:
+
+[cols="2,3"]
+|===
+| Platform | Location
+
+| Linux
+| [.path]_/opt/couchbase/bin/mctestauth_
+
+| Windows
+| [.path]_C:\Program Files\Couchbase\Server\bin\mctestauth.exe_
+
+| Mac OS X
+| [.path]_/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin/mctestauth_
+|===
+
+== Syntax
+
+----
+mctestauth [options]
+----
+
+[#options]
+The `options` are as follows:
+
+[cols="1,2"]
+|===
+| Options | Description
+
+| `--host`, with the parameter `<hostname>`
+| The name of the host (with an optional port) to connect to. For IPv6, use `[address]:port` if you need to specify the port.
+
+| `--user <username>`
+| The username to be used in authentication.
+
+| `--password <password>`
+| The password to be used in authentication.
+
+| `--tls`
+| Optionally, use TLS to authenticate.
+
+| `--ipv4`
+| Connect over IPv4.
+
+| `--ipv6`
+| Connect over IPv6.
+
+| `--version`
+| Display the program version.
+
+| `--no-color`
+| Print output without any colors.
+
+| `--help`
+| Display help text.
+|===
+
+== Example
+
+The following call tries to connect to the data server running on the host `192.168.86.101`.
+The call then tries the various authentication mechanisms supported by the server and prints out the result of each mechanism.
+The call succeeds only if you provide the correct credentials.
+
+----
+./mctestauth --host 192.168.86.101 --user jones --password password
+----
+
+If successful, the command returns output as follows:
+
+----
+ SCRAM-SHA512: OK
+ SCRAM-SHA256: OK
+ SCRAM-SHA1: OK
+ PLAIN: OK
+----
+
+If `jones` is defined as an external user in LDAP, the output is as follows:
+
+----
+ SCRAM-SHA512: FAILED
+ SCRAM-SHA256: FAILED
+ SCRAM-SHA1: FAILED
+ PLAIN: OK
+----
diff --git a/modules/cloud/pages/couchbase-aws-syncgateway-deployment.adoc b/modules/cloud/pages/couchbase-aws-syncgateway-deployment.adoc
index 2ac0eb8777..2f2ba2e971 100644
--- a/modules/cloud/pages/couchbase-aws-syncgateway-deployment.adoc
+++ b/modules/cloud/pages/couchbase-aws-syncgateway-deployment.adoc
@@ -3,7 +3,6 @@
 
 [abstract]
 {description}
-
 This solution is based on Amazon CloudFormation templates that incorporate the latest features and best practices for deploying Couchbase Server on Amazon Web Services.
 Couchbase Sync Gateway on AWS Marketplace provides one of the fastest and easiest ways to get up and running on Amazon Web Services (AWS).
diff --git a/modules/connectors/pages/intro.adoc b/modules/connectors/pages/intro.adoc
index 0be0ec3aa8..0af6c0c533 100644
--- a/modules/connectors/pages/intro.adoc
+++ b/modules/connectors/pages/intro.adoc
@@ -7,8 +7,10 @@
 
 Connectors are provided for:
 
+* xref:superset-connector::index.adoc[Apache Superset Connector]
 * xref:elasticsearch-connector::index.adoc[Elasticsearch]
 * xref:kafka-connector::index.adoc[Kafka]
+* xref:power-bi-connector::index.adoc[Power BI Connector]
 * xref:spark-connector::index.adoc[Spark]
 * xref:tableau-connector::index.adoc[Tableau]
 * xref:odbc-jdbc-drivers.adoc[Couchbase ODBC and JDBC Drivers]
diff --git a/modules/connectors/pages/odbc-jdbc-drivers.adoc b/modules/connectors/pages/odbc-jdbc-drivers.adoc
index 93bd83fa45..da99b6c990 100644
--- a/modules/connectors/pages/odbc-jdbc-drivers.adoc
+++ b/modules/connectors/pages/odbc-jdbc-drivers.adoc
@@ -1,5 +1,5 @@
 = Couchbase ODBC and JDBC Drivers
-:description: The ODBC and JDBC drivers enable any application based on the ODBC/JDBC standards, for example Microsoft Excel, QlikView, SAP Lumira, or Tableau, to connect to a Couchbase Server or cluster.
+:description: The ODBC and JDBC drivers enable any application based on the ODBC/JDBC standards (for example, Microsoft Excel, QlikView, SAP Lumira, or Tableau) to connect to a Couchbase Server or cluster.
 
 {description}
 These drivers enable fast and seamless access to Couchbase data.
@@ -8,5 +8,10 @@ Using these drivers, the applications can leverage on the powerful {sqlpp} funct
 Couchbase ODBC and JDBC drivers are compliant with ODBC 3.8 and JDBC 4.0/4.1 standards, and compatible with ANSI SQL-92.
 With the support for {sqlpp}, these drivers empower both ODBC/JDBC and {sqlpp} applications to connect to Couchbase server.
 The drivers are available from our preferred and supported connectivity partner, CDATA (http://www.cdata.com/drivers/[^]).
-For more information, refer to https://www.couchbase.com/sql-integration[^].
+For more information, refer to https://www.couchbase.com/developers/integrations/?query=jdbc[the developer integrations page^].
 For more information on downloading the drivers and complete documentation, refer to our xref:server:develop:integrations.adoc#partner-integrations-with-couchbase[partner integrations page].
+
+Capella Provisioned, Couchbase Analytics, and Couchbase Columnar offer native BI connectivity for several popular BI tools.
+For more information on how to use these tools,
+refer to xref:columnar:query:bi.adoc[Business Intelligence Tools].
+
diff --git a/modules/fts/pages/fts-advanced-settings-ftsMemoryQuota.adoc b/modules/fts/pages/fts-advanced-settings-ftsMemoryQuota.adoc
index ddea8d0ecd..eaf52cf16f 100644
--- a/modules/fts/pages/fts-advanced-settings-ftsMemoryQuota.adoc
+++ b/modules/fts/pages/fts-advanced-settings-ftsMemoryQuota.adoc
@@ -1,28 +1,18 @@
 = ftsMemoryQuota
 
-The `ftsMemoryQuota` setting controls the maximum usable memory for the FTS Search service from the total amount of RAM available in the system.
+The `ftsMemoryQuota` setting controls the maximum usable memory, in MiB, available to the Search Service on each node in your cluster.
+This RAM allocation applies to each node that has the Search Service in one of its Service Groups.
 
-Sparing enough RAM memory for the filesystem cache is crucial to override the ftsMemoryQuota.
+You can configure the `ftsMemoryQuota` from the Couchbase Server Web Console at any time.
 
-The FTS Search service recommends a minimum of 512 MB memory-resident ratio for its index.
However, users can spare more memory quota for a healthy resident ratio of the index. This lets the system have sufficient memory available to perform indexing, querying, or other lifecycle operations like rebalances, etc. +If you're using the Search Service as the only Service on a node, set the `ftsMemoryQuota` to 80% or less of the total available RAM for that node. +This guideline leaves enough RAM for your operating system to manage its on-demand filesystem cache. +The search process memory maps your index files to support fast access to indexed content. -Using the manager option, users can control the FTS/Search service's memory quota at run time without a service reboot. +== Configure the ftsMemoryQuota -== Spare enough memory for filesystem cache -Another important aspect while configuring the Search memory quota is to leave sufficient leeway RAM for the Operating System to manage the file system cache. +To configure the `ftsMemoryQuota` setting: -The Search’s internal text indexing library (bleve) uses memory mapping for the index files, so having enough RAM extra for the operating system helps in keeping the hot regions of index files in the file system cache memory. - -This helps in better search performance. -The usual guideline is to set the Search memory quota to 60-70% of the available RAM in a Search node. - -Configuring enough RAM memory in the system and allocating sufficient Search quota memory is essential for optimal search performance. - -=== Example - -[source,console] ----- -curl -XPUT -H "Content-type:application/json" http://username:password@:8094/api/managerOptions \-d '{ - "ftsMemoryQuota": "1024" -} ----- \ No newline at end of file +. From the Couchbase Server Web Console, go to *Settings*. +. Under *Memory Quotas*, in the *Search* field, enter a value, in MiB, for the total amount of RAM you want to allocate to the Search Service for each node in your cluster. +. Click btn:[Save]. \ No newline at end of file diff --git a/modules/fts/pages/fts-pagination.adoc b/modules/fts/pages/fts-pagination.adoc deleted file mode 100644 index 696635715e..0000000000 --- a/modules/fts/pages/fts-pagination.adoc +++ /dev/null @@ -1,107 +0,0 @@ -[#pagination] -= Pagination - -The number of results obtained for a Full Text Search request can be large. Pagination of these results becomes essential for sorting and displaying a subset of these results. - -There are multiple ways to achieve pagination with settings within a search request. Pagination will fetch a deterministic set of results when the results are sorted in a certain fashion. - -Pagination provides the following options: - -== Size/from or offset/limit - -This pagination settings can be used to obtain a subset of results and works deterministically when combined with a certain sort order. - -Using `size/limit` and `offset/from` would fetch at least `size + from` ordered results from a partition and then return the `size` number of results starting at offset `from`. - -Deep pagination can therefore get pretty expensive when using `size + from` on a sharded index due to each shard having to possibly return large resultsets (at least `size + from`) over the network for merging at the coordinating node before returning the `size` number of results starting at offset `from`. - -The default sort order is based on _score_ (relevance) where the results are ordered from the highest to the lowest score. 
- -=== Example - -Here's an example query that fetches results from the 11th onwards to the 15th that have been ordered by _score_. - ----- -{ - "query": { - "match": "California", - "field": "state" - }, - "size": 5, - "from": 10 -} ----- - -//---- -//{ -// "query": { -// "match": "California", -// "field": "state" -// }, -// "offset": 5, -// "limit": 10 -//} -//---- - -== search_after, search_before - -For an efficient pagination, you can use the `search_after/search_before` settings. - -`search_after` is designed to fetch the `size` number of results after the key specified and `search_before` is designed to fetch the `size` number of results before the key specified. - -These settings allow for the client to maintain state while paginating - the sort key of the last result (for search_after) or the first result (for search_before) in the current page. - -Both the attributes accept an array of strings (sort keys) - the length of this array will need to be the same length of the "sort" array within the search request. - -NOTE: You cannot use both `search_after` and `search_before` in the same search request. - -=== Example - -Here are some examples using `search_after/search_before` over sort key "_id" (an internal field that carries the document ID). - ----- -{ - "query": { - "match": "California", - "field": "state" - }, - "sort": ["_id"], - "search_after": ["hotel_10180"], - "size": 3 -} ----- - ----- -{ - "query": { - "match": "California", - "field": "state" - }, - "sort": ["_id"], - "search_before": ["hotel_17595"], - "size": 4 -} ----- - -NOTE: A Full Text Search request that doesn't carry any pagination settings will return the first 10 results (`"size: 10", "from": 0`) ordered by _score_ sequentially from the highest to lowest. - -== Pagination tips and recommendations - -The pagination of search results can be done using the 'from' and 'size' parameters in the search request. But as the search gets into deeper pages, it starts consuming more resources. - -To safeguard against any arbitrary higher memory requirements, FTS provides a configurable limit bleveMaxResultWindow (10000 default) on the maximum allowable page offsets. However, bumping this limit to higher levels is not a scalable solution. - -To circumvent this problem, the concept of key set pagination in FTS, is introduced. - -Instead of providing _from_ as a number of search results to skip, the user will provide the sort value of a previously seen search result (usually, the last result shown on the current page). The idea is that to show the next page of the results, we just want the top N results of that sort after the last result from the previous page. - -This solution requires a few preconditions be met: - -* The search request must specify a sort order. -NOTE: The sort order must impose a total order on the results. Without this, any results which share the same sort value might be left out when handling the page navigation boundaries. - -A common solution to this is to always include the document ID as the final sort criteria. - -For example, if you want to sort by [“name”, “-age”], instead of sort by [“name”, “-age”, “_id”]. - -With `search_after`/`search_before` paginations, the heap memory requirement of deeper page searches is made proportional to the requested page size alone. So it reduces the heap memory requirement of deeper page searches significantly down from the offset+from values. 
\ No newline at end of file diff --git a/modules/getting-started/assets/images/collectLogInformationOptions.png b/modules/getting-started/assets/images/collectLogInformationOptions.png new file mode 100644 index 0000000000..0f52991594 Binary files /dev/null and b/modules/getting-started/assets/images/collectLogInformationOptions.png differ diff --git a/modules/getting-started/assets/images/eventLogging.png b/modules/getting-started/assets/images/eventLogging.png new file mode 100644 index 0000000000..37a254bc8d Binary files /dev/null and b/modules/getting-started/assets/images/eventLogging.png differ diff --git a/modules/getting-started/assets/images/showDashboardIndex.png b/modules/getting-started/assets/images/showDashboardIndex.png new file mode 100644 index 0000000000..692e76e05e Binary files /dev/null and b/modules/getting-started/assets/images/showDashboardIndex.png differ diff --git a/modules/getting-started/pages/do-a-quick-install.adoc b/modules/getting-started/pages/do-a-quick-install.adoc index 4a1d85afeb..0823bdf07d 100644 --- a/modules/getting-started/pages/do-a-quick-install.adoc +++ b/modules/getting-started/pages/do-a-quick-install.adoc @@ -10,7 +10,7 @@ Once you install Docker, you can use a single command to download and install a NOTE: The Docker container image that is used in this topic is meant to set up a 'sandbox' version of Couchbase Server. It's based on the Enterprise Edition and contains scripts that automatically configure several Couchbase components during installation. Although this particular image works great for a demo, it is not meant for production. -For information about using containers to run Couchbase Server in production, see the xref:cloud:couchbase-cloud-deployment.adoc[Cloud and Container Deployment Overview]. +For information about installing Couchbase Server in production, see xref:install:get-started.adoc[]. [#initialize-cluster-web-console] == Install Couchbase Server @@ -25,7 +25,8 @@ Open a console window on your computer and enter the following command: [source,console] ---- -docker run -t --name db -p 8091-8096:8091-8096 -p 11210-11211:11210-11211 couchbase/server:enterprise-7.2.0 +docker run -t --name db -p 8091-8096:8091-8096 -p 11210-11211:11210-11211 \ + couchbase/server:enterprise ---- When you run the command, Docker downloads and installs Couchbase Server. You should see the following message once Couchbase Server is started in a Docker virtual environment: diff --git a/modules/getting-started/pages/look-at-the-results.adoc b/modules/getting-started/pages/look-at-the-results.adoc index 302f30dc9e..0cc2e59256 100644 --- a/modules/getting-started/pages/look-at-the-results.adoc +++ b/modules/getting-started/pages/look-at-the-results.adoc @@ -1,7 +1,7 @@ = Explore the Server Configuration :page-pagination: :imagesdir: ../assets/images -:description: Once you have Couchbase Server running, you can log into the Couchbase Server Web Console, and start to examine the different features that it provides. +:description: Once you have Couchbase Server running, you can log into the Couchbase Server Web Console and start to examine the different features that it provides. [abstract] {description} @@ -23,6 +23,7 @@ The Docker image that you installed comes pre-configured with a default username Type these credentials into the appropriate fields and click btn:[Sign In]. 
+[#examine-the-cluster-dashboard]
 == Examine the Cluster Dashboard
 
 Every time you log into the Web Console, you are brought to the [.ui]*Cluster Dashboard*:
@@ -30,22 +31,23 @@ Every time you log into the Web Console, you are brought to the [.ui]*Cluster Da
 [#cluster_overview]
 image::ClusterOverview.png["The Cluster Dashboard",720]
 
-The [.ui]*Cluster Dashboard* provides a graphical summary of the current state of your Couchbase cluster.
+The [.ui]*Cluster Dashboard* provides a graphical summary of the Couchbase cluster's current state.
 The term _cluster_ might seem unexpected at this point, since you are only running a single instance of Couchbase Server; but nevertheless, it counts as a cluster of one.
 
-All of the values that are displayed on this screen were automatically configured by the sandbox container image during installation.
-In production, you will specify these values individually according to your needs.
+All the values that are displayed on this screen were automatically configured by the sandbox container image during installation.
+In production, you will specify these values individually, according to your needs.
 
 Notice the information panel at the very bottom.
 This shows that you have a single active bucket on the system — _bucket_ meaning a logical group of data-items.
-Taking a closer look at this bucket will give you some idea of how Couchbase stores data, and prepare you to make your first data-queries.
+Taking a closer look at this bucket will give you some idea
+of how Couchbase stores data, and prepare you to make your first data-queries.
 
 == Examine Your Bucket and Its Documents
 
 Click menu:Buckets[] in the left-hand navigation bar to bring up the [.ui]*Buckets* screen.
 
 The [.ui]*Buckets* screen shows that you have a single active bucket on the system (_bucket_ meaning a logical group of data-items).
-Taking a closer look at this bucket will give you some idea of how Couchbase stores data, and prepare you to make your first data-queries:
+Taking a closer look at this bucket will give you some idea of how Couchbase stores data and prepare you to make your first data-queries:
 
 [#travel_sample_bucket_screen_initial]
 image::travelSampleBucketScreenInitial.png["The Buckets screen",720]
 
 Click [.ui]*Scopes and Collections* to inspect the scopes and collections contained within the bucket:
 
 image::travelSampleScopesScreen.png["The travel-sample Scopes screen",720]
 
 The [.ui]*Scopes and Collections* screen shows, in a succession of page-views, the scopes that are contained within the bucket.
-To see the collections that are contained within a given scope, left-click on the row for the scope -- for example, for the `inventory` scope:
+To see the collections that are contained within a given scope,
+left-click on the row for the scope -- for example, for the `inventory` scope:
 
 [#travel_sample_collections_screen]
 image::travelSampleCollectionsScreen.png["The travel-sample Scopes screen, showing the inventory collections",720]
 
 In Couchbase Server 7.0 and later, all documents must be contained in a scope and collection.
 If you import a dataset that was created in earlier versions of Couchbase Server, the documents are automatically saved within a `_default` scope and a `_default` collection.
 For your initial work with the system, this will be fine.
-But as you continue, and you create more documents, your will benefit from using scopes and collections to organize those documents in the best way: this will make data-access intuitive and clear.
+But as you continue, and you create more documents, you will benefit from using scopes and collections to organize those documents in the best way: this will make data-access intuitive and clear.

See the xref:getting-started:/look-at-the-results.adoc#other-destinations[Other Destinations] section, below, for a link to information on creating and managing your own scopes and collections.

@@ -84,7 +87,7 @@ The following document retrieval controls are provided:
The documents within the selected collection are those that will be retrieved.
(For information on scopes and collections, see xref:learn:data/scopes-and-collections.adoc[Scopes and Collections]).
* *Limit*: The maximum number of rows (documents) to retrieve and display at once.
-* *Offset*: The number of documents in the entire set of the current collection that should be skipped, before display begins.
+* *Offset*: The number of documents in the entire set of the current collection that should be skipped before display begins.
Notice that when you click [.ui]*Next Batch >*, the [.ui]*Offset* increases by the same value that is specified in [.ui]*Limit*.
* *Document ID*: Accepts the ID of a specific document.
Leave this field blank to retrieve documents based on *Limit* and *Offset*.
@@ -103,14 +106,14 @@ image::editDocumentDialog.png["The Edit Document dialog",480]
+
The document consists of a series of _key-value_ pairs (or, as they are sometimes expressed, _name-value_ pairs).
You can make modifications to key-values directly in this editor.
-As will be demonstrated later, Couchbase Server allows you to search for keys, and return the corresponding values, by means of a _query_.
+As will be demonstrated later, Couchbase Server allows you to search for keys and return the corresponding values by using a _query_.
For example, here, if you searched on the name `country`, you would return the value `United States`; if on the name `icao`, the value `MLA`.
+
If you make changes in the [.ui]*Edit Document* dialog, click [.ui]*Save* to save your changes.
If you want to create a new document based on an existing document, you can click the *Make a copy of this document* button (described next).
If you want to create an entirely new document, you can click the btn:[ADD DOCUMENT] button in the upper-right.
+
Note that Couchbase Web Console supports the editing of documents that are up to 10 MB in size (although documents on Couchbase Server can be up to 20 MB in size).

* *Make a copy of this document*: Click this button to bring up the [.ui]*Save As* dialog, which allows you to create a new document based on the existing one:
+
@@ -127,6 +130,48 @@ Click it to save your changes to the document.

To view successive sets of documents, use the [.ui]*Next Batch >* and [.ui]*< Prev Batch* buttons.

+[#examining-the-cluster-logs]
+== Examining the Cluster's Logs
+
+Couchbase Server maintains a continuous, configurable log of alerts and operations,
+a subset of which can be examined from the Web Console.
+
+Start by selecting menu:Logs[] from the left-hand navigation menu.
+
+image::eventLogging.png[]
+
+You will be presented with a screen containing a series of events logged by the cluster.
+
+You can filter the log entries by entering a string in the [.ui]*filter logs ...* field.
+
+You can also opt to have logging data written to a file or a bucket on a Couchbase node
+by selecting the menu:Collect Information[] option from the top menu:
+
+image::collectLogInformationOptions.png[]
+
+The [.ui]*Collect Information* screen will be displayed, giving you a number of options for collecting logs.
+As well as specifying the nodes logged, you can also specify a location for writing the logging data.
+
+Once the options have been set, click on btn:[Start Collecting] to start recording logs to your specified location.
+
+TIP: If you are having a problem with your cluster setup, it may be useful to provide Couchbase Support with a log of the server activity.
+
+[#examining-the-cluster-metrics]
+== Examining the Cluster's Metrics
+
+As well as examining the state of the cluster from the Web Console (see <<examine-the-cluster-dashboard,Examine the Cluster Dashboard>>),
+you can monitor more specific elements of the cluster's operation, such as the indexes, in real time:
+
+. Select [.ui]*All Services* from the menu:Choose Dashboard[] dropdown menu.
+. The page will expand to include all the services available on the node.
+Click on [.ui]*Index* and you will be presented with a series of graphs depicting real-time metrics for the indexes running on the node.
++
+image::showDashboardIndex.png["Index from UI dashboard", 720]
++
+TIP: The graphs will allow you to see if loads across multiple nodes differ significantly.
+
+
[#other-destinations]
== Other Destinations

diff --git a/modules/install/pages/couchbase-migrations.adoc b/modules/install/pages/couchbase-migrations.adoc
new file mode 100644
index 0000000000..3f0c1b74ec
--- /dev/null
+++ b/modules/install/pages/couchbase-migrations.adoc
@@ -0,0 +1,27 @@
+= Migrating your Data to Couchbase
+:description: Couchbase offers a number of options for migrating your data from other platforms to Couchbase Server/Capella.
+
+[abstract]
+{description}
+
+Data migration can take one of two forms:
+
+== Data Migration from earlier versions of Couchbase
+
+With the release of Couchbase Version 7.0, we added support for scopes and collections, which provides a more flexible abstraction for your data.
+
+For more information, see xref:learn:data/scopes-and-collections.adoc[].
+
+For more information on migrating from earlier versions of Couchbase, see xref:install:migrating-application-data.adoc[].
+
+== Migrating your data from other platforms
+
+You have the option of migrating your data from other platforms to Couchbase Server or Couchbase Capella.
+
+We have a command-line tool (`cbmigrate`) for this purpose, which currently supports migration from:
+
+* https://www.mongodb.com[MongoDB]
+* https://aws.amazon.com/dynamodb/[DynamoDB]
+* https://huggingface.co[Hugging Face]
+
+For more information, see xref:cli:cbmigrate-tool.adoc[].
diff --git a/modules/install/pages/getting-started-docker.adoc b/modules/install/pages/getting-started-docker.adoc
index 1b59a4fcce..92b9734541 100644
--- a/modules/install/pages/getting-started-docker.adoc
+++ b/modules/install/pages/getting-started-docker.adoc
@@ -4,6 +4,8 @@
[abstract]
{description}

+:shutdown-instructions-link: pass:q[(For instructions on starting up or shutting down a standalone instance of Couchbase Server, see xref:startup-shutdown.adoc[Starting and stopping the Couchbase Server]).]
+
If you're trying Couchbase Server for the first time and just want to explore a Couchbase configuration, the quickest way to install a pre-configured single-node deployment using Docker is to follow the xref:getting-started:start-here.adoc[Get Started] tutorial.

For more traditional Docker deployments, use the following sections below:
@@ -23,7 +25,7 @@ To run a single-node cluster, you will need to deploy a single container represe
====
For detailed information about deploying Couchbase Server, make sure to review the Couchbase Server xref:plan-for-production.adoc[system requirements] and xref:install-production-deployment.adoc[deployment guidelines], paying particular attention to the following pages:

-* xref:best-practices-vm.adoc[]
+* xref:best-practices-vm.adoc[]
* xref:deployment-considerations-lt-3nodes.adoc[].
====

@@ -50,7 +52,8 @@ For example:

After running the above command, a single instance (`db`) of the latest https://hub.docker.com/_/couchbase/[official Couchbase Server container image^] is downloaded and run on the host computer.
If a traditional installation of Couchbase Server is running locally on the host computer, the port mappings specified using the `-p` option may fail.
-Ensure that you stop any local instance of Couchbase Server before running this command.
+Ensure that you stop any local instance of Couchbase Server before running this command.
+
+{shutdown-instructions-link}

////
// Removed this statement as it is questionable whether it is actually supported.
[TIP]
@@ -145,7 +148,8 @@ $ docker run -d --name db3 -p 8091-8096:8091-8096 -p 11210-11211:11210-11211 cou

After running the above commands, three instances (`db1`, `db2`, `db3`) of the latest https://hub.docker.com/_/couchbase/[official Couchbase Server container image^] are downloaded and run on the host computer.
If a traditional installation of Couchbase Server is running locally on the host computer, the port mappings specified using the `-p` option may fail.
-Ensure that you stop any local instance of Couchbase Server before running these commands.
+Ensure that you stop any local instance of Couchbase Server before running these commands.
+
+{shutdown-instructions-link}

////
// Removed this statement as it is questionable whether it is actually supported.
[TIP]
@@ -371,7 +375,8 @@ two instances (`db1` and `db2`) of the latest https://hub.docker.com/_/couchbase
and run on the host computer.

If a traditional installation of Couchbase Server is running locally on the host computer, the port mappings specified using the `-p` option may fail.
-Ensure that you stop any local instance of Couchbase Server before running these commands.
+Ensure that you stop any local instance of Couchbase Server before running these commands.
+
+{shutdown-instructions-link}
+
NOTE: If you're using encrypted communication for the Couchbase Web Console, client, and server, and using XDCR, you need to open up additional ports.

diff --git a/modules/install/pages/install-package-windows.adoc b/modules/install/pages/install-package-windows.adoc
index 656f1731a7..893252c0a0 100644
--- a/modules/install/pages/install-package-windows.adoc
+++ b/modules/install/pages/install-package-windows.adoc
@@ -15,8 +15,10 @@ If you're upgrading an existing installation of Couchbase Server, refer to xref:
Couchbase Server works out-of-the-box with most OS configurations.
However, the procedures on this page assume the following:

-* You have _administrator privileges_.
-These are required, for installing Couchbase Server on Windows.
+* You must have administrator privileges to install Couchbase Server on Windows.
+Once installed, Couchbase Server runs as a Windows Service using the Local System user account.
+To start or stop the Windows Service, your Windows account must either have administrator privileges or be granted sufficient privileges to start the service.
+For more information, see Microsoft's https://learn.microsoft.com/en-us/troubleshoot/windows-server/windows-security/grant-users-rights-manage-services[How to grant users rights to manage services^].
* Your system meets the xref:pre-install.adoc[minimum requirements] and that your operating system version is xref:install-platforms.adoc[supported].
+
diff --git a/modules/install/pages/install-platforms.adoc b/modules/install/pages/install-platforms.adoc
index 142e722800..80e0d2c476 100644
--- a/modules/install/pages/install-platforms.adoc
+++ b/modules/install/pages/install-platforms.adoc
@@ -1,16 +1,18 @@
= Supported Platforms
-:description: Couchbase Server is supported on several popular operating systems and virtual environments.
+:description: Couchbase Server supports several popular operating systems and virtual environments.
The Couchbase Server Web Console supports most recent major browsers.
:page-aliases: install:install-browsers

[abstract]
{description}

+[#oses]
== Supported Operating Systems

-Make sure that your chosen operating system is listed below, before you install Couchbase Server.
+Choose an operating system from the following list for your Couchbase Server deployment.

NOTE: Couchbase clusters on mixed platforms are not supported.
-Nodes in a Couchbase cluster should all be running on the same OS, and every effort should be made to apply the same OS patches across the entire cluster.
+Nodes in a Couchbase cluster should all be running on the same OS.
+Be sure to apply the same OS updates to all nodes in the cluster.

ARM64 support requires ARMv8 CPUs, such as the Amazon Graviton series.

@@ -20,48 +22,43 @@
| Operating System | Supported Versions (64-bit)

| Alma Linux
-| 9.x
+a|* 9.x

| Amazon Linux 2
-| LTS (x86-64, ARM64) (deprecated in Couchbase Server 7.6)
+a|* LTS (x86-64, ARM64) (deprecated in Couchbase Server 7.6)

| Amazon Linux 2023
-| AL2023 (x86-64, ARM64)
+a|* AL2023 (x86-64, ARM64)

| Debian
-| 12.x
+a| * 12.x
+* 11.x

-11.x
-
-
-| Oracle Linux{empty}footnote:[Only the Red Hat Compatible Kernel (RHCK) is supported. The Unbreakable Enterprise Kernel (UEK) is not supported.]
-| 8.x
-
-9.x
+| Oracle Linux{empty}footnote:[Only the Red Hat Compatible Kernel (RHCK) is supported.
+The Unbreakable Enterprise Kernel (UEK) is not supported.]
+a|* 9.x
+* 8.x

| Red Hat Enterprise Linux (RHEL)
-| 8.x
-
-9.x
+a|* 9.x
+* 8.x

| Rocky Linux
-| 9.x
+a|* 9.x

| SUSE Linux Enterprise Server (SLES)
-a| 12.x
-
-15.x
+a|* 15.x
+* 12.x (deprecated in Couchbase Server{nbsp}7.6)

-NOTE: Versions earlier than SP2 are no longer supported in Couchbase Server 7.2 and later.
+NOTE: Versions earlier than 12 SP2 are no longer supported in Couchbase Server 7.2 and later.

| Ubuntu
-| 20.04 LTS (x86, ARM64) (deprecated in Couchbase Server 7.6)
-
-22.x LTS (x86, ARM64)
+a|* 24.04 LTS (x86, ARM64)
+* 22.x LTS (x86, ARM64)
+* 20.04 LTS (x86, ARM64) (deprecated in Couchbase Server 7.6)

| Windows Server
-| 2022
-
-2019 (deprecated in Couchbase Server 7.6)
+a|* 2022
+* 2019 (deprecated in Couchbase Server 7.6)

|===

@@ -71,18 +68,20 @@ NOTE: Versions earlier than SP2 are no longer supported in Couchbase Server 7.2
| Operating System | Supported Versions (64-bit)

| macOS
-| 14 "Sonoma"
+a|* 14 "Sonoma"
+* 13 "Ventura"
+* 12 "Monterey" (x86-64 and Apple Silicon ARM64) (deprecated in Couchbase Server 7.6.0)

-13 "Ventura"
-
-12 "Monterey" (x86-64 and Apple Silicon ARM64) (deprecated in Couchbase Server 7.6)
-
| Windows Desktop
-| 10 (requires Anniversary Update)
+a|* 11
+* 10 (requires Anniversary Update) (deprecated in Couchbase Server 7.6.4)

|===

== Supported Virtualization and Container Platforms

+When running Couchbase Server in virtualized or containerized environments, base the container or VM on one of the operating systems listed under <<oses,Supported Operating Systems>>.
+Couchbase Server has no operating system requirements for the system hosting the VM or container.
+
.Supported VM and Container Platforms
[cols="100,135",options="header"]
|===
@@ -91,7 +90,7 @@ NOTE: Versions earlier than SP2 are no longer supported in Couchbase Server 7.2

| Docker
| Couchbase Server is compatible with Docker.
-Official Docker images are available on https://hub.docker.com/_/couchbase[Docker Hub].
+You can find official Docker images at https://hub.docker.com/_/couchbase[Docker Hub].
Follow the best practices to run xref:best-practices-vm.adoc[Couchbase Server on a virtualized environment].

| Kernel-based Virtual Machine (KVM)
@@ -100,10 +99,10 @@ Follow the best practices to run xref:best-practices-vm.adoc[Couchbase Server on
Follow the best practices to run xref:best-practices-vm.adoc[Couchbase Server on a virtualized environment].

| Kubernetes
-| First-party integration with Kubernetes is made available with the xref:operator::overview.adoc[Couchbase Autonomous Operator].
+| xref:operator::overview.adoc[Couchbase Autonomous Operator] provides Kubernetes integration.

| Red Hat OpenShift
-| First-party integration with Red Hat OpenShift is made available with the xref:operator::overview.adoc[Couchbase Autonomous Operator].
+| xref:operator::overview.adoc[Couchbase Autonomous Operator] provides Red Hat OpenShift integration.

| VMware
| Couchbase Server is compatible with VMware.
diff --git a/modules/install/pages/install-ports.adoc b/modules/install/pages/install-ports.adoc
index 19d86a4bec..d4b1deebc5 100644
--- a/modules/install/pages/install-ports.adoc
+++ b/modules/install/pages/install-ports.adoc
@@ -82,7 +82,7 @@ The following table lists all port numbers, grouped by category of communication
| _Client-to-node_
| *Unencrypted*: 8091-8097, 9123, 9140 {fn-eventing-debug-port}, 11210, 11280

-*Encrypted*: 11207, 18091-18095, 18096, 18097
+*Encrypted*: 11207, 18091-18097

| _XDCR (cluster-to-cluster)_
a| * Version 2 (XMEM)
diff --git a/modules/install/pages/migrate-mysql.adoc b/modules/install/pages/migrate-mysql.adoc
index 88ffd0df91..9d51614730 100644
--- a/modules/install/pages/migrate-mysql.adoc
+++ b/modules/install/pages/migrate-mysql.adoc
@@ -11,7 +11,8 @@ When migrating from MySQL to Couchbase Server, there are several things that you
== Data Model -- Mapping from MySQL to Couchbase Server

Data modeling for RDBMS has been a well-defined discipline for many years.
-Professionals, including novice users, have been practicing techniques such as logical to physical mapping and normalization / de-normalization.
+Professionals, including novice users,
+have been practicing techniques such as logical to physical mapping and normalization / denormalization.
However, the old-school RDBMS data modeling techniques still play a meaningful role for those who are new to the NoSQL technology.

.Concept mapping between MySQL and Couchbase Server
@@ -38,7 +39,7 @@ However, the old-school RDBMS data modeling techniques still play a meaningful r
|===
| Data type | MySQL | Couchbase Server

-| Case sensitive
+| Case-sensitive
| Yes/No
| Yes

@@ -83,7 +84,10 @@ However, the old-school RDBMS data modeling techniques still play a meaningful r

Like MySQL, Couchbase Server offers a rich set of features and functionality far beyond those offered in simple key-value stores.

-With Couchbase Server, you also get an expressive SQL-like query language and query engine called xref:n1ql:n1ql-language-reference/index.adoc[{sqlpp}], which is combined with a new powerful indexing mechanism -- xref:learn:services-and-indexes/indexes/global-secondary-indexes.adoc[Global Secondary Indexes].
+With Couchbase Server,
+you also get an expressive SQL-like query language and query engine called xref:n1ql:n1ql-language-reference/index.adoc[{sqlpp}],
+which is combined with a new powerful indexing mechanism —
+xref:learn:services-and-indexes/indexes/global-secondary-indexes.adoc[Global Secondary Indexes].

.Feature differences between MySQL and Couchbase Server
[cols="1,3"]
@@ -107,8 +111,17 @@ a|
| Variation in command and results (JSON).
|===

+
+== Importing your data into Couchbase
+
+Once you have extracted your data in JSON format,
+you can use the `cbimport` command-line tool to import it into your Couchbase Server installation.
+
+For information on the use of `cbimport`, see the xref:tools:cbimport.adoc[`cbimport`] page.
+A brief illustrative invocation appears at the end of this page.
+
+
== ETL Tools

You might have a spectrum of relational, operational, and analytical data sources in your environment.
-You might also need more sophistication applied to a data movement situation, such as more than just simple extract-load.
+You might also need more sophistication applied to a data movement situation, rather than a simple extract-load.
Various tools are available, but the most common use cases are best served by combining our xref:connectors:odbc-jdbc-drivers.adoc[JDBC drivers] with our xref:java-sdk:hello-world:start-using-sdk.adoc[Java SDK].
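+
+== Example: Importing a JSON Export
+
+The following is a minimal sketch of the `cbimport` step described above.
+The cluster address, credentials, bucket name, file path, and key-generator expression are placeholder values: adjust them for your own environment and export file.
+
+[source,console]
+----
+cbimport json -c http://localhost:8091 -u Administrator -p password \
+  -b travel-sample -d file:///tmp/export.json \
+  -f lines -g key::%id%
+----
+
+Here `-f lines` indicates that the source file contains one JSON document per line, and `-g` builds each document key from the document's `id` field.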
diff --git a/modules/install/pages/migrating-application-data.adoc b/modules/install/pages/migrating-application-data.adoc
index 185ef74eb9..41d844991b 100644
--- a/modules/install/pages/migrating-application-data.adoc
+++ b/modules/install/pages/migrating-application-data.adoc
@@ -251,7 +251,7 @@ cbbackupmgr restore -a backup -r test-02 -c localhost -u Administrator -p passw

A mostly online migration will require you to use replication (XDCR).

-. Setup XDCR from source cluster to target cluster. Depending on the spare disk space and compute resources in your source cluster, you can shoose to perform self-XDCR, where the source and destination bucket are on the same cluster), or you can set up a separate cluster to replicate to.
+. Set up XDCR from the source cluster to the target cluster. Depending on the spare disk space and compute resources in your source cluster, you can choose to perform self-XDCR (where the source and destination bucket are on the same cluster), or you can set up a separate cluster to replicate to.
. Create new buckets, scopes, collections, and indexes.
. Set up replications either directly from a bucket to a bucket.scope.collection or using Migration Mode (details shown below) if a single bucket's default collection has to be split to multiple collections.
-. Explicit mapping rules are specifiable for each destination to specify subset of the data.
+. Explicit mapping rules are specifiable for each destination, to specify a subset of the data.
diff --git a/modules/install/pages/pre-install.adoc b/modules/install/pages/pre-install.adoc
index 01ab4dcf91..5773c52da6 100644
--- a/modules/install/pages/pre-install.adoc
+++ b/modules/install/pages/pre-install.adoc
@@ -4,95 +4,172 @@
[abstract]
{description}

-[tabs]
+== CPU Requirements
+
+Couchbase Server can run on x86 and ARM processors (including Apple Silicon processors).
+This section explains the minimum requirements for each of these platforms.
+
+[#x86-processors]
+=== x86 Processors
+
+
+[#avx2-requirements]
+.deprecation notice
+[IMPORTANT]
====
-x86 Processors::
-+
---
+The use of older x86 processors that do not implement the https://en.wikipedia.org/wiki/Advanced_Vector_Extensions#AVX2[Advanced Vector Extensions 2 (AVX2)] instruction set is deprecated in Couchbase Server 7.6.x.
+Future versions will require processors that have AVX2 support.
+This requirement is only for x86 processors--ARM processors have a separate set of vector instructions.
+
+The earliest processors that support AVX2 instructions include:
+
+* Intel 4th generation (Haswell) Core processors released in 2013.
+* Intel 11th generation (Tiger Lake) Celeron and Pentium processors released in 2020.
+* AMD Excavator processors released in 2015.
+
+Processors from these or later generations will be required to run Couchbase Server in the future.
+
+On Linux, you can tell if your processor has the AVX2 instructions by executing the following command:
+
+[source,bash]
+----
+grep -q -i 'avx2' /proc/cpuinfo && \
+  echo "Processor has AVX2" || echo "AVX2 not found"
+----
+
+If the command returns the text `Processor has AVX2`, your processor is supported in future Couchbase Server releases.
+If the command returns `AVX2 not found`, your processor does not have AVX2 instructions and will not be supported in future Couchbase Server versions.
+====
+
+
+Couchbase Server has the following requirements when running on x86 processors.
+
+
[cols="80,180,180"]
|===
-| | Minimum Specifications* | Recommended Specifications**
+| | Minimum Specifications<<#note1,*>> | Recommended Specifications<<#note2,**>>

| *CPU*
-| 2 GHz dual core x86_64 CPU supporting SSE4.2
-| 3 GHz quad core x86_64 CPU supporting SSE4.2 and above
+| 2 GHz dual-core x86_64 CPU supporting SSE4.2
+| 3 GHz quad-core x86_64 CPU supporting SSE4.2 and above

3 GHz six core x86_64 CPU supporting SSE4.2 when using Cross Datacenter Replication (XDCR) and Views

| *RAM*
-| 4 GB (physical)
-| 16 GB (physical) and above
+| 4 GiB (physical)
+| 16 GiB (physical) and above

| *Storage (disk space)*
a|
-8 GB (block-based; HDD, SSD, EBS, iSCSI)
+8 GiB (block-based; HDD, SSD, EBS, iSCSI)

Network file systems such as CIFS and NFS are not supported.
a|
-16 GB and above (block-based; HDD, SSD, EBS, iSCSI)
+16 GiB and above (SSD)

Network file systems such as CIFS and NFS are not supported.
+
+3+a|
+[IMPORTANT]
+.Backup Nodes
+=====
+If the node is used for administering backups, then be aware that the resource requirements will be higher.
+
+The minimum hardware requirement is four CPU cores and 8 GiB RAM.
+
+The recommended hardware is sixteen CPU cores, 16 GiB RAM, and SSD disks.
+
+=====
+
+
|===

---
-ARM Processors::
-+
---
+=== ARM Processors
+
+Couchbase Server has the following requirements when running on ARM-based platforms.
+
+
[cols="80,180,180"]
|===
-| | Minimum Specifications* | Recommended Specifications**
+| | Minimum Specifications<<#note1,*>> | Recommended Specifications<<#note2,**>>

| *CPU*
| 2 Ghz dual core 64bit ARM v8 CPU
| 2.5 Ghz quad core 64bit ARM v8 CPU

| *RAM*
-| 4 GB (physical)
-| 16 GB (physical) and above
+| 4 GiB (physical)
+| 16 GiB (physical) and above

| *Storage (disk space)*
a|
-8 GB (block-based; HDD, SSD, EBS, iSCSI)
+8 GiB (block-based; HDD, SSD, EBS, iSCSI)

Network file systems such as CIFS and NFS are not supported.
a|
-16 GB and above (block-based; HDD, SSD, EBS, iSCSI)
+16 GiB and above (SSD)

Network file systems such as CIFS and NFS are not supported.

-|===
---
-====
+3+a|
+[IMPORTANT]
+.Backup Nodes
+=====
+If the node is used for administering backups, then be aware that the resource requirements will be higher.
+
+The minimum hardware requirement is four CPU cores and 8 GiB RAM.
+The recommended hardware is sixteen CPU cores, 16 GiB RAM, and SSD disks.
+=====

-*_You can reduce the CPU and RAM resources below the Minimum Specifications for development and testing purposes.
+
+|===
+
+[#note1]
+^*^You can reduce the CPU and RAM resources below the Minimum Specifications for development and testing purposes.
Resources can be as low as 1 GB of free RAM beyond operating system requirements, and a single CPU core.
-However, you must adhere to the Minimum Specifications for production._
+However, you must adhere to the Minimum Specifications for production.

-**_The Recommended Specifications don't take into account your intended workload.
-You should follow the xref:sizing-general.adoc[sizing guidelines] when determining system specifications for your Couchbase Server deployment._
+[#note2]
+^**^The Recommended Specifications do not take into account your intended workload.
+You should follow the xref:sizing-general.adoc[sizing guidelines] when determining system specifications for your Couchbase Server deployment.

[#clock-source-linux]
-Clock Source on Linux:: The Query service uses the OS monotonic clock for profiling and network timeout purposes.
-+
-The Linux kernel uses the _Clock Source_ to obtain the current clock value and this information is stored in `/sys/devices/system/clocksource/clocksource0/current_clocksource`. There are several clock sources (TSC, XEN, and others), which are used depending on the hardware clock capabilities, and the OS installation. The XEN source, which is seen to be the default on AWS setups, can use up to 25% of all available CPU time to obtain the current timestamp. The TSC clock source, on the other hand, incurs very little CPU cost. We recommend changing the clock source to TSC if it is set to anything else.
-+
-Check the clock source on your Linux OS using the following command:
+== Clock Source on Linux
+
+The Query Service relies on the Linux operating system's monotonic clock when profiling and managing network timeouts.
+
+The Linux kernel uses a clock source to track elapsed time, handle scheduling and timers, and to get the current time.
+It can use one of several possible sources, such as the Time Stamp Counter (TSC), the XEN clock source built into the Xen virtualization framework, and others.
+See https://docs.kernel.org/timers/timekeeping.html[Clock sources, Clock events, `sched_clock()` and delay timers^] for more information about clock sources.
+Which source the kernel uses depends on the hardware clock capabilities and Linux configuration settings.
+
+Some virtualization environments, such as older AWS EC2 clusters, use the XEN clock source.
+This source can cause performance issues because reading it requires an expensive system call to the hypervisor.
+In some cases, a XEN clock source has used up to 25% of CPU time when timers are in heavy use.
+
+The TSC clock source incurs little CPU cost because it's a CPU instruction instead of a kernel or hypervisor call.
+If your platform has a reliable and invariant implementation of TSC, use it as the clock source.
+Consult the documentation for your platform for more information about its TSC implementation.
+
+Use the following command to see which clock source Linux is using:
+
[source, bash]
----
cat /sys/devices/system/clocksource/clocksource0/current_clocksource
----
-+
-Change the clock source using the following commands:
+
+You can change the clock source to TSC by running the following command as root:
+
[source,bash]
----
echo tsc > /sys/devices/system/clocksource/clocksource0/current_clocksource
----
-+
-To verify the current setting of the clock source, use:
+
+To verify the current setting of the clock source, read `current_clocksource` again:
+
[source,bash]
----
cat /sys/devices/system/clocksource/clocksource0/current_clocksource
----
-+
-The output should read `tsc`.
\ No newline at end of file
+
+The output should read `tsc`.
diff --git a/modules/install/pages/thp-disable.adoc b/modules/install/pages/thp-disable.adoc
index c5cce77ac0..39408cbc1a 100644
--- a/modules/install/pages/thp-disable.adoc
+++ b/modules/install/pages/thp-disable.adoc
@@ -3,18 +3,26 @@
:tabs:

[abstract]
+--
{description}
-THP must be disabled in order for Couchbase Server to function correctly on Linux.
+
+THP must be disabled in order for Couchbase Server to function correctly on Linux, as having THP enabled can worsen performance and possibly lead to an OOM kill.
+--

In Linux operating systems, _huge pages_ is a feature that provides a way for the CPU and OS to create pre-allocated contiguous memory space, and which is designed to improve application performance.
-_Transparent huge pages (THP)_ is a Linux OS feature that automates the creation of contiguous memory space, and conceals much of the complexity of using actual huge pages on systems with large amounts of memory.
+_Transparent huge pages (THP)_ is a Linux OS feature that automates the creation of contiguous memory space and conceals much of the complexity of using actual huge pages on systems with large amounts of memory.
THP is enabled by default in most Linux operating systems, and functions very well for most applications and processes.
However, THP is detrimental to Couchbase's performance (as it is for nearly all databases that tend to have sparse rather than contiguous memory access patterns).
-You must disable THP on Linux systems to ensure the optimal performance of Couchbase Server.
+Because Couchbase Server's data access tends to be random and sparse, THP allocates pages that can remain mostly empty.
+This leads to memory fragmentation, as portions of memory are unused but are still accounted for in the resident set size (RSS).
+As a result, the amount of data actually stored can be much smaller than the RSS suggests, and the inflated RSS can eventually lead to an OOM kill.
+
+Therefore, you must disable THP on Linux systems to ensure the optimal performance of Couchbase Server.

-NOTE: If you are using Rocky Linux, then <>
+NOTE: If you are using Rocky Linux, then <>

@@ -86,7 +94,7 @@ sudo chmod 755 /etc/init.d/disable-thp

. Configure the OS to run the script on boot.
+
-[{tabs}]
+[tabs]
====
Red Hat, CentOS, & Amazon Linux::
+
@@ -130,7 +138,7 @@ When they are in use on a system, they can be used to enable and disable THP.

To disable THP in `tuned` and `ktune`, you need to edit or create a new _profile_ that sets THP to `never`.

-[{tabs}]
+[tabs]
====
Red Hat/CentOS 7::
+
diff --git a/modules/install/pages/upgrade-procedure-selection.adoc b/modules/install/pages/upgrade-procedure-selection.adoc
index 87abe655fd..a7ed5d0041 100644
--- a/modules/install/pages/upgrade-procedure-selection.adoc
+++ b/modules/install/pages/upgrade-procedure-selection.adoc
@@ -62,6 +62,13 @@ _Swap Rebalance_ is automatically performed by Couchbase Server when all the fol
Since the introduced nodes are recognized by Couchbase Server to have equivalent capacities and configurations to those that have been taken out, rebalance is performed as a _swap rebalance_; which largely confines its activity to the incoming and outgoing nodes.
Thus, for example, if one Data Service node is removed and another added, the swap rebalance ensures that the vBucket layout of the outgoing node is created identically on the incoming node; with the layouts of other Data Service nodes not requiring modification.

+[NOTE]
+.Node Removal and Swap Rebalancing
+====
+If you are removing a data node, then there is no need to perform a xref:manage:manage-nodes/failover-graceful.adoc[failover operation].
+
+Remove the node using the xref:manage:manage-nodes/remove-node-and-rebalance.adoc#remove-a-node-with-the-ui[UI], the xref:manage:manage-nodes/remove-node-and-rebalance.adoc#remove-a-node-with-the-rest-api[REST API], or the xref:manage:manage-nodes/remove-node-and-rebalance.adoc#remove-a-node-with-the-cli[CLI], and then trigger a rebalance, as in the sketch below.
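+
+For illustration, the removal and rebalance can also be triggered in a single CLI step; the cluster address, credentials, and the address of the node being removed are placeholders for your own values:
+
+[source,console]
+----
+couchbase-cli rebalance -c 192.168.0.1:8091 \
+  -u Administrator -p password \
+  --server-remove 192.168.0.2:8091
+----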
+====

By contrast, if two Data Service nodes are taken out, and one Data Service node and one Search Service node are introduced, since the incoming and outgoing nodes differ in configuration, when rebalance is triggered by the administrator, Couchbase Server performs a _full_ rebalance; involving more nodes than those in transit; and indeed, potentially involving the entire cluster.

Note that the effect of rebalance on different Couchbase Services is described in xref:learn:clusters-and-availability/rebalance.adoc[Rebalance]: familiarity with this information is required before proceeding.
diff --git a/modules/install/pages/upgrade.adoc b/modules/install/pages/upgrade.adoc
index d790f326db..78730b760a 100644
--- a/modules/install/pages/upgrade.adoc
+++ b/modules/install/pages/upgrade.adoc
@@ -27,14 +27,14 @@ Before upgrading, consider the following version compatibility concerns.
// So long as upgrading from 6.x is supported, this notice will need to stay in some form in each new release.
=== Upgrading to Version 7.x With Earlier Versions of .NET SDK

-When upgrading from Couchbase 6.5 or 6.6 to 7.0 or later determine if both of the following are true:
+When upgrading from Couchbase 6.5 or 6.6 to 7.0 or later, determine if both of the following are true:

* You use a version of the .NET SDK prior to 3.2.9.
* Your cluster is in mixed mode networking where some nodes use IPv4 addressing and others use IPv6.
See xref:manage:manage-nodes/manage-address-families.adoc#changing-address-family-to-IPv6[Changing Address Family] for steps to determine if your cluster is running in this mode.

Using a version of the .NET SDK prior to 3.2.9 with mixed mode network addressing can cause issues with write operations.
-Before upgrading, resolve the mixed mode networking issue.
+Before upgrading, resolve the mixed-mode networking issue.

=== Upgrading from Pre-7.1 Versions of Couchbase Server

@@ -43,7 +43,7 @@ You cannot upgrade directly from a version of Couchbase Server earlier than 7.1
For example, you can directly upgrade from version 6.6 to version 7.2.3.
You cannot directly upgrade from version 6.6 to version 7.2.4.
A compatibility issue with the Erlang version used by these earlier server versions prevents a direct upgrade to later versions of the server.
-To upgrade from server versions 6.5, 6.6, or 7.0 to version 7.6 or later, first upgrade to version between 7.1 and 7.2.3.
+To upgrade from server versions 6.5, 6.6, or 7.0 to version 7.6 or later, first upgrade to a version between 7.1 and 7.2.3.
Then upgrade to version 7.6 or later.

[#understanding-upgrade]
@@ -60,7 +60,7 @@ A review of the factors that determine the appropriateness of an upgrade-procedu
[#supported-upgrade-paths]
== Upgrade Paths

-An upgrade _path_ declares that the upgrade of one version of Couchbase Server to another is _supported_.
+An upgrade _path_ declares that upgrading one version of Couchbase Server to another is _supported_.
The tables in the following subsections list upgrade paths for Enterprise Edition and for Community Edition, respectively.
Each instance of the{nbsp}`->`{nbsp}sign declares support for the upgrade of the server-version on the left of the sign to the server-version on the right.
@@ -104,10 +104,10 @@ TIP: As far as is possible, you should aim to keep your cluster up to date with | Starting Version | Path to Current Version | 5.x -| 5.x -> 6.6 -> 7.2.3 -> 7.6.x{blank}xref:erlang-7-2-4-footnote2[^+[1]+^] +| 5.x -> 6.6 -> 7.2.2 -> 7.6.x{blank}xref:erlang-7-2-4-footnote2[^+[1]+^] | 6.x -| 6.0 -> 6.6 -> 7.2.3 -> 7.6.x{blank}xref:erlang-7-2-4-footnote2[^+[1]+^] +| 6.0 -> 6.6 -> 7.2.2 -> 7.6.x{blank}xref:erlang-7-2-4-footnote2[^+[1]+^] | 7.x | 7.0 -> 7.1 -> 7.6.x{blank}xref:erlang-7-2-4-footnote2[^+[1]+^] @@ -153,15 +153,6 @@ For example, if you are upgrading three enterprise nodes (`*Node{nbsp}1*`, `*Nod `*Node{nbsp}2*` => 5.1x -> 6.6 + `*Node{nbsp}3*` => 5.1x -> 6.6 - - -| {counter: upgrade} -| Upgrade all nodes from 6.6 to 7.2.3 -| -`*Node{nbsp}1*` => 6.6 -> 7.2.3 + -`*Node{nbsp}2*` => 6.6 -> 7.2.3 + -`*Node{nbsp}3*` => 6.6 -> 7.2.3 - | {counter: upgrade} | Upgrade all nodes from 6.6 to 7.2.3 | @@ -196,7 +187,7 @@ and finally, from *7.2.3* to *7.6.x*. If you’re currently operating a Couchbase Server cluster on Community Edition, you can upgrade it to Enterprise Edition by way of a xref:upgrade-strategies.adoc#online-upgrade[rolling online upgrade]. This involves switching out the Community Edition nodes with fresh, net-new Enterprise Edition nodes. -Both swap rebalance and remove and reblance methods are supported. +Both swap rebalance and remove and rebalance methods are supported. Delta Recovery is not supported since the new nodes must be fresh Enterprise Edition installations without any pre-existing Community Edition data remaining on them. NOTE: Rolling upgrades from CE to EE are not supported if there are index service nodes running in the cluster. @@ -217,11 +208,12 @@ include::partial$diagrams.adoc[tag="upgrade-diagram"] .Additional Notes about Upgrading from Community to Enterprise [sidebar] **** -* Couchbase Server clusters _must_ be run either entirely on Enterprise Edition nodes, or entirely on Community Edition nodes. + +* Couchbase Server clusters _must_ be run either entirely on Enterprise Edition nodes or entirely on Community Edition nodes. + Once you've upgraded one node to Enterprise Edition, you must upgrade all the other nodes before the cluster is considered as being in a steady, supportable state. * CE does not support index service rebalancing. So, when the cluster is running with one or more CE nodes, then the indexes hosted on nodes being removed may be lost. + - Users can create equivalent indexes (same index with different name) on different nodes, to avoid loss of index functionality. + Users can create equivalent indexes (the same index with a different name) on different nodes + to avoid loss of index functionality. * If a rolling online upgrade to Enterprise Edition isn't possible in your environment, contact Couchbase for assistance. **** @@ -232,8 +224,10 @@ If you're interested in upgrading to Couchbase Server Enterprise Edition, check ==== -See xref:install:upgrade-procedure-selection.adoc[Upgrade Procedure-Selection], for a list of procedures that can be used when upgrading from Community Edition to Enterprise. -Note, however, that _Graceful Failover_ for Data Service nodes, with _Delta Recovery_, is _not_ supported for such upgrades: instead, _removal_, _addition_, and _swap rebalance_ should be used; for all nodes. +See xref:install:upgrade-procedure-selection.adoc[Upgrade Procedure-Selection] for a list of procedures +that can be used when upgrading from Community Edition to Enterprise. 
+Note, however, that _Graceful Failover_ for Data Service nodes, with _Delta Recovery_, +is _not_ supported for such upgrades: instead, _removal_, _addition_, and _swap rebalance_ should be used; for all nodes. [#node-naming-and-upgrade] == Node-Naming and Upgrade @@ -246,10 +240,13 @@ For information, see xref:learn:security/certificates.adoc#node-certificate-vali == Downgrade Once an upgrade of a Couchbase-Server cluster has started, -_downgrade_ to the earlier version of Couchbase Server can be performed, -as long as one node continues to run the earlier version. -To downgrade an existing node, you must first remove the existing Linux package installer, then install an earlier version. -However, once all nodes are running the later version, downgrade can no longer be performed: therefore, +_downgrading_ to an earlier version of Couchbase Server can be performed by using the _swap/rebalance_ method: + +. Remove the target node from the cluster, then perform a rebalance on the cluster. +. Downgrade the target node (or create a new node using the earlier version of Couchbase). +. Add the node to the cluster and rebalance. + +Bear in mind that once all nodes are running the later version, downgrade can no longer be performed: therefore, once all nodes are running the later version, should application-support require the earlier version, an entirely new cluster must be created, running the earlier version. diff --git a/modules/install/partials/diagrams.adoc b/modules/install/partials/diagrams.adoc index c6711fa864..f2a9aee439 100644 --- a/modules/install/partials/diagrams.adoc +++ b/modules/install/partials/diagrams.adoc @@ -6,20 +6,20 @@ Not built as part of the site // tag::upgrade-diagram[] .Example Upgrade Path from Community to Enterprise -[ditaa] +[ditaa, round-corners=true] .... /-----------------\ /-----------------\ /-----------------\ | Step 1: | | Step 2: | | Step 3: | - : Upgrade Edition | : Upgrade Version | : Upgrade Version | + | Upgrade Edition | | Upgrade Version | | Upgrade Version | \--------+--------/ \--------+--------/ \--------+--------/ | | | | | | -+-----------------+ : +-----------------+ : +-----------------+ : +-----------------+ -|cBLU | ---+---> |cC02 | ----+----> |cC02 | ----+----> |cC02 | -|Cluster 1 | Rolling |Cluster 1 | Any |Cluster 1 | Any |Cluster 1 | -|Version: 6.6 | Online |Version: 6.6 | Supported |Version: 7.2.3 | Supported |Version: 7.6 | -|Edition: CE | Upgrade |Edition: EE | Upgrade |Edition: EE | Upgrade |Edition: EE | -| {s}| | {s}| Type | {s}| Type | {s} | ++-----------------+ | +-----------------+ | +-----------------+ | +-----------------+ +|cFEB | ---+---> |cBEA | ----+----> |cBEA | ----+----> |cBEA | +|Cluster 1 | Rolling |Cluster 1 | Any |Cluster 1 | Any |Cluster 1 | +|Version 6.6 | Online |Version 6.6 | Supported |Version 7.2.3 | Supported |Version 7.6 | +|Edition CE | Upgrade |Edition EE | Upgrade |Edition EE | Upgrade |Edition EE | +| | | | Type | | Type | | +-----------------+ +-----------------+ +-----------------+ +-----------------+ .... // end::upgrade-diagram[] \ No newline at end of file diff --git a/modules/introduction/pages/whats-new.adoc b/modules/introduction/pages/whats-new.adoc index 14da19e71b..26804ce932 100644 --- a/modules/introduction/pages/whats-new.adoc +++ b/modules/introduction/pages/whats-new.adoc @@ -8,16 +8,22 @@ For information about platform support changes, deprecation notifications, notable improvements, and fixed and known issues, refer to the xref:release-notes:relnotes.adoc[Release Notes]. 
-[#new-features-762] -== New Features and Enhancements in 7.6.2 - -The following new features are provided in this release. - +.deprecation notice +[IMPORTANT] +==== +Using older x86 processors that do not have the AVX2 instruction set is deprecated in Couchbase Server 7.6.x. +Deprecated processors include pre-2013 Intel Core processors, pre-2020 Celeron or Pentium processors, and pre-2015 AMD processors. +See xref:install:pre-install.adoc[] for details. +==== + +.note regarding `cbbackupmgr` +[IMPORTANT] +==== +If you are performing a backup/restore operation on a Couchbase Server 7.6.x cluster, +ensure that you use `cbbackupmgr` version 7.6. +==== + +include::partial$new_features-76_6.adoc[] +include::partial$new_features-76_4.adoc[] include::partial$new-features-76_2.adoc[] - -[#new-features] -== New Features and Enhancements in 7.6.0 - -The following new features are provided in this release. - include::partial$new-features-76.adoc[] diff --git a/modules/introduction/partials/new-features-76.adoc b/modules/introduction/partials/new-features-76.adoc index 261d1ac8a8..ccdff30ed5 100644 --- a/modules/introduction/partials/new-features-76.adoc +++ b/modules/introduction/partials/new-features-76.adoc @@ -1,3 +1,9 @@ +[#new-features] +== New Features and Enhancements in 7.6.0 + +The following new features are provided in this release. + + === Platform Support * Couchbase Server 7.6 adds support for the following platforms: @@ -49,6 +55,8 @@ include::learn:partial$arbiter-node-benefits.adoc[] In addition, the `/pools/default/tasks` REST API endpoint now takes an optional `taskId` parameter to view details about a sample bucket loading task. See xref:manage:manage-settings/install-sample-buckets.adoc#install-sample-buckets-with-the-rest-api[Install Sample Buckets with the REST API] for more information. +* The minimum permitted duration for auto-failover on the nodes is reduced from 5 seconds to 1 second when set through the REST API. + === Backup and Restore * The Role-Based Access Control (RBAC) REST API has a new `backup` endpoint that lets you backup and restore user and user groups. See xref:rest-api:rbac.adoc#backup-and-restore-users-and-groups[Backup and Restore Users and Groups]. @@ -157,8 +165,10 @@ A developer-friendly vector indexing engine exposes a vector database and search With Couchbase Vector Search, you can enable fast and highly accurate semantic search, ground LLM responses in relevant data to reduce hallucinations, and enhance or enable use cases like personalized searches in e-commerce and media & entertainment, product recommendations, fraud detection, and reverse image search. You can also enable full access to an AI ecosystem with a LangChain integration, the most popular open-source framework for LLM-driven applications. + + A Vector Search database includes: + +-- ** Standard Couchbase vertical/horizontal scaling ** Indexing capable of efficient Insert/Update/Removal of Items (or documents) ** Storage of raw Embedding Vectors in the Data Service in the documents themselves @@ -166,11 +176,19 @@ A Vector Search database includes: ** {sqlpp}/N1QL integration ** Third-party framework integration: LangChain (later LlamaIndex + others) ** Full support for Replicas Partitions and file-based Rebalance - +-- ++ +-- NOTE: Vector Search is currently only supported on Couchbase Server 7.6.0 deployments running on Linux platforms. -MacOS and Windows platforms are not supported. +macOS and Windows platforms are not supported. 
For more information about vector search, see xref:vector-search:vector-search.adoc[]
+--
+
+* Couchbase Server 7.6 introduces Scoped Index Naming
+as an optional part of the `WHERE` clause in an {sqlpp} statement.
+For more information, see xref:n1ql:n1ql-language-reference/searchfun.adoc#search-function-arguments-section[SEARCH function arguments].
+

=== Data Service

diff --git a/modules/introduction/partials/new-features-76_2.adoc b/modules/introduction/partials/new-features-76_2.adoc
index 69713919e6..fc6cbff55d 100644
--- a/modules/introduction/partials/new-features-76_2.adoc
+++ b/modules/introduction/partials/new-features-76_2.adoc
@@ -1,3 +1,9 @@
+
+[#new-features-762]
+== New Features and Enhancements in 7.6.2
+
+The following new features are provided in this release.
+
=== Platform Support

* Couchbase Server 7.6.2 adds support for the following platforms:
@@ -27,7 +33,7 @@ See xref:learn:services-and-indexes/services/backup-service.adoc#threads[Thread
=== Analytics Service

* The Analytics Service REST API has two new endpoints that let you get information about active and completed requests.
-See xref:analytics:rest-admin.adoc#_return_active_requests[Active Requests] and xref:analytics:rest-admin.adoc#_completed_requests[Completed Requests] in the xref:analytics:rest-admin.adoc[] page
+See xref:analytics-rest-admin:index.adoc#return_active_requests[Active Requests] and xref:analytics-rest-admin:index.adoc#completed_requests[Completed Requests] in the xref:analytics-rest-admin:index.adoc[] page.

=== Cluster Manager

@@ -39,7 +45,7 @@ See xref:analytics:rest-admin.adoc#_return_active_requests[Active Requests] and
** `cm_auto_failover_max_count`: the maximum number of auto-failovers allowed before Cluster Manager disables auto-failover.
** `cm_failover_total`: The total number of non-graceful failovers that have occurred.
** `cm_graceful_failover_total`: The total number of graceful failovers that have occurred.
-** `cm_is_balanced`: Whether the Cluster Manager is balanced. Only reported by orchestrator nodes and only reported every 30 seconds.
+** `cm_is_balanced`: Whether the cluster is balanced. Only reported by orchestrator nodes and only reported every 30 seconds.
** `cm_rebalance_in_progress`: Whether there is a rebalance occurring. Only reported by the orchestrator node.
** `cm_rebalance_progress`: An estimate of the progress of the current rebalance. Only reported by the orchestrator.
** `cm_rebalance_total`: The total number of rebalances that have occurred.
diff --git a/modules/introduction/partials/new_features-76_4.adoc b/modules/introduction/partials/new_features-76_4.adoc
new file mode 100644
index 0000000000..1440c73ffc
--- /dev/null
+++ b/modules/introduction/partials/new_features-76_4.adoc
@@ -0,0 +1,78 @@
+
+[#new-features-764]
+== New Features and Enhancements in 7.6.4
+
+The following new features are provided in this release.
+
+
+[#new-features-764-cluster-manager]
+=== Cluster Manager
+
+* *https://jira.issues.couchbase.com/browse/MB-63871[MB-63871]:*
+The `/prometheus_sd_config` endpoint provides a new option, `clusterLabels`,
+that specifies whether the cluster returns its name and UUID.
+Prometheus will use the labels in time series data.
+This provides a way to guarantee uniqueness for stats with the same name
+gathered from multiple clusters.
++
+[source,console]
+----
+curl --get -u <username>:<password> \
+  http://<host>:<port>/prometheus_sd_config \
+  -d disposition=[attachment|inline] \
+  -d network=[default|external] \
+  -d port=[insecure|secure] \
+  -d type=[json|yaml] \
+  -d clusterLabels=[none|uuidOnly|uuidAndName]
+----
+
+[#new-features-764-xdcr]
+=== XDCR
+
+
+* *https://jira.issues.couchbase.com/browse/MB-62412[MB-62412]:*
+Once faulty remote cluster credentials are fixed, XDCR will now be able to more quickly restart replications that depend on the repaired references.
+
+[#new-features-764-search-service]
+=== Search Service
+
+* The Search Service now supports pre-filtering on Vector Search queries.
+Use pre-filtering to execute a vector search over a subset of your Vector Search index,
+through a defined filter request.
+For more information, see xref:vector-search:pre-filtering-vector-search.adoc[].
+
+* The Search Service now supports the cosine similarity metric for xref:vector-search:vector-search.adoc[Vector Search indexes].
+For more information about Vector Search similarity metrics, see xref:search:child-field-options-reference.adoc[Child Field Options].
+
+* The Search Service now supports a new option for optimizing Vector Search indexes: *memory-efficient*.
+Choose this option to prioritize reduced memory and resource usage for Vector Searches, at the cost of accuracy and latency.
+For more information, see xref:search:child-field-options-reference.adoc[Child Field Options].
+
+* The Search Service has added a new object to JSON Search queries.
+Use this new object to view detailed query debugging information and resolve query errors in the Web Console or through the REST API.
+For more information about how to run a query with this new object, see xref:search:search-request-params.adoc#validate[the validate property], xref:search:simple-search-rest-api.adoc#example-validate-a-search-query[Run a Simple Search with the REST API and curl/HTTP] or xref:search:simple-search-ui.adoc#example-validate-a-search-query[Run A Simple Search with the Web Console].
+
+[#new-features-764-eventing-service]
+=== Eventing Service
+
+* The Eventing Service now supports Sync Gateway.
+The Eventing REST API provides settings which enable individual Eventing functions to work with Sync Gateway.
+For more information, see xref:eventing-rest-api:index.adoc#adv_settings_update[Update Function Settings].
+
+=== Supported Platforms
+
+* Support for Windows 10 is deprecated in Couchbase Server 7.6.4.
+A future release of Couchbase Server will remove support for it.
+
+[#new-features-764-tools]
+=== Tools
+
+* *https://jira.issues.couchbase.com/browse/MB-63171[MB-63171]:*
+Starting from version 7.6.4, the Couchbase Server tools packages are categorized into the Server developer tools package and the Server admin tools package.
++
+The Server developer tools package is the same as the previously named Server tools package, which includes `cbimport`, `cbexport`, `cbq`, and `cbbackupmgr`.
++
+The Server admin tools package is created for Server admin users who want to download the necessary utilities to remotely administer and monitor multiple Couchbase clusters. The Server admin tools package includes the utilities `cbbackupmgr`, `cbc`, `cbdatarecovery`, `cbexport`, `cbimport`, `cbq`, `cbstats`, `couchbase-cli`, `mcstat`, `mctestauth`, and `mctimings`.
++
+For details, see xref:cli:cli-intro.adoc#server-tools-packages[CLI Reference].
diff --git a/modules/introduction/partials/new_features-76_6.adoc b/modules/introduction/partials/new_features-76_6.adoc
new file mode 100644
index 0000000000..ca3ad7690b
--- /dev/null
+++ b/modules/introduction/partials/new_features-76_6.adoc
@@ -0,0 +1,21 @@
+
+NOTE: *Sync Gateway 4.0.0 is a future release version.
+
+[#new-features-766]
+== New Features and Enhancements in 7.6.6
+
+The following new features are provided in this release.
+
+* The following new platforms are supported:
+** Windows Server 2025
+
+[#new-features-766-xdcr]
+=== XDCR
+
+* *https://jira.issues.couchbase.com/browse/MB-57921[MB-57921]:*
+Adds the ability to set up XDCR bidirectional replication with Sync Gateway (SGW) 4.0* or a later version.
+In versions earlier than Server 7.6.6 and Sync Gateway (SGW) 4.0.0*, only an active-passive setup was supported with XDCR and SGW.
+Server 7.6.6 introduces XDCR active-active replication with Sync Gateway for XDCR-Mobile interoperability: you can configure an active-active XDCR setup with Sync Gateway and mobile applications on both the XDCR source and target clusters.
+This setup requires at least Server 7.6.6 and SGW 4.0.0*.
+For more information, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc[XDCR Active-Active with Sync Gateway].
+
diff --git a/modules/learn/assets/images/clusters-and-availability/groups-two-equal_updated.png b/modules/learn/assets/images/clusters-and-availability/groups-two-equal_updated.png
new file mode 100644
index 0000000000..48c286696f
Binary files /dev/null and b/modules/learn/assets/images/clusters-and-availability/groups-two-equal_updated.png differ
diff --git a/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-after-upgrade.png b/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-after-upgrade.png
new file mode 100644
index 0000000000..ccb0c7d8b7
Binary files /dev/null and b/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-after-upgrade.png differ
diff --git a/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-before-upgrade.png b/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-before-upgrade.png
new file mode 100644
index 0000000000..9f49db8e86
Binary files /dev/null and b/modules/learn/assets/images/clusters-and-availability/xdcr-active-sgw-before-upgrade.png differ
diff --git a/modules/learn/pages/buckets-memory-and-storage/buckets.adoc b/modules/learn/pages/buckets-memory-and-storage/buckets.adoc
index 2978be55ae..b4023071bd 100644
--- a/modules/learn/pages/buckets-memory-and-storage/buckets.adoc
+++ b/modules/learn/pages/buckets-memory-and-storage/buckets.adoc
@@ -53,8 +53,6 @@ For a Memcached bucket, this means that data, which is resident in memory (but,
Therefore, if removed data is subsequently needed, it cannot be re-acquired from Couchbase Server.
Ejection removes all of an item's data.

-For all bucket-types, items are selected for ejection by means of the _Not Recently Used_ (NRU) algorithm.
-
All bucket types are fully compatible with the Memcached open source distributed key-value cache.
== Bucket Capabilities
diff --git a/modules/learn/pages/buckets-memory-and-storage/memory.adoc b/modules/learn/pages/buckets-memory-and-storage/memory.adoc
index cba12609f1..09314ef886 100644
--- a/modules/learn/pages/buckets-memory-and-storage/memory.adoc
+++ b/modules/learn/pages/buckets-memory-and-storage/memory.adoc
@@ -71,10 +71,10 @@ This quota is allocated for the bucket on a per node basis and must be less than
Set the memory quota based on the expected size of your dataset.
The memory quota for a bucket must support the minimum memory resident ratio of its xref:learn:buckets-memory-and-storage/storage-engines.adoc[storage engine]:

-* *Couchstore*: The memory quota is recommended to be at least 10-20% of your expected dataset size.
-* *Magma*: The memory quota is recommended to be at least 2% of your expected dataset size.
+* *Couchstore*: The memory quota is recommended to be at least 10% of your expected dataset size.
+* *Magma*: The memory quota is recommended to be at least 1% of your expected dataset size.

-For example, if you expect to have about 2TBs of data per node in your cluster and want to use the *Magma* engine, you could set the memory quota for a bucket to 40GB.
+For example, if you expect to have about 2 TB of data per node in your cluster and want to use the *Magma* engine, you could set the memory quota for a bucket to 20 GB.

NOTE: These values are recommendations only.
The specific memory quota requirements for your bucket are dependent on access patterns, data density, and other factors.
diff --git a/modules/learn/pages/buckets-memory-and-storage/storage-settings.adoc b/modules/learn/pages/buckets-memory-and-storage/storage-settings.adoc
index cf7553c04f..e3b731b5c1 100644
--- a/modules/learn/pages/buckets-memory-and-storage/storage-settings.adoc
+++ b/modules/learn/pages/buckets-memory-and-storage/storage-settings.adoc
@@ -9,7 +9,8 @@
== Understanding Couchbase Storage

Couchbase Server stores certain items in compressed form on disk; and, whenever required, removes them.
-This allows data-sets to exceed the size permitted by existing memory-resources; since undeleted items not currently in memory can be restored to memory from disk, as needed.
+This allows data-sets to exceed the size permitted by existing memory resources,
+since undeleted items not currently in memory can be restored to memory from disk, as needed.
It also facilitates backup-and-restore procedures.

Generally, a client's interactions with the server are not blocked during disk-access procedures.
@@ -22,7 +23,7 @@ Items written to disk are always written in compressed form.
Based on bucket configuration, items may be maintained in compressed form in memory also.
See xref:buckets-memory-and-storage/compression.adoc[Compression] for information.

-Items can be removed from disk based on a configured point of expiration, referred to as _Time-To-Live_.
+Items can be removed from the disk based on a configured point of expiration, referred to as _Time-To-Live_.
See xref:data/expiration.adoc[Expiration] for information.

For illustrations of how Couchbase Server saves new and updates existing Couchbase-bucket items, thereby employing both memory and storage resources, see xref:buckets-memory-and-storage/memory-and-storage.adoc[Memory and Storage].

@@ -30,28 +31,30 @@ For illustrations of how Couchbase Server saves new and updates existing Couchba
[#threading]
== Threading

-Synchronized, multi-threaded _readers_ and _writers_ provide simultaneous, high-performance operations for data on disk.
+Synchronized, multithreaded _readers_ and _writers_ provide simultaneous, high-performance operations for data on disk. Conflicts are avoided by assigning each thread (reader or writer) a specific subset of the 1024 vBuckets for each Couchbase bucket. Couchbase Server allows the number of threads allocated per node for reading and writing to be configured by the administrator. -The maximum thread-allocation that can be specified for each is _64_, the minimum _1_. +The maximum thread-allocation that can be specified for each is _64_, the minimum is _1_. -A high thread-allocation may improve performance on systems whose hardware-resources are commensurately supportive (for example, where the number of CPU cores is high). +A high thread-allocation may improve performance on systems whose hardware resources are commensurately supportive (for example, where the number of CPU cores is high). In particular, a high number of _writer_ threads on such systems may significantly optimize the performance of _durable writes_: see xref:learn:data/durability.adoc[Durability], for information. +A high number of reader and writer threads benefits disk-based workloads that require high throughput, especially when using high-end disk drives such as NVMe SSDs. This is likely to be the case when using Magma as the storage engine. In this case, it is best to choose xref:manage:manage-settings/general-settings.adoc#data-settings['Disk i/o optimized'] mode for the reader and writer thread settings. + Note, however, that a high thread-allocation might _impair_ some aspects of performance on less appropriately resourced nodes. Consequently, changes to the default thread-allocation should not be made to production systems without prior testing. A starting-point for experimentation is to establish the numbers for reader threads and writer threads as each equal to the _queue depth_ of the underlying I/O subsystem. -See the _General-Settings_ information on xref:manage:manage-settings/general-settings.adoc#data-settings[Data Settings], for details on how to establish appropriate numbers of reader and writer threads. +See the _General-Settings_ information on xref:manage:manage-settings/general-settings.adoc#data-settings[Data Settings] for details on how to establish appropriate numbers of reader and writer threads. Note also that the number of threads can also be configured for the _NonIO_ and _AuxIO_ thread pools: -* The _NonIO_ thread pool is used to run _in memory_ tasks -- for example, the _durability timeout_ task. +* The _NonIO_ thread pool is used to run _in memory_ tasks — for example, the _durability timeout_ task. -* The _AuxIO_ thread pool is used to run _auxiliary I/O_ tasks -- for example, the _access log_ task. +* The _AuxIO_ thread pool is used to run _auxiliary I/O_ tasks — for example, the _access log_ task. -Again, the maximum thread-allocation that can be specified for each is _64_, the minimum _1_. +Again, the maximum thread-allocation that can be specified for each is _64_, the minimum is _1_. Thread-status can be viewed, by means of the [.cmd]`cbstats` command, specified with the [.param]`raw workload` option. See xref:cli:cbstats-intro.adoc[cbstats] for information.
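+
+For example, the following is a minimal sketch of such an invocation; the node address, credentials, and bucket name are placeholders to adapt to your deployment:
+
+[source,console]
+----
+/opt/couchbase/bin/cbstats localhost:11210 -u Administrator -p password -b travel-sample raw workload
+----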
@@ -61,18 +64,20 @@ For information on using the REST API to manage thread counts, see xref:rest-api [#deletion] == Deletion -Items can be deleted by a client application: either by immediate action, or by setting a _Time-To-Live_ (TTL) value: this value is established through accessing the `TTL` metadata field of the item, which establishes a future point-in-time for the item's _expiration_. +Items can be deleted by a client application, either by immediate action or by setting a _Time-To-Live_ (TTL) value. +This value is established through accessing the `TTL` metadata field of the item, +which defines a future point-in-time for the item's _expiration_. When the point-in-time is reached, Couchbase Server deletes the item. Following deletion by either method, a _tombstone_ is maintained by Couchbase Server, as a record (see below). -An item's TTL can be established either directly on the item itself, or via the bucket that contains the item. +An item's TTL can be established either directly on the item itself or via the bucket that contains the item. For information, see xref:data/expiration.adoc[Expiration]. == Tombstones A _tombstone_ is a record of an item that has been removed. -Tombstones are maintained in order to provide eventual consistency, between nodes and between clusters. +Tombstones are maintained to provide eventual consistency between nodes and between clusters. Tombstones are created for the following: @@ -83,7 +88,7 @@ The tombstone is created when the document is _deleted_; and contains the former The tombstone is created when the collection is _dropped_; and contains information that includes the collection-id, the collection’s scope-id, and a manifest-id that records the dropping of the collection. + All documents that were in the dropped collection are deleted when the collection is dropped. -No tombstones are maintained for such documents: moreover, any tombstones for deleted documents that existed in the collection prior to its dropping are themselves removed when the collection is dropped; and consequently, only a collection-tombstone remains, when a collection is dropped. +No tombstones are maintained for such documents: moreover, any tombstones for deleted documents that existed in the collection prior to its dropping are themselves removed when the collection is dropped; and consequently, only a collection-tombstone remains when a collection is dropped. The collection-tombstone is replicated via DCP as a single message (ordered with respect to mutations occurring in the vBucket), to replicas and other DCP clients, to notify such recipients that the collection has indeed been dropped. It is then the responsibility of each recipient to purge anything it still contains that belonged to the dropped collection. @@ -95,23 +100,31 @@ For more information, see xref:data/expiration.adoc#post-expiration-purging[Post [#disk-paths] == Disk Paths -At node-initialization, Couchbase Server allows up to four custom paths to be established, for the saving of data to the filesystem: these are for the Data Service, the Index Service, the Analytics Service, and the Eventing Service. Note that the paths are node-specific: consequently, the data for any of these services may occupy a different filesystem-location, on each node. +At node-initialization, +Couchbase Server allows up to four custom paths to be established for the saving of data to the filesystem: +these are for the Data Service, the Index Service, the Analytics Service, and the Eventing Service.
+Note that the paths are node-specific: +consequently, the data for any of these services may occupy a different filesystem-location, on each node. For information on setting data-paths, see xref:manage:manage-nodes/initialize-node.adoc[Initialize a Node]. [#append-only-writes-and-auto-compaction] == Append-Only Writes and Auto-Compaction -Couchbase Server uses an _append-only_ file-write format; which helps to ensure files' internal consistency, and reduces the risk of corruption. -Necessarily, this means that every change made to a file — whether an addition, a modification, or a deletion — results in a new entry being created at the end of the file: therefore, a file whose user-data is diminished by deletion actually grows in size. +Couchbase Server uses an _append-only_ file-write format, +which helps to ensure the internal consistency of the files and reduces the risk of corruption. +Necessarily, this means that every change made to a file — whether an addition, a modification, +or a deletion — results in a new entry being created at the end of the file: +therefore, a file whose user-data is diminished by deletion actually grows in size. File-sizes should be periodically reduced by means of _compaction_. -This operation can be performed either manually, on a specified bucket; or on an automated, scheduled basis, either for specified buckets, or for all buckets. +This operation can be performed either manually, on a specified bucket; or on an automated, scheduled basis, either for specified buckets or for all buckets. For information on performing manual compaction with the CLI, see xref:cli:cbcli/couchbase-cli-bucket-compact.adoc[bucket-compact]. For information on configuring auto-compaction with the CLI, see xref:cli:cbcli/couchbase-cli-setting-compaction.adoc[setting-compaction]. -For all information on using the REST API for compaction, see the xref:rest-api:compaction-rest-api.adoc[Compaction API]. +For all information on using the REST API for compaction, see the xref:rest-api:rest-autocompact-global.adoc[Global Compaction API] +or xref:rest-api:rest-autocompact-per-bucket.adoc[Per-bucket Compaction API]. For information on configuring auto-compaction with Couchbase Web Console, see xref:manage:manage-settings/configure-compact-settings.adoc[Auto-Compaction]. @@ -120,7 +133,7 @@ For information on configuring auto-compaction with Couchbase Web Console, see x _Disk I/O_ — reading items from and writing them to disk — does not block client-interactions: disk I/O is thus considered a _background task_. The priority of disk I/O (along with that of other background tasks, such as item-paging and DCP stream-processing) is configurable _per bucket_. -This means, for example, that one bucket's disk I/O can be granted priority over another's. +This means, for example, that one bucket's disk I/O can be granted priority over that of another. For further information, see xref:manage:manage-buckets/create-bucket.adoc[Create a Bucket]. @@ -137,7 +150,7 @@ Note that in _Capella_, Couchbase buckets are referred to as _Memory and Disk_ b |No Ejection |_Ephemeral_ -|If memory is exhausted then the buckets are set to read-only to prevent data loss. This is the default setting. +|If memory is exhausted, then the bucket is set to `read-only` to prevent data loss. This is the default setting.
|NRU{empty}footnote:[Not Recently Used] Ejection |_Ephemeral_ @@ -145,11 +158,11 @@ Note that in _Capella_, Couchbase buckets are referred to as _Memory and Disk_ b |Value Only Ejection |_Couchbase_ -|In low memory situations, this policy wll eject values and data from memory, but keys and metadata will be retained. This is the default policy for _Couchbase_ buckets. +|In low-memory situations, this policy will eject values and data from memory, but keys and metadata will be retained. This is the default policy for _Couchbase_ buckets. |Full Ejection |_Couchbase_ -|Under this policy, data, keys and metadata are ejected from memory. +|Under this policy, data, keys, and metadata are ejected from memory. |=== diff --git a/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc b/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc index 81605c0f80..2945b1f895 100644 --- a/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc +++ b/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc @@ -12,7 +12,7 @@ Couchbase Server allows users and applications to save data, in binary or JSON f Each bucket therefore contains _keys_ and associated _values_. See xref:buckets-memory-and-storage/buckets.adoc[Buckets], for detailed information. -Within the memory and storage management system of Couchbase Server, both Couchbase and Ephermal buckets are implemented as _vBuckets_, 1024 of which are created for every bucket (except on MacOS, where the number is 64). +Within the memory and storage management system of Couchbase Server, both Couchbase and Ephemeral buckets are implemented as _vBuckets_, 1024 of which are created for every bucket (except on MacOS, where the number is 64). vBuckets are distributed evenly across the memory and storage facilities of the cluster; and the bucket's items are distributed evenly across its vBuckets. This evenness of distribution ensures that all instances of the xref:services-and-indexes/services/data-service.adoc[Data Service] take an approximately equal share of the workload, in terms of numbers of documents to maintain, and operations to handle. diff --git a/modules/learn/pages/clusters-and-availability/automatic-failover.adoc b/modules/learn/pages/clusters-and-availability/automatic-failover.adoc index c8d7e87ce4..b9075ae421 100644 --- a/modules/learn/pages/clusters-and-availability/automatic-failover.adoc +++ b/modules/learn/pages/clusters-and-availability/automatic-failover.adoc @@ -1,5 +1,6 @@ = Automatic Failover -:description: One or more nodes can be failed over automatically when they become unresponsive or experience continuous disk access problems. This is done as long as data safety is maintained;i.e. no data loss occurs as a result of failover. +:description: One or more nodes can be failed over automatically when they become unresponsive or experience a configured auto-failover triggering event. An auto-failover is performed only if all safety check conditions are met; these checks maintain data safety, i.e., no data loss occurs as a result of failover. + :page-aliases: clustersetup:automatic-failover.adoc :stem: @@ -8,39 +9,72 @@ == Understanding Automatic Failover +IMPORTANT: The concept of auto-failover, and failover in general, applies to nodes in a single cluster, including nodes in a Server Group. There are no separate settings for Server Group failover or Server Group auto-failover.
+If auto-failover settings are configured properly, and if the cluster services topology allows, auto-failover of all the nodes in a Server Group can occur. For example, if an event makes all of the nodes in a Server Group unavailable, so that they are eligible for auto-failover, all of those nodes are automatically failed over only if all of the auto-failover constraints listed in xref:learn:clusters-and-availability/automatic-failover.adoc[Auto-failover Constraints] are met, including the majority quorum requirement: i.e., the remaining nodes must be able to form a majority quorum to initiate an auto-failover. + +See xref:learn:clusters-and-availability/groups.adoc[Server Group Awareness] for more information about Server Groups. + +Note that node failover in Couchbase Server applies within a single cluster, and auto-failover only occurs within a single cluster. +In the context of Cross Data Center Replication (XDCR), failover refers to application failover to a different cluster. Application failovers are always determined and controlled by the user. + _Automatic Failover_ — or _auto-failover_ — can be configured to fail over one or more nodes automatically. No immediate administrator intervention is required. Specifically, the Cluster Manager autonomously detects and verifies that the nodes are unresponsive, and then initiates the _hard_ failover process. Auto-failover does not fix or identify problems that may have occurred. Once appropriate fixes have been applied to the cluster by the administrator, a rebalance is required. -Auto-failover is always _hard_ failover. +Auto-failover is always a _hard_ failover. For information on how services are affected by hard failover, see xref:learn:clusters-and-availability/hard-failover.adoc[Hard Failover]. -This page describes auto-failover concepts and policy. +As a reminder, failover is a mechanism in Couchbase Server that allows a node to be taken out of the cluster so that applications no longer reference the services on the failed node and availability is maintained. Failover occurs at the node level. The automatic failover process for a non-responsive or unhealthy node starts when the cluster manager detects, per the configured auto-failover settings, that the node is unresponsive (the cluster manager of the node is not sending heartbeats to the cluster managers of other nodes) or that the Data or Index Service on the node is not healthy (the service heartbeat or process is not responding to the cluster manager). Multiple safety checks are then run to see if an auto-failover can be performed. If all checks pass, the cluster manager performs the hard failover process. + +The xref:learn:clusters-and-availability/automatic-failover.adoc#failover-events[Failover Events] section describes the events that start the auto-failover process. The xref:learn:clusters-and-availability/automatic-failover.adoc#auto-failover-constraints[Auto-Failover Constraints] section explains the safety checks run after an auto-failover triggering event, which determine whether an auto-failover should actually be performed. Checks required for service-specific safety policies are described in the xref:learn:clusters-and-availability/automatic-failover.adoc#failover-policy[Service-Specific Auto-Failover Policy] section.
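+
+As a quick orientation for the checks and settings discussed below, you can inspect the current auto-failover configuration through the REST API. The following is a minimal sketch; the host, port, and credentials are placeholders to adapt to your cluster:
+
+[source,console]
+----
+curl -u Administrator:password http://localhost:8091/settings/autoFailover
+----
+
+The response includes the `timeout`, `maxCount`, and `count` values that are discussed in the sections that follow.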
+ For information on managing auto-failover, see the information provided for Couchbase Web Console at xref:manage:manage-settings/general-settings.adoc[General] (which provides information on general cluster-settings), for the REST API at xref:rest-api:rest-cluster-autofailover-intro.adoc[Managing Auto-Failover], and for the CLI at xref:cli:cbcli/couchbase-cli-setting-autofailover.adoc[setting-autofailover]. == Failover Events -Auto-failover occurs in response to failed/failing events. -There are three types of event that can trigger auto-failover: +Auto-failover occurs in response to failed/failing events. Auto-failover applies to the node -- it’s the node that fails over regardless of the triggering event. There are specific types of events that trigger auto-failover processing. However, auto-failover will only actually occur if all of the checks (constraints and policies) for auto-failover pass. + +The constraints and policies checked after the triggering event are described in the xref:learn:clusters-and-availability/automatic-failover.adoc#auto-failover-constraints[Auto-Failover Constraints] and xref:learn:clusters-and-availability/automatic-failover.adoc#failover-policy[Service-Specific Auto-Failover Policy] sections. For example, despite the triggering event, an auto-failover of a node may be prevented due to the lack of a quorum or because there aren’t any other nodes running a service that exists on the node to be auto-failed over. + +Below are the types of events that can trigger auto-failover processing on a node: * _Node failure_. -A server-node within the cluster is unresponsive (due to a network failure, out-of-memory problem, or other node-specific issue). +A server-node within the cluster is unresponsive (due to a network failure, a very high CPU utilization problem, an out-of-memory problem, or another node-specific issue). This means that the cluster manager of the node has not sent heartbeats in the configured timeout period, and therefore, the health of the services running on the node is unknown. -* _Multiple node failure_. +** Multiple node failure. Concurrent correlated failure of multiple nodes such as physical rack of machines or multiple virtual machines sharing a host. -* _Disk read/write failure_. -Attempts to read from or write to disk on a particular node have resulted in a significant rate of failure, for longer than a specified time-period. -The node is removed by auto-failover, even though the node continues to be contactable. +* _Data Service disk read/write issues_. +Attempts by the Data Service to read from or write to disk on a particular node have resulted in a significant rate of failure (errors returned), for longer than a specified time-period. +* _Index or Data Service running on the node is non-responsive or unhealthy_. +** Index Service non-responsiveness. +The Index Service running on a node sends heartbeat messages to the cluster manager as an indication of its health. If the Index Service fails to send a heartbeat, it is considered unhealthy, and if it stays unhealthy for the user-specified threshold time for auto-failover, the cluster manager will start the auto-failover checks for the node that the Index Service is on. +** Data Service is unhealthy. +Besides the configured Data Service disk read/write monitoring for auto-failover, the Data Service running on a node can be deemed unhealthy by various other internal monitoring.
If the Data Service stays unhealthy for the user-specified threshold time for auto-failover, the cluster manager will start the auto-failover checks for the node that the Data Service is on. + +Note that Data Service and Index Service health monitoring for auto-failover uses the same Timeout value set for node unresponsiveness (see xref:learn:clusters-and-availability:automatic-failover.adoc[Configuring Auto-Failover]) -- this is the user-specified threshold time for auto-failover mentioned in the Data Service and Index Service monitoring descriptions above. + +Note that on a node where there are only Search, Eventing, Query, Analytics, or Backup services running, the services could become unhealthy, but as long as the cluster manager heartbeats are sent and processed by the rest of the cluster, an auto-failover of the node will not be attempted -- this is because only Data and Index Service health is monitored for node auto-failover. [#auto-failover-constraints] == Auto-Failover Constraints -Auto-failover is triggered: +Even if a monitored or configured auto-failover event occurs, an auto-failover is performed only if all the safety checks pass. These checks are explained in this section and in the xref:learn:clusters-and-availability:automatic-failover.adoc#failover-policy[Service-Specific Auto-Failover Policy] section. + +The xref:install:deployment-considerations-lt-3nodes.adoc#quorum-arbitration[quorum constraint] is a critical part of auto-failover, since the cluster must be able to form a quorum to initiate a failover following the failure of some of the nodes. For Server Groups, this means that if you have two server groups with an equal number of nodes, for auto-failover of all nodes in one server group to be able to occur, you could deploy an xref:learn:clusters-and-availability:nodes.adoc#adding-arbiter-nodes[arbiter node] (or another node) in a third physical server group, which allows the remaining nodes to form a quorum. + +Another critical auto-failover constraint for Server Groups is the maximum number of nodes that can be automatically failed over (`maxCount` in `/settings/autoFailover`) before administrator intervention is required. If you want an entire server group of nodes to be able to be automatically failed over, then the `maxCount` value should be at least the number of nodes in the server group. You can check the current `maxCount` setting in the output of `GET /settings/autoFailover`. The value of `count` in the same output tells you how many node auto-failovers have occurred since the parameter was last reset. Running a rebalance resets the `count` value back to 0. The count should not be reset manually unless guided by Support, since resetting it manually causes you to lose track of the number of auto-failovers that have already occurred without the cluster being rebalanced. + +The list below describes other conditions that must be met for an auto-failover to be executed even after a monitored or configured auto-failover event has occurred. * If the majority of nodes in the cluster can form a quorum to initiate failover, following the failure of one of the nodes. For example, given a cluster of 18 nodes, _10_ nodes are required for the quorum; and given a cluster of 17 nodes, _9_ nodes are required for the quorum. +** In the event of certain disk read/write issues, it is important to also consider the efficacy of the nodes in the cluster.
Should a node encounter such an issue, particularly an issue causing extreme disk slowness, and the disk encountering the issue is shared between the Data Service and the Cluster Manager, then this node is unlikely to be able to participate in this quorum. The idea of a quorum should be extended beyond “nodes that can communicate with one another” to “nodes that can communicate with one another and can reliably read from, and write to, their respective disks”. ++ +Note that in the event of some disk read/write issues, the Cluster Manager may instead become completely unresponsive to the other nodes in the cluster. In such a scenario, an auto-failover should be possible, provided that all other constraints are met. ++ +** In the event of certain disk read/write issues on the orchestrator node, particularly extreme disk slowness, it may not be possible to perform an auto-failover at all. The orchestrator node is in charge of orchestrating failover and, as such, must be part of the quorum of nodes required to perform the failover. * Only up to an administrator-specified maximum number of nodes. After this maximum number of auto-failovers has been reached, no further auto-failover occurs, until the count is manually reset by the administrator, or until a rebalance is successfully performed. @@ -54,20 +88,28 @@ Therefore, even a single event may not trigger a response; and an administrator- * If the cluster has been configured to _preserve durable writes_, only if the failover cannot result in the loss of durably written data. See xref:learn:data/durability.adoc#preserving-durable-writes[Preserving Durable Writes]. -Note that auto-failover should be configured only when the cluster contains sufficient resources to handle all possible consequences: workload-intensity on remaining nodes may increase significantly. +Now that you know the constraints that must be met for auto-failover to occur (after a monitored/configured event is seen), below are some other important considerations when configuring auto-failover: -Auto-failover is for intra-cluster use only: it does not work with xref:learn:clusters-and-availability/xdcr-overview.adoc[Cross Data Center Replication (XDCR)]. +* Auto-failover should be configured only when the cluster contains sufficient resources to handle all possible consequences: workload-intensity on remaining nodes may increase significantly. The `maxCount` parameter of the auto-failover settings can be used to avoid this sort of thundering-herd problem by limiting the number of nodes that can be automatically failed over. However, remember that if you want all of the nodes in a server group to be able to be auto-failed over, `maxCount` must be at least the number of nodes in the server group. -Auto-failover may take significantly longer if the unresponsive node is that on which the _orchestrator_ is running; since _time-outs_ must occur, before available nodes can elect a new orchestrator-node and thereby continue. +* Auto-failover may take significantly longer if the unresponsive node is that on which the _orchestrator_ is running; since _time-outs_ must occur, before available nodes can elect a new orchestrator-node and thereby continue. Faster failover can be achieved by deploying an arbiter node, which is a node that hosts no Couchbase service. See xref:learn:clusters-and-availability/nodes.adoc#fast-failover[Fast Failover]. -See xref:manage:manage-settings/configure-alerts.adoc[Alerts], for -details on configuring email alerts related to failover.
+* Auto-failover is for intra-cluster use only: it does not work with xref:learn:clusters-and-availability/xdcr-overview.adoc[Cross Data Center Replication (XDCR)]. + +See xref:manage:manage-settings/configure-alerts.adoc[Alerts], for details on configuring email alerts related to failover. See xref:learn:clusters-and-availability/groups.adoc[Server Group Awareness], for information on server groups. +See xref:learn:clusters-and-availability/nodes.adoc#adding-arbiter-nodes[Adding Arbiter Nodes] for more information on the use of arbiter nodes for xref:learn:clusters-and-availability/nodes.adoc#fast-failover[fast failover] and xref:install:deployment-considerations-lt-3nodes.adoc#quorum-arbitration[quorum arbitration]. + +See xref:learn:clusters-and-availability/cluster-manager.adoc[Cluster Manager] for more information on ns_server and the Master Services, also known as the Orchestrator. + [#failover-policy] == Service-Specific Auto-Failover Policy +When a monitored or configured auto-failover event occurs on a node, there are constraints that need to be checked to determine if the node can be automatically failed over. An example of such an event is the node's cluster manager not being responsive. In such instances, one of the constraints is the policies or rules specific to the services that are running on the unresponsive node. Since a number of different service configurations are possible, below is information about the auto-failover policy for Couchbase Services, followed by specific examples. + + The auto-failover policy for Couchbase Services is as follows: * A service must be running on a minimum number of nodes, for auto-failover to be applied to any one of those nodes, should that node become unresponsive. @@ -77,7 +119,6 @@ The auto-failover policy for Couchbase Services is as follows: * If the Data Service is running on its required minimum number of nodes, auto-failover may be applied to any of those nodes, even when auto-failover policy is thereby violated for one or more other, co-hosted services. This is referred to as xref:learn:clusters-and-availability/automatic-failover.adoc#data-service-preference[Data Service Preference]. -* The index service shares the same Auto-Failover settings of the Data Service. * When the Index service is co-located with the Data service, it will not be consulted on failing over the node. The node-minimum for each service is provided in the following table: @@ -87,7 +128,7 @@ The node-minimum for each service is provided in the following table: | Service | Nodes Required | Data -| 3 +| 2 | Query | 2 @@ -149,12 +190,11 @@ However, if instead, node #5 becomes unresponsive, auto-failover is not triggere | Data | #3 -| Data +| Arbiter Node, no services |=== + If node #1 becomes unresponsive, auto-failover can be triggered. -This is due to _Data Service Preference_, which applies auto-failover based on the policy for the Data Service, irrespective of other services on the unresponsive node. -In this case, even though the Query and Search Services were both running on only a single node (#1), which is below the auto-failover policy requirement for each of those services (2), the Data Service was running on three nodes (#1, #2, and #3), which meets the auto-failover policy requirement for the Data Service (3). +This is due to _Data Service Preference_, which applies auto-failover based on the policy for the Data Service, irrespective of other services on the unresponsive node.
In this case, even though the Query and Search Services were both running on only a single node (#1), which is below the auto-failover policy requirement for each of those services (2), the Data Service was running on two nodes (#1 and #2), which meets the auto-failover policy requirement for the Data Service (2). [#data-service-preference-with-index-service] * A cluster has the following four nodes: @@ -190,26 +230,26 @@ WARNING: If an index does not have a replica and is co-located on a Data Service Auto-failover is configured by means of parameters that include the following. * _Timeout_. -The number of seconds that must elapse, after a node or group has become unresponsive, before auto-failover is triggered. This number is configurable: the default is 120 seconds; the minimum permitted is 5; the maximum 3600. +The number of seconds that must elapse, after a node or group has become unresponsive, before auto-failover is triggered. +This number is configurable. The default is 120 seconds; the minimum permitted duration is 1 second when set through the REST API and 5 seconds when set from the UI. +The maximum is 3600 seconds. Note that a low number reduces the potential time-period during which a consistently unresponsive node remains unresponsive before auto-failover is triggered; but may also result in auto-failover being unnecessarily triggered, in consequence of short, intermittent periods of node unavailability. -+ -WARNING: Care must be when running an un-replicated Index Service and a Data Service configured for fast failover (i.e. 5 seconds) on the same node. -If the failover is triggered, unnecessarily or otherwise, then the index service will be lost. + +Note that the monitoring of Data Service and Index Service health for auto-failover uses the same Timeout value set for node unresponsiveness. For example, if the Index Service is deemed unhealthy (because the Index Service fails to send heartbeats) for the Timeout amount of time, then the node that the Index Service is on will be considered for auto-failover (despite the fact that the node's cluster manager may be responding and sending heartbeats). + * _Maximum count_. The maximum number of nodes that can fail (either concurrently or sequentially in one or more events) and be handled by auto-failover. -The maximum value can be up to configured number of nodes, the default is 1. +The maximum value is 100, and the default is 1. This parameter is available in Enterprise Edition only: in Community Edition, the maximum number of nodes that can fail and be handled by auto-failover is always 1. * _Count_. The number of nodes that have already failed over. The default value is 0. -The value is incremented by 1 for every node that has an automatic-failover that occurs, up to the defined maximum count: beyond this point, no further automatic failover can be triggered until the count is reset to 0 through administrator-intervention. +The value is incremented by 1 for every node that is automatically failed over, up to the defined maximum count: beyond this point, no further automatic failover can be triggered until the count is reset to 0. Running a rebalance resets the count value back to 0. + * _Enablement of disk-related automatic failover; with corresponding time-period_. Whether automatic failover is enabled to handle continuous read-write failures.
If it is enabled, a number of seconds can also be specified: this is the length of a constantly recurring time-period against which failure-continuity on a particular node is evaluated. -The default for this number of seconds is 120; the minimum permitted is 5; the maximum 3600. -If at least 60% of the most recently elapsed instance of the time-period has consisted of continuous failure, failover is automatically triggered. -The default value for the enablement of disk-related automatic failover is false. -This parameter is available in Enterprise Edition only. +The default is 120 seconds; the minimum permitted is 1 second; and the maximum is 3600 seconds. Automatic failover is triggered if at least 60% of the most recently elapsed instance of the time-period consists of continuous disk-related failure, plus the standard auto-failover timeout. By default, auto-failover is switched on, to occur after 120 seconds for up to 1 event. Nevertheless, Couchbase Server triggers auto-failover only within the constraints described above, in xref:learn:clusters-and-availability/automatic-failover.adoc#auto-failover-constraints[Auto-Failover Constraints]. @@ -224,20 +264,20 @@ xref:cli:cbcli/couchbase-cli-setting-autofailover.adoc[setting-autofailover] wit [#auto-failover-during-rebalance] == Auto-Failover During Rebalance -Couchbase Server provides a setting to determine whether, once enabled, auto-failover should specifically be triggered during xref:learn:clusters-and-availability/rebalance.adoc[Rebalance], in the event of a node becoming unresponsive. - -If auto-failover _has_ been set to be triggered, following the configured timeout period, the rebalance is stopped; then, auto-failover is duly triggered. -Following auto-failover, rebalance is _not_ automatically re-attempted. -At this point, the cluster is likely to be in an unbalanced state: therefore, rebalance should be performed manually; and the unresponsive node fixed and restored to the cluster, as appropriate. +If an auto-failover event occurs during a rebalance, the rebalance is stopped; then, auto-failover is triggered. -If auto-failover has _not_ been set to be triggered, unless there is manual intervention, no failover occurs. -This may cause the rebalance to hang for an indeterminate period; before failing, with error messages. +WARNING: Following an auto-failover, rebalance _is not_ automatically re-attempted. -For information on setting auto-failover in the context of rebalance, see the information on xref:manage:manage-settings/general-settings.adoc[General] settings. +At this point, the cluster is likely to be in an unbalanced state; therefore, a rebalance should be performed manually, and the unresponsive node fixed and restored to the cluster, as appropriate. [#auto-failover-and-durability] == Auto-Failover and Durability Couchbase Server provides _durability_, which ensures the greatest likelihood of data-writes surviving unexpected anomalies, such as node-outages. The auto-failover maximum should be established to support guarantees of durability. -See xref:learn:data/durability.adoc[Durability], for information. + +There are other settings that support guarantees of durability during auto-failover, as explained in xref:learn:data:durability.adoc#preserving-durable-writes[Preserving Durable Writes].
In Couchbase Enterprise Server 7.2+, auto-failover can be configured not to occur if a node’s failover might result in the loss of durably written data, as explained in xref:learn:data:durability.adoc#protection-guarantees-and-automatic-failover[Protection Guarantees and Automatic Failover]. + +The Preserve Durable Writes setting is a global setting that applies to all buckets in the cluster -- the setting can be enabled and disabled from the UI Settings page (see xref:manage:manage-settings:general-settings.adoc#node-availability[Node Availability]) or using the REST API for xref:rest-api:rest-cluster-autofailover-enable.adoc[Enabling and Disabling Auto-Failover] settings. + +For complete information on durability and how the protection guarantees are affected by auto-failover, see xref:learn:data:durability.adoc[Durability]. diff --git a/modules/learn/pages/clusters-and-availability/groups.adoc b/modules/learn/pages/clusters-and-availability/groups.adoc index f91170724c..d1543d47d3 100644 --- a/modules/learn/pages/clusters-and-availability/groups.adoc +++ b/modules/learn/pages/clusters-and-availability/groups.adoc @@ -36,9 +36,12 @@ and if all the nodes in one of the server groups fail at the same time, even if you have the maximum count for auto-failover set to a value equal to or greater than the number of nodes in the server group that failed, auto-failover cannot occur since the remaining nodes in the remaining server group cannot form a majority quorum. + +The xref:install:deployment-considerations-lt-3nodes.adoc#quorum-arbitration[quorum constraint] is a critical part of auto-failover since the cluster must be able to form a quorum to initiate a failover, following the failure of the nodes. For Server Groups, this means that if you have two server groups with an equal number of nodes, for auto-failover of all nodes in one server group to be able to occur, you must have a third server group with at least one node that will allow the remaining nodes to form a quorum. The node in the third server group can be an xref:learn:clusters-and-availability/nodes.adoc#adding-arbiter-nodes[arbiter node], a type of node introduced in Server 7.6 that does not run any services and only exists to allow forming a quorum. ==== See xref:learn:clusters-and-availability/automatic-failover.adoc[Automatic Failover]. +See xref:learn:clusters-and-availability/nodes.adoc#adding-arbiter-nodes[Adding Arbiter Nodes] for more information on the use of arbiter nodes for xref:learn:clusters-and-availability/nodes.adoc#fast-failover[fast failover] and xref:install:deployment-considerations-lt-3nodes.adoc#quorum-arbitration[quorum arbitration]. Groups should be defined in accordance with the physical distribution of cluster-nodes. For example, a group should only include the nodes that are in a single _server rack_, or in the case of cloud deployments, a single _availability zone_. @@ -63,18 +66,21 @@ For information on the standard (non-Group-based) distribution of replica vBucke == Server Groups and vBuckets The distribution of vBuckets across groups is exemplified below. -In each illustration, all servers are assumed to be running the Data Service. +In each illustration, all servers are assumed to be running the Data Service, except for the arbiter node, which does not run any service. [#vbucket-distribution-across-equal-groups] === Equal Groups -The following illustration shows how vBuckets are distributed across two groups; each group containing four of its cluster's eight nodes.
+The following illustration shows how vBuckets are distributed across two groups; each group containing four of the cluster's nodes. The third group contains only one node, an arbiter node, which exists to allow a quorum to be formed if all the nodes in server group 1 or 2 fail. [#groups_two_equal] -image::clusters-and-availability/groups-two-equal.png[,720,align=left] +image::clusters-and-availability/groups-two-equal_updated.png[,720,align=left] Note that Group 2 contains all the replica vBuckets that correspond to active vBuckets on Group 1; while conversely, Group 1 contains all the replica vBuckets that correspond to active vBuckets on Group 2. +Note also that for auto-failover of Group 1 or Group 2 nodes to be possible, the auto-failover majority quorum requirement must be met. Therefore, a third server group is always recommended, and it can include a single xref:learn:clusters-and-availability/nodes.adoc#adding-arbiter-nodes[arbiter node] that is not running any services. + + [#unequal-groups] === Unequal Groups @@ -90,7 +96,7 @@ A number of constraints come into play when allocating active and replica vBucke Not all the constraints can be satisfied when the buckets are allocated across uneven groups. In this scenario, the `active balance` and `rack-zone` constraints will take priority: -when the vBucket map is generated, we will ensure that there are approximately the same number of active vBuckets on each cluster and that replicas of a given vBucket must reside in separate groups. +when the vBucket map is generated, we will ensure that there are approximately the same number of active vBuckets on each node in the cluster and that replicas of a given vBucket must reside in separate groups. The following illustration shows how vBuckets are distributed across two groups: Group 1 contains four nodes, while Group 2 contains five. @@ -208,6 +214,8 @@ For example, given a cluster: At a minimum, one instance of the Index Service and one instance of the Search Service should be deployed on each rack. +Also, for auto-failover to be possible, the service-specific auto-failover constraints must be met -- the policy information is documented in xref:learn:clusters-and-availability/automatic-failover.adoc#failover-policy[Service-Specific Auto-Failover Policy] -- it lists the number of nodes that each service must be running on and explains the xref:learn:clusters-and-availability/automatic-failover.adoc#data-service-preference[Data Service Preference] when a service is co-located with the Data Service. + [#defining-groups-and-enabling-group-failover] == Defining Groups and Enabling Failover of All a Group's Nodes diff --git a/modules/learn/pages/clusters-and-availability/rebalance.adoc b/modules/learn/pages/clusters-and-availability/rebalance.adoc index 55894d78ea..2f4b6ebb78 100644 --- a/modules/learn/pages/clusters-and-availability/rebalance.adoc +++ b/modules/learn/pages/clusters-and-availability/rebalance.adoc @@ -243,15 +243,26 @@ Community Edition clusters that are upgraded to Enterprise Edition 7.0.2 can hav During file transfer, should an unresolvable error occur, file transfer is automatically abandoned, and _partition build_ is used instead. The file-transfer feature can be enabled and disabled by means of the REST API. -See xref:rest-api:rest-fts-partition-file-transfer.adoc[Rebalance Based on File Transfer]. +See xref:fts-rest-manage:index.adoc[Search Manager Options].
[#rebalancing-the-query-service] === Query Service -When a node is removed and rebalanced, the Query Service will allow existing queries and transactions to complete before shutting down, which may result in the rebalancing operation taking longer to complete. - The Query Service diagnostic log on the node(s) being removed will contain messages indicating how many transactions and queries are still running. - Any new connection attempts to nodes that are shutting down will receive error 1180 (`E_SERVICE_SHUTTING_DOWN`), and may receive error 1181 (`E_SERVICE_SHUT_DOWN`) in the brief period between the completion of the last statement or transaction and the service exiting. - Such rejected requests will have HTTP status code 503 (`service unavailable`) set. +When you remove a node from a cluster and rebalance it, the Query Service allows existing queries and transactions to complete before shutting down. +This can increase the overall time required for the rebalance operation, depending on how long the active requests and transactions take to complete. + +To monitor the shutdown progress, you can check the Query Service diagnostic log on the node being removed. +These logs contain messages that indicate the number of ongoing transactions and queries. + +At the start of the rebalance operation, the Query Service waits for in-flight transactions and requests to complete. +Requests sent after the rebalance begins are accepted, but not waited on. +They do not delay the rebalance and may be terminated abruptly once the initial in-flight requests are complete. +For each such request, there will be a message in the Query Service diagnostic log indicating that the request was terminated as it was received after the rebalance started. + +For a brief period after all active requests and transactions are handled, and just before the service exits, requests may return error 1181 (`E_SERVICE_SHUT_DOWN`), indicating that the service has shut down. + +Rejected requests will have an HTTP status code 503 (`service unavailable`). +If needed, you can retry these requests on another Query node that is still in the cluster. [#rebalancing-the-eventing-service] === Eventing Service diff --git a/modules/learn/pages/clusters-and-availability/xdcr-active-active-sgw.adoc b/modules/learn/pages/clusters-and-availability/xdcr-active-active-sgw.adoc new file mode 100644 index 0000000000..668c48702a --- /dev/null +++ b/modules/learn/pages/clusters-and-availability/xdcr-active-active-sgw.adoc @@ -0,0 +1,101 @@ += XDCR Active-Active with Sync Gateway +:description: pass:q[You can use XDCR with Sync Gateway mobile clusters in a bi-directional, active-active replication, but you must make sure that both the Server and the Sync Gateway versions support this option. Otherwise, using XDCR with Sync Gateway buckets in a bi-directional replication can cause data corruption.] + +[abstract] +{description} + +[#xdcr-active-active-sgw-intro] +== Introduction + +NOTE: To set up XDCR bi-directional replication with Sync Gateway (SGW), the minimum required versions are Server 7.6.6 and SGW 4.0.0. + +In versions earlier than Server 7.6.6 and Sync Gateway (SGW) 4.0.0, only an active-passive setup was supported with both XDCR and SGW. +Server 7.6.6 introduces XDCR Active-Active replication with Sync Gateway for XDCR-Mobile interoperability: you can configure an active-active XDCR setup with Sync Gateway (SGW) and mobile applications on both the XDCR source and target clusters.
+ +For more information about how Sync Gateway 4.0+ works with Couchbase Server's XDCR, see xref:sync-gateway::server-compatibility-xdcr.adoc[XDCR - Server Compatibility]. + +[IMPORTANT] +==== +Here are a few limitations of the _XDCR Active-Active with Sync Gateway_ feature: + +* If you use _user-created extended attributes (user xattrs)_ in your documents, and you have more than 10 user xattrs in a document, then you cannot use the _XDCR Active-Active with Sync Gateway_ feature. +This is due to an internal limitation of managing extended attributes in a document. +If you try to use _XDCR Active-Active with Sync Gateway_ when you have more than 10 user xattrs in a document, the XDCR replication **silently skips** replicating that document. +As a result, the data in the replication-skipped document will not be consistent between the target and source clusters. +The only way you will know this skip occurred is that the Prometheus stat `subdoc_cmd_docs_skipped` is incremented, and the document will _not_ be consistent between the target and source. + +* If you use Eventing service functions that update documents in XDCR-replicated buckets (Eventing source bucket mutations), ensure your functions do not cause continuous replication loops. +In bi-directional active-active XDCR environments, Eventing functions that trigger document updates can lead to "ping-pong" replication unless you implement logic to prevent infinite loops. +Always add safeguards to avoid redundant updates and unwanted replication behavior in bi-directional setups. For more information, see xref:sync-gateway:xdcr-active-active-eventing.adoc[XDCR Active-Active and Eventing]. +==== + +You can configure XDCR Active-Active with Sync Gateway for XDCR-Mobile interoperability using one of the following methods: + +* xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc#xdcr-active-active-sgw-greenfield-deployment[Greenfield deployment]: Set up a new active-active configuration with both XDCR and SGW. +* xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc#xdcr-active-active-sgw-upgrade[Upgrading an existing setup]: Convert an existing active-passive XDCR-SGW configuration to an active-active XDCR-SGW setup. + +NOTE: When using the _XDCR Active-Active with Sync Gateway_ feature, where the Sync Gateway version is 4.0.0 or later and the Server version is 7.6.6 or later, the XDCR inbound user on the replication target must have both the xref:learn:security/roles.adoc#xdcr-inbound[XDCR Inbound] and xref:learn:security/roles.adoc#data-writer[Data Writer] RBAC roles. + +[#xdcr-active-active-sgw-prerequisites] +== Prerequisites + +You must set the bucket property `enableCrossClusterVersioning` before you can use the `mobile=Active` setting when you xref:manage:manage-xdcr/create-xdcr-replication.adoc[Create a Replication] and xref:manage:manage-xdcr/xdcr-management-overview.adoc[Manage XDCR]. +To enable the bucket property `enableCrossClusterVersioning` using the REST API, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc#modify-enablecrossclusterversioning[Modify the bucket property enableCrossClusterVersioning] or xref:rest-api:rest-bucket-create.adoc#example-enablecrossclusterversioning-edit[Example: Turning on enableCrossClusterVersioning, when Editing]. + +[#xdcr-active-active-sgw-greenfield-deployment] +== Greenfield Deployment + +To configure a new active-active XDCR with Sync Gateway setup, do the following: + +.
Create two clusters, for example, cluster A and cluster B, with _all_ the nodes running Server 7.6.6 or a later version (or upgrade the existing Server clusters to 7.6.6 or a later version). +. Create the buckets between which XDCR will be set up, for example, B1 in cluster A and B2 in cluster B. Then do the following: +.. Enable the ECCV setting on B1. All the mutations in B1 will have new metadata called the HLV. +.. Enable the ECCV setting on B2. All the mutations in B2 will have new metadata called the HLV. ++ +NOTE: ECCV refers to the bucket property `enableCrossClusterVersioning`. If there are more than two buckets in the replication topology, you must enable ECCV for all those buckets. ++ +. Create an XDCR replication from B1 to B2 with `mobile=Active`. Also, create an XDCR replication from B2 to B1 with `mobile=Active`. ++ +For information about creating an XDCR by setting `mobile=Active` through the REST API, see xref:rest-api:rest-xdcr-create-replication.adoc[Creating a Replication]. ++ +For information about creating an XDCR by setting `mobile=Active` from the UI, see xref:manage:manage-xdcr/create-xdcr-replication.adoc#create-an-xdcr-replication-with-the-ui[Create an XDCR Replication with the UI]. +. Configure SGW 4.0+ on each cluster, cluster A and cluster B. + +This setup can handle application traffic on both buckets B1 and B2 of the respective clusters along with SGW import into both the buckets simultaneously. + +[#xdcr-active-active-sgw-upgrade] +== Upgrading an Existing Setup + +You can convert an existing active-passive XDCR-Sync Gateway (SGW) setup into an active-active XDCR-Sync Gateway setup. + +For illustration, there are two clusters, A and B. An SGW is connected to cluster A, and this cluster is active. +Cluster B is passive, with an XDCR replication set up from bucket B1 in cluster A to bucket B2 in cluster B. +All current application traffic should be only on bucket B1 of cluster A. + +.Replication before upgrade: XDCR Active-Passive with SGW +image::clusters-and-availability/xdcr-active-sgw-before-upgrade.png[,720,align=left] + +. Upgrade both clusters A and B, with _all_ the nodes of the clusters, to Server 7.6.6 or a later version. +. Enable ECCV on bucket B1. All the mutations in B1, after this point in time, will have new metadata called the HLV. ++ +NOTE: ECCV refers to the bucket property `enableCrossClusterVersioning`. If there are more than two buckets in the replication topology, you must enable ECCV for all those buckets. ++ +. Enable ECCV on bucket B2. All the mutations in B2, after this point in time, will have new metadata called the HLV. +. Update the replication settings of the existing XDCR replication from B1 to B2 to `mobile=Active`, as shown in the sketch after these steps. ++ +You can use the REST API or the XDCR UI to update an existing replication. For information about using the REST API to modify the replication settings for an existing replication, see xref:rest-api:rest-xdcr-adv-settings.adoc#change-existing-replication-with-mobile-active[Change Settings for an Existing Replication to Set mobile=Active] in xref:rest-api:rest-xdcr-adv-settings.adoc[Managing Advanced Settings]. ++ +. Create an XDCR replication from B2 to B1 with the replication setting `mobile=Active`. +. Upgrade SGW on cluster A to version 4.0 or later. +. Connect SGW 4.0+ to cluster B. +. Enable application active traffic on cluster B.
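+
+The following is a sketch of the advanced-settings REST call that sets `mobile=Active` on an existing replication. The address, credentials, and replication ID are placeholders; the replication ID takes the form `<remote-cluster-UUID>/<source-bucket>/<target-bucket>`, with the slashes URL-encoded:
+
+[source,console]
+----
+curl -X POST -u Administrator:password \
+  http://localhost:8091/settings/replications/<remote-cluster-UUID>%2FB1%2FB2 \
+  -d mobile=Active
+----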
+ +This setup can handle application traffic on both buckets B1 and B2 of the respective clusters along with SGW import into both the buckets simultaneously. + +This is an illustration of the final configuration: + +.Replication after upgrade: XDCR Active-Active with SGW +image::clusters-and-availability/xdcr-active-sgw-after-upgrade.png[,720,align=left] diff --git a/modules/learn/pages/clusters-and-availability/xdcr-conflict-resolution.adoc b/modules/learn/pages/clusters-and-availability/xdcr-conflict-resolution.adoc index 96ec6df573..54cdffd1be 100644 --- a/modules/learn/pages/clusters-and-availability/xdcr-conflict-resolution.adoc +++ b/modules/learn/pages/clusters-and-availability/xdcr-conflict-resolution.adoc @@ -5,38 +5,23 @@ [abstract] {description} -[#conflicts_and_their_resolution] -== Conflicts and Their Resolution - -A _conflict_ is caused when the source and target copies of an XDCR-replicated document are updated independently of and dissimilarly to one another, each by a local application. -The conflict must be _resolved_, by determining which of the variants should prevail; and then correspondingly saving both documents in identical form. -XDCR provides an automated _conflict resolution_ process. - -Two, alternative conflict resolution policies are supported: _sequence-number-based_ (which is the default), and _timestamp-based_. -Note that _timestamp-based_ conflict resolution is only available in the Enterprise Edition of Couchbase Server. - -[#the_conflict_resolution_process] -== The Conflict Resolution Process - -When a source document is modified, XDCR determines whether this revision of the document should be applied to the target. -For documents above 256 bytes in size, XDCR fetches metadata from the target cluster before replicating. -The target metadata for the document is compared with the source metadata for the document, in order to choose which document should prevail (the exact subset of metadata used in this comparison depends on the source bucket's _conflict resolution policy_). -If the source document prevails, it is replicated to the target; if the target document prevails, the source document is not replicated. - -Once a replicated document reaches the target, the target cluster also performs a metadata comparison as described, in order to confirm that the document from the source cluster should indeed prevail. If this is confirmed, the document from the source cluster is applied to the target cluster, and the target cluster's previous version of the document is discarded. -As a performance optimization, XDCR makes no metadata comparison on the source for documents of 256 bytes or less, thus making unnecessary a metadata fetch from the target cluster: instead, the document is replicated immediately to the target, and metadata comparison is performed there. +[#conflicts_and_their_resolution] +== Conflict Resolution -If a document is deleted on the source, XDCR makes no metadata comparison on the source before replication. +When a source document is modified, XDCR determines whether this revision of the document must be applied to the target. +This process is called _conflict resolution_, and it is fully automated. +XDCR supports the following two alternative conflict resolution policies, chosen at bucket-creation time (see the sketch after this list): -Once configured, conflict resolution is a fully automated process, requiring no manual intervention. +* Sequence number-based conflict resolution (the default policy). +* Timestamp-based conflict resolution.
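+
+As a sketch of how a policy is chosen at bucket-creation time through the REST API (the host, credentials, bucket name, and quota are placeholders; `seqno` selects sequence number-based resolution, and `lww` selects timestamp-based resolution):
+
+[source,console]
+----
+curl -X POST -u Administrator:password http://localhost:8091/pools/default/buckets \
+  -d name=B1 -d ramQuota=256 -d conflictResolutionType=lww
+----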
[#revision-id-based-conflict-resolution] == Conflict Resolution Based on Sequence Number Conflicts can be resolved by referring to documents' _sequence numbers_. Sequence numbers are maintained per document, and are incremented on every document-update. -A document's sequence number is stored as part of its _metadata_: specifically, as the value of the `rev` key (see xref:manage:manage-ui/manage-ui.adoc#console-documents[Documents], for details on how to inspect metadata). +A document's sequence number is stored as a part of its _metadata_: specifically, as the value of the `rev` key (see xref:manage:manage-ui/manage-ui.adoc#console-documents[Documents], for details on how to inspect metadata). The sequence numbers of source and target documents are compared; and the document with the higher sequence number prevails. If both documents have the same sequence number, the conflict is resolved by comparing the following metadata-elements, in the order shown: @@ -44,6 +29,9 @@ If both documents have the same sequence number, the conflict is resolved by com . Expiration (TTL) value . Document flags +When Cross Cluster Versioning is enabled, the Hybrid Logical Vector (HLV) metadata in the source and target documents' xattrs is also used in conflict resolution. +For more information about the `enableCrossClusterVersioning` property and the HLV metadata, see xref:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning]. + [#timestamp-based-conflict-resolution] == Timestamp-Based Conflict Resolution @@ -57,6 +45,8 @@ If both document-versions have the same timestamp-value, the conflict is resolve . Expiration (TTL) value . Document flags +When Cross Cluster Versioning is enabled, the Hybrid Logical Vector (HLV) metadata in the source and target documents' xattrs is also used in conflict resolution. For more information about the `enableCrossClusterVersioning` property and the HLV metadata, see xref:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning]. + [#time-synchronization] === Time Synchronization @@ -84,7 +74,7 @@ Each mutation has its own HLC timestamp. [#ensuring_safe_failover] === Ensuring Safe Failover -When failover (say, from data center A to data center B) is required, timestamp-based conflict resolution requires that applications redirect traffic to data center B only after the greater of the following two time-periods has elapsed: +When failover of an application is required (say, from data center A to data center B), timestamp-based conflict resolution requires that applications redirect traffic to data center B only after the greater of the following two time-periods has elapsed: * The replication latency between data centers A and B. This provides sufficient time for any _in-flight_ mutations to be received by data center B prior to traffic redirection. @@ -98,8 +88,16 @@ When availability is restored to data center A, applications must wait for the s Conflict resolution policy is configured on a per-bucket basis at bucket creation time, it cannot be changed later. For more information, see xref:manage:manage-buckets/create-bucket.adoc[Create a Bucket]. -Choosing a conflict resolution method requires consideration of the logic of the applications that require the data.
-This is illustrated by the following examples: + +[IMPORTANT] +==== +* You must select the same conflict resolution policy for all the buckets in the replication topology: a replication can be created only between buckets that have the same conflict resolution policy. +* When creating a bucket, you must actively choose the conflict resolution policy. +If you do not choose a policy, the Sequence number-based conflict resolution policy is used by default. +* After the bucket is created, you cannot change the conflict resolution policy for that bucket. In general, the Timestamp-based conflict resolution policy is preferred: its logic is easier to understand, it suits most general use cases, and it works best with the latest Server features. +==== + +The following examples illustrate how the two different conflict resolution policies apply: * _Sequence-Number-based_, whereby the document with the higher number of updates wins. A hit-counter, for a website, is stored as a document within Couchbase Server: a value within the document is incremented each time the website is accessed. @@ -114,9 +112,10 @@ Therefore, in this instance, timestamp-based conflict resolution should be used, [#aligning_source_and_target_policies] == Aligning Source and Target Policies -XDCR replications cannot be created between buckets with different conflict resolution policies: source and target buckets must always be configured with the same policy. +XDCR replications cannot be created between buckets with different conflict resolution policies. The source and target buckets must always be configured with the same conflict resolution policy. -When using XDCR with a source cluster running a pre-4.6 version of Couchbase Server, only conflict resolution based on _sequence numbers_ can be used. +When creating a bucket, you must actively choose the conflict resolution policy. +If you do not choose a policy, the Sequence number-based conflict resolution policy is used by default. After the bucket is created, you cannot change the conflict resolution policy for that bucket. In general, the Timestamp-based conflict resolution policy is preferred, as its logic is easier to understand. [#monitoring-conflict-resolution] == Monitoring Conflict Resolution on the Target Cluster diff --git a/modules/learn/pages/clusters-and-availability/xdcr-enable-crossclusterversioning.adoc b/modules/learn/pages/clusters-and-availability/xdcr-enable-crossclusterversioning.adoc new file mode 100644 index 0000000000..ea31aab5b5 --- /dev/null +++ b/modules/learn/pages/clusters-and-availability/xdcr-enable-crossclusterversioning.adoc @@ -0,0 +1,67 @@ += XDCR enableCrossClusterVersioning +:description: pass:q[Enabling Cross Cluster Versioning allows XDCR to add metadata to each replicated document.] + +[abstract] +{description} + + +Enabling Cross Cluster Versioning for all buckets in the replication topology is a prerequisite for some XDCR features. +The bucket property `enableCrossClusterVersioning` cannot be disabled once it is set to `true`. +Therefore, enable `enableCrossClusterVersioning` only after careful consideration. + +When you set the bucket property `enableCrossClusterVersioning` (ECCV) to `true`, XDCR stores additional metadata in the extended attributes of each document it processes. +This metadata is called the Hybrid Logical Vector (HLV): a set of Hybrid Logical Clock (HLC) information.
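+
+As a minimal sketch, the property can be set on an existing bucket through the REST API, assuming the credentials `Administrator:password`, a node at `localhost:8091`, and a bucket named `B1` (the `versionPruningWindowHrs` property is described below; see the linked REST API reference for the authoritative examples):
+
+[source,shell]
+----
+# Enable ECCV on an existing bucket; this cannot be undone
+curl -X POST -u Administrator:password \
+  http://localhost:8091/pools/default/buckets/B1 \
+  -d enableCrossClusterVersioning=true
+
+# Optionally adjust the HLV pruning window (in hours);
+# keep it identical for all buckets in the replication topology
+curl -X POST -u Administrator:password \
+  http://localhost:8091/pools/default/buckets/B1 \
+  -d versionPruningWindowHrs=720
+----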
+ +[#hlv-data-maintained-in-xattr] +== Hybrid Logical Vector (HLV) data maintained in the extended attributes + +The new metadata, HLV, is stored as a system-created extended attribute (xattr) called `_vv` (`xattrs._vv`). The HLV metadata is also called Version Vectors. + +The HLV metadata takes up `109 + 40N` bytes of space per document, where N is the number of buckets mutating the document in the replication topology. + +NOTE: As long as your replication topology is constant, the size of the HLV metadata grows to at most `109 + 40N` bytes and then remains constant. +However, HLV data accumulates when the replication topology changes and a copy of the document passes through different clusters, for example, when it is restored to a new cluster. + +To remove the accumulated or outdated HLV data, the HLV metadata is pruned periodically. +You can control the pruning frequency by setting the bucket property `versionPruningWindowHrs`. + +[#remove-hlv-metadata] +=== Remove HLV metadata + +After enabling, you cannot disable the bucket property `enableCrossClusterVersioning`. +You can, however, back up and restore the data to a bucket where `enableCrossClusterVersioning` is set to `false`, and remove the xattr information added by XDCR using the `cbbackupmgr restore --disable-hlv` option. + +NOTE: Restoring the data to a bucket where `enableCrossClusterVersioning` is set to `false` is not, by itself, enough to completely remove the effects of enabling HLV. +To stop the ongoing maintenance of HLV information, you must also remove the xattr information previously added to the documents. + +[#modify-enablecrossclusterversioning] +== Modify the bucket property enableCrossClusterVersioning + +You can modify the bucket property `enableCrossClusterVersioning` through the REST API. +For information about modifying `enableCrossClusterVersioning` through the REST API, see xref:rest-api:rest-bucket-create.adoc#example-enablecrossclusterversioning-edit[Example: Turning on enableCrossClusterVersioning, when Editing]. + +NOTE: You cannot enable the bucket property `enableCrossClusterVersioning` while creating the bucket. + +[#version-pruning-window-hrs] +== versionPruningWindowHrs + +`versionPruningWindowHrs` is a bucket property that controls how frequently the HLV metadata is pruned. +The default value of `versionPruningWindowHrs` is 720 hours (30 days), which means that any HLV data older than 720 hours is pruned to remove the outdated entries. + +NOTE: `versionPruningWindowHrs` must be set to the same value for all buckets in an XDCR replication topology. + +[#manage-version-pruning-window-hrs] +=== Manage the bucket property versionPruningWindowHrs + +For information about modifying the bucket property `versionPruningWindowHrs` through the REST API, see xref:rest-api:rest-bucket-create.adoc#example-versionpruningwindowhrs-edit[Example: Specifying time value for versionPruningWindowHrs, when Editing]. + +[#features-requiring-crossclusterversioning-enabled] +== Features for which Cross Cluster Versioning must be enabled + +Bi-directional, active-active replication between XDCR and Sync Gateway 4.0 or a later version requires Cross Cluster Versioning to be enabled. + +For more information, including important limitations, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc[XDCR Active-Active with Sync Gateway]. + +For more information about how Sync Gateway 4.0 and later versions work with Couchbase Server's XDCR, see xref:sync-gateway::server-compatibility-xdcr.adoc[XDCR - Server Compatibility].
+ +NOTE: To set up XDCR bi-directional replication with Sync Gateway (SGW), the minimum required versions are Server 7.6.6 and SGW 4.0.0. \ No newline at end of file diff --git a/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc b/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc index 8e9f685e77..16b2a536d9 100644 --- a/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc +++ b/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc @@ -129,6 +129,13 @@ The data in any source collection can be replicated to any target collection, as * _Migration_. Data in the _default_ collection of a source bucket can be replicated to an administrator-defined collection in the target bucket. ++ +[WARNING] +==== +Be aware that performing data migration may result in data loss when using XDCR filters to delete data. + +If you are running filters that remove data, be sure to read xref:clusters-and-availability/xdcr-filtering.adoc#configuring-deletion-filters-to-prevent-data-loss[Configuring Deletion Filters to Prevent Data-Loss] before attempting a migration. +==== In each case, _filtering_ can be applied. diff --git a/modules/learn/pages/clusters-and-availability/xdcr-with-scopes-and-collections.adoc b/modules/learn/pages/clusters-and-availability/xdcr-with-scopes-and-collections.adoc index e060388c91..fa41bed92e 100644 --- a/modules/learn/pages/clusters-and-availability/xdcr-with-scopes-and-collections.adoc +++ b/modules/learn/pages/clusters-and-availability/xdcr-with-scopes-and-collections.adoc @@ -61,7 +61,7 @@ However, although the target bucket contains a scope named `ScopeB`, this does _ Therefore, since `ScopeB.CollectionB` is a keyspace unique to the source, no implicit mapping is established with the target, and no replication is automatically initiated. + Note, however, if an identical keyspace is _subsequently_ established within the target bucket, this is eventually detected by XDCR, by means of a periodic check. -At this point, a _backfill pipeline_ is automatically created, and is maintained for a temporary period; for the purpose of replicating any dropped data: this is described below, in xref:learn:collections-and-availability/xdcr-overview.adoc#target-collection-removal-and-addition[Target-Collection Removal and Addition]. +At this point, a _backfill pipeline_ is automatically created, and is maintained for a temporary period; for the purpose of replicating any dropped data: this is described below, in <<target-collection-removal-and-addition,Target-Collection Removal and Addition>>. For the practical steps required to set up implicit mappings, see xref:manage:manage-xdcr/replicate-using-scopes-and-collections.adoc#replicate-data-between-collections-implicitly-with-the-ui[Replicate Data Between Collections Implicitly, with the UI].
diff --git a/modules/learn/pages/data/transactions.adoc b/modules/learn/pages/data/transactions.adoc index e8d7ba82bd..fcdfcb1e69 100644 --- a/modules/learn/pages/data/transactions.adoc +++ b/modules/learn/pages/data/transactions.adoc @@ -75,6 +75,7 @@ transactions.run((txnctx) -> { txnctx.replace(beth, bethContent) } else throw new InsufficientFunds(); + // Commit the transaction (optional: if omitted, the commit occurs automatically at the end of the lambda) txnctx.commit(); }); @@ -104,11 +105,16 @@ transactions.run((ctx) -> { For more information on distributed transactions through the SDK APIs, see: -* xref:java-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[Java SDK] -* xref:dotnet-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[.NET SDK] -* xref:cxx-txns::distributed-acid-transactions-from-the-sdk.adoc[C++ API] -* xref:go-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[Go SDK] -* xref:nodejs-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[Node.js SDK] +* xref:cxx-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:dotnet-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:go-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:java-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:kotlin-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:nodejs-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:php-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:python-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +// * xref:ruby-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:scala-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] For use-cases which need to run ad-hoc data changes, you can directly use transactional constructs in {sqlpp}. This can be accomplished using cbq, Query Workbench, CLI, or REST API in Couchbase Server, or through SDKs.
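+
+As an illustrative sketch (not taken from the SDK documentation), a single ad-hoc statement can be run as an implicit transaction against the Query Service REST endpoint by setting the `tximplicit` parameter, assuming the credentials `Administrator:password` and a Query node at `localhost:8093`:
+
+[source,shell]
+----
+# Run one statement as an implicit single-statement transaction
+curl -s -u Administrator:password http://localhost:8093/query/service \
+  --data-urlencode 'statement=UPDATE `travel-sample` SET discount = 0.1 WHERE type = "hotel" AND city = "Paris"' \
+  --data-urlencode 'tximplicit=true'
+----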
@@ -322,10 +328,15 @@ xref:n1ql:n1ql-manage/query-settings.adoc#atrcollection_req[atrcollection] for d == Related Topics -* xref:java-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] -* xref:cxx-txns::distributed-acid-transactions-from-the-sdk.adoc[] +* xref:cxx-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] * xref:dotnet-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] * xref:go-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:java-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:kotlin-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] * xref:nodejs-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:php-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:python-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +// * xref:ruby-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] +* xref:scala-sdk:howtos:distributed-acid-transactions-from-the-sdk.adoc[] * xref:server:n1ql:n1ql-language-reference/transactions.adoc[] // end::all[] diff --git a/modules/learn/pages/security/auditing.adoc b/modules/learn/pages/security/auditing.adoc index 6297205df2..3ebed9a7fd 100644 --- a/modules/learn/pages/security/auditing.adoc +++ b/modules/learn/pages/security/auditing.adoc @@ -55,6 +55,9 @@ For example, `"Unsuccessful attempt to login to couchbase cluster"`, `"Node was | Object | Contains key-value pairs for `"domain"` (specifying `"local"`; `"external"`; `"builtin"` — for the administrator who set up the cluster; or `"rejected"` — for a user who has been denied access); and `"user"` (specifying the id of the user who generated the event). +3+a| +include::partial$user-audit-warning.adoc[] + | `"local"` | Object | Contains key-value pairs for `"ip"` and incoming `"port"`, for the node on which the event was processed. diff --git a/modules/learn/pages/security/certificates.adoc b/modules/learn/pages/security/certificates.adoc index 0ca98ae4ec..e6ded7b662 100644 --- a/modules/learn/pages/security/certificates.adoc +++ b/modules/learn/pages/security/certificates.adoc @@ -227,7 +227,7 @@ Couchbase Server continues to support using the Subject Common Name. See also xref:learn:security/certificates.adoc#deprecation-of-subject-common-name[Deprecation of Subject Common Name]. * The `DNS` name, provided as a Subject Alternative Name for the certificate. -For example, if you add `subjectAltName = DNS:node2.cb.com` to the certificate, you can configure Couchbase Server to use `node2.cb.com` as the username withouy a prefix or delimiter specified in the handling-configuration. +For example, if you add `subjectAltName = DNS:node2.cb.com` to the certificate, you can configure Couchbase Server to use `node2.cb.com` as the username without a prefix or delimiter specified in the handling-configuration. + Prefix and delimiter are explained later in xref:learn:security/certificates.adoc#identifying-certificate-based-usernames-on-couchbase-server[Identifying Certificate-Based Usernames on Couchbase Server]. 
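+
+As a minimal sketch of the certificate side of this arrangement, the Subject Alternative Name can be added when generating a signing request, assuming OpenSSL 1.1.1 or later and hypothetical key and CSR file names:
+
+[source,shell]
+----
+# Request a certificate whose SAN carries the name Couchbase Server
+# can be configured to use as the username
+openssl req -new -key node2.key -out node2.csr \
+  -subj "/CN=node2.cb.com" \
+  -addext "subjectAltName = DNS:node2.cb.com"
+----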
diff --git a/modules/learn/pages/security/roles.adoc b/modules/learn/pages/security/roles.adoc index b4133f9536..5d8d3d3d89 100644 --- a/modules/learn/pages/security/roles.adoc +++ b/modules/learn/pages/security/roles.adoc @@ -1971,6 +1971,97 @@ Administrators' queries automatically have permission to perform sequential scan ^| image:introduction/no.png[] |=== +[#query-use-sequences] +== Use Sequences + +The *Use Sequences* role allows users to use sequences within a specific scope. +Users with this role can execute the {sqlpp} `NEXTVAL` and `PREVVAL` functions. +This role does not allow creating or managing sequences. +It does allow access to the Couchbase Server Web Console. + +[#table_query_use_sequences_role,cols="15,8,8,8,8",hrows=3] +|=== +5+^| Role: Use Sequences (`query_use_sequences`) + +.2+^h| Resources +4+^h| Privileges + +^h| *Read* +^h| *Write* +^h| *Execute* +^h| *Manage* + +^| Bucket, Scope: {sqlpp}, Sequences +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/yes.png[] +^| image:introduction/no.png[] + +^| Bucket, Scope: Collections +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] + +^| UI +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] + +^| Pools +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +|=== + +[#query-manage-sequences] +== Manage Sequences + +The *Manage Sequences* role allows users to manage sequences for a given scope. +Users with this role can manage sequences with the CREATE, ALTER, and DROP statements. +This role does not allow executing sequences. +It does allow access to the Couchbase Server Web Console. + +[#table_query_manage_sequences_role,cols="15,8,8,8,8",hrows=3] +|=== +5+^| Role: Manage Sequences (`query_manage_sequences`) + +.2+^h| Resources +4+^h| Privileges + +^h| *Read* +^h| *Write* +^h| *Execute* +^h| *Manage* + +^| Bucket, Scope: {sqlpp}, Sequences +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/yes.png[] + +^| Bucket, Scope: Collections +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] + +^| UI +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] + +^| Pools +^| image:introduction/yes.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +^| image:introduction/no.png[] +|=== + + +[#query-manage-index] == Query Manage Index diff --git a/modules/learn/pages/services-and-indexes/services/backup-service.adoc b/modules/learn/pages/services-and-indexes/services/backup-service.adoc index 5bca27f8b7..c7ed1682d7 100644 --- a/modules/learn/pages/services-and-indexes/services/backup-service.adoc +++ b/modules/learn/pages/services-and-indexes/services/backup-service.adoc @@ -100,6 +100,10 @@ You can also search the repositories for individual documents that have been bac When restoring data from a backup, you can define filters to choose a subset of the data to restore. You can restore data to its original keyspace or apply a mapping to restore it to a different keyspace. +You do not have to restore backed up data to a bucket using the same xref:learn:buckets-memory-and-storage/storage-engines.adoc[storage engine] as the original bucket.
+For example, you can restore data backed up from a bucket that used the Couchstore storage engine to a bucket using Magma. + + [#archiving-and-importing] == Archiving and Importing diff --git a/modules/learn/pages/services-and-indexes/services/data-service.adoc b/modules/learn/pages/services-and-indexes/services/data-service.adoc index 7670af73a0..f6e67f2079 100644 --- a/modules/learn/pages/services-and-indexes/services/data-service.adoc +++ b/modules/learn/pages/services-and-indexes/services/data-service.adoc @@ -51,7 +51,7 @@ xref:manage:manage-buckets/flush-bucket.adoc[Flush a Bucket]. The expiry pager runs every 10 minutes by default: for information on changing the interval, see `cbepctl` xref:cli:cbepctl/set-flush_param.adoc[set flush_param]. For more information on item-deletion and tombstones, see xref:data/expiration.adoc[Expiration]. ** *Batch Reader*: Enhances performance by combining changes made to multiple items into _batches_, which are placed on the disk queue, to be written to disk. -* *Scheduler*: A pool of threads, mainly purposes for handling I/O. +* *Scheduler*: A pool of threads mainly used for handling I/O. The threads are divided into four kinds, which run independently of and without effect on one another: ** *Non IO*: Tasks private to the scheduler that do not require disk-access; including connection-notification, checkpoint removal, and hash-table resizing. diff --git a/modules/learn/pages/services-and-indexes/services/services.adoc b/modules/learn/pages/services-and-indexes/services/services.adoc index 6925bcd4db..68d5b14e9a 100644 --- a/modules/learn/pages/services-and-indexes/services/services.adoc +++ b/modules/learn/pages/services-and-indexes/services/services.adoc @@ -12,7 +12,7 @@ Services are configured and deployed by the Full Administrator who initializes C The standard configuration-sequence allows a subset of services to be selected per node, with an individual memory-allocation for each. Each service supports a particular form of data-access. Services not required need not be deployed. -Services intended to support a heavy workload can be deployed across multiple cluster-nodes, to ensure optimal performance and resource-availability. +Services intended to support a heavy workload can be deployed across multiple cluster-nodes to ensure optimal performance and resource-availability. In Couchbase Server Version 7.6 and later, you can add one or more arbiter nodes to your cluster. include::learn:partial$arbiter-node-benefits.adoc[] @@ -21,14 +21,14 @@ include::learn:partial$arbiter-node-benefits.adoc[] Couchbase Server provides the following services: -* *Data*: Supports the storing, setting, and retrieving of data-items, specified by key. +* *Data*: Supports the storing, setting, and retrieving of data-items, specified by a key. * *Query*: Parses queries specified in the _N1QL_ query-language, executes the queries, and returns results. The Query Service interacts with both the Data and Index services. * *Index*: Creates indexes, for use by the Query and Analytics services. -* *Search*: Create indexes specially purposed for _Full Text Search_. +* *Search*: Creates indexes specially purposed for _Full-Text Search_. This supports language-aware searching; allowing users to search for, say, the word `beauties`, and additionally obtain results for `beauty` and `beautiful`. * *Analytics*: Supports join, set, aggregation, and grouping operations; which are expected to be large, long-running, and highly consumptive of memory and CPU resources.
-* *Eventing*: Supports near real-time handling of changes to data: code can be executed both in response to document-mutations, and as scheduled by timers. +* *Eventing*: Supports near real-time handling of changes to data: code can be executed both in response to document mutations and as scheduled by timers. * *Backup*: Supports the scheduling of full and incremental data backups, either for specific individual buckets, or for all buckets on the cluster. Also allows the scheduling of _merges_ of previously made backups. @@ -40,36 +40,41 @@ workload-requirements. [#setting-up-services] == Setting Up Services -Services are set up on a per node basis. +Services are set up on a per-node basis. Each node can run at most one instance of a service. Each node can run any number of services, up to the maximum, which is seven. In Couchbase Enterprise Server Version 7.6+, a node can be configured to run _no_ service. The _Data Service_ must run on at least one node of the cluster. -Some services are interdependent, and therefore require at least one instance of -each of their dependencies to be running on the cluster (for example, the +Some services are interdependent and therefore require at least one instance of +each of their dependencies to be running on the cluster (for example, the _Query Service_ depends on the _Index Service_ and on the _Data Service_). When the first node in a cluster is initialized, the services assigned to it become the default assignment for each other node subsequently to be added to the cluster. -However, this default can be departed from, node by node; with one or more services omitted from the default, and one or more added. +However, this default can be departed from, node by node, with one or more services omitted from the default, and one or more added. When first allocated to a node, a service requires the assignment of a specific memory quota, which becomes standard for that service in each of its instances across the cluster. (The exceptions to this are the Query and Backup Services, which never require a memory quota.) -Service-allocation should be designed based on workload-analysis: if a particular service is expected to handle a heavy workload, it should be allocated with a larger memory quota, and potentially as the only service on the node. -Alternatively, if a cluster is to be used for development purposes only, it may be convenient to allocate services in the quickest and most convenient way, with some quotas being equal. +Service allocation should be designed based on workload-analysis: +if a particular service is expected to handle a heavy workload, +it should be allocated with a larger memory quota, and potentially as the only service on the node. +Alternatively, if a cluster is to be used for development only, +it may be convenient to allocate services in the quickest and most convenient way, +with some quotas being equal. For example, the following illustration shows how four services — Data, Index, Query, and Search — might be allocated evenly across the five nodes of a _development_ cluster: [#cb_cluster_with_services_development] -image::services-and-indexes/services/cbClusterWithServicesDevelopment.png[,720,align=left] +include::partial$cluster-example-diagrams.adoc[tag="five-cluster-setup"] This configuration might provide perfectly acceptable performance for each of the four services, in the context of development and testing.
-However, if a large amount of data needed, in production, to be intensively indexed, and addressed by means of Query and Search, the following configuration would be more efficient: +However, if a large amount of data must be intensively indexed and addressed by means of Query and Search, the following production configuration would be more efficient: [#cb_cluster_with_services_production] -image::services-and-indexes/services/cbClusterWithServicesProduction.png[,720,align=left] +include::partial$cluster-example-diagrams.adoc[tag="six-cluster-setup"] -In this revised configuration, the Data Service is the only service to run on two of the nodes; the Index Service the only service on two futher nodes; and the Query and Search Services share the fifth and final node. +In this revised configuration, the Data Service is the only service to run on three of the nodes; +the Index Service is the only service on two further nodes; and the Query and Search Services share the sixth and final node. For a more detailed explanation of service memory quotas, see xref:buckets-memory-and-storage/memory.adoc[Memory]. For information on the practical steps required to initialize a cluster, including the allocation of services to nodes, see @@ -86,9 +91,9 @@ nodes can be removed, reconfigured to run the Search Service, and re-added to th cluster. Alternatively, additional hardware-resources (CPU, memory, disk-capacity) can be -added to targeted nodes in the cluster, in order to support the performance of -key services. This ability to provision services independently from one another, and -thereby -scale their performance individually up and down as required, provides the greatest -flexibility in terms of handling changing business requirements, and redeploying +added to targeted nodes in the cluster to support the performance of +key services. +This ability to provision services independently of one another, and +thereby scale their performance individually up and down as required, provides the greatest +flexibility in terms of handling changing business requirements and redeploying existing resources to ensure continuously heightened efficiency. diff --git a/modules/learn/pages/views/views-intro.adoc b/modules/learn/pages/views/views-intro.adoc index 11e36997c2..ef2f3c6985 100644 --- a/modules/learn/pages/views/views-intro.adoc +++ b/modules/learn/pages/views/views-intro.adoc @@ -5,7 +5,10 @@ [abstract] {description} -NOTE: Views are deprecated in Couchbase Server 7.0+. Views support in Couchbase Server will be removed in a future release only when the core functionality of the View engine is covered by other services. Views will not run on the newer xref:learn:buckets-memory-and-storage/storage-engines.adoc[Magma storage engine]. +NOTE: Views are deprecated in Couchbase Server 7.0+. +Views support in Couchbase Server will be removed in a future release. +Instead of views, use indexes and queries with the xref:learn:services-and-indexes/services/index-service.adoc[Index Service] (GSI) and the xref:learn:services-and-indexes/services/query-service.adoc[Query Service] ({sqlpp}), as sketched in the example below. +Views will not run on the newer xref:learn:buckets-memory-and-storage/storage-engines.adoc[Magma storage engine]. A view creates an index on the data according to the defined format and structure. The view consists of specific fields and information extracted from the objects in Couchbase.
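+
+As a minimal sketch (assuming the `travel-sample` sample bucket, the credentials `Administrator:password`, and a Query node at `localhost:8093`), a view over a field can typically be replaced by a GSI index and a {sqlpp} query:
+
+[source,shell]
+----
+# Create a GSI index in place of a view on the "country" field
+curl -s -u Administrator:password http://localhost:8093/query/service \
+  --data-urlencode 'statement=CREATE INDEX idx_airline_country ON `travel-sample`(country) WHERE type = "airline"'
+
+# Query through the Query Service instead of reading from the view
+curl -s -u Administrator:password http://localhost:8093/query/service \
+  --data-urlencode 'statement=SELECT name FROM `travel-sample` WHERE type = "airline" AND country = "France"'
+----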
diff --git a/modules/learn/partials/cluster-example-diagrams.adoc b/modules/learn/partials/cluster-example-diagrams.adoc new file mode 100644 index 0000000000..ec300b7730 --- /dev/null +++ b/modules/learn/partials/cluster-example-diagrams.adoc @@ -0,0 +1,182 @@ +// tag::five-cluster-setup[] +[plantuml] +.Couchbase Server Cluster for Development +---- +@startuml + +skinparam shadowing true +skinparam nodesep 10 +skinparam ranksep 20 + +database "Node 1" as node1 { + + component "Cluster\nManager" as cl1 + + component "Data\nService" as dl1 + + component "Index\nService" as ind1 + + component "Query\nService" as qry1 + + component "Search\nService" as ss1 + +} + +cl1-[hidden]- dl1 +dl1-[hidden]- ind1 +ind1 -[hidden]- qry1 +qry1 -[hidden]- ss1 + + + +database "Node 2" as node2 { + + component "Cluster\nManager" as cl2 + + component "Data\nService" as dl2 + + component "Index\nService" as ind2 + + component "Query\nService" as qry2 + + component "Search\nService" as ss2 + +} + +cl2-[hidden]- dl2 +dl2-[hidden]- ind2 +ind2 -[hidden]- qry2 +qry2 -[hidden]- ss2 + + + + +database "Node 3" as node3 { + + component "Cluster\nManager" as cl3 + + component "Data\nService" as dl3 + + component "Index\nService" as ind3 + + component "Query\nService" as qry3 + + component "Search\nService" as ss3 + +} + +cl3-[hidden]- dl3 +dl3-[hidden]- ind3 +ind3 -[hidden]- qry3 +qry3 -[hidden]- ss3 + +database "Node 4" as node4 { + + component "Cluster\nManager" as cl4 + + component "Data\nService" as dl4 + + component "Index\nService" as ind4 + + component "Query\nService" as qry4 + + component "Search\nService" as ss4 + +} + +cl4-[hidden]- dl4 +dl4-[hidden]- ind4 +ind4 -[hidden]- qry4 +qry4 -[hidden]- ss4 + + +database "Node 5" as node5 { + + component "Cluster\nManager" as cl5 + + component "Data\nService" as dl5 + + component "Index\nService" as ind5 + + component "Query\nService" as qry5 + + component "Search\nService" as ss5 + +} + +cl5-[hidden]- dl5 +dl5-[hidden]- ind5 +ind5 -[hidden]- qry5 +qry5 -[hidden]- ss5 + +@enduml +---- +// end::five-cluster-setup[] + + +// tag::six-cluster-setup[] + +[plantuml] +.Couchbase Server Cluster for Production +---- +@startuml + +skinparam shadowing true +skinparam nodesep 10 +skinparam ranksep 20 + +database "Node 1" as node1 { + + component "Cluster\nManager" as cl1 + + component "Data\nService" as dl1 +} + +database "Node 2" as node2 { + + component "Cluster\nManager" as cl2 + + component "Data\nService" as dl2 +} + +database "Node 3" as node3 { + + component "Cluster\nManager" as cl3 + + component "Data\nService" as dl3 +} + +database "Node 4" as node4 { + + component "Cluster\nManager" as cl4 + + component "Index\nService" as ind1 +} + +database "Node 5" as node5 { + + component "Cluster\nManager" as cl5 + + component "Index\nService" as ind2 +} + +database "Node 6" as node6 { + + component "Cluster\nManager" as cl6 + + component "Query\nService" as qry1 + + component "Search\nService" as ss1 +} + +cl1 -[hidden]- dl1 +cl2 -[hidden]- dl2 +cl3 -[hidden]- dl3 +cl4 -[hidden]- ind1 +cl5 -[hidden]- ind2 +cl6 -[hidden]- qry1 +qry1 -[hidden]- ss1 +@enduml +---- + +// end::six-cluster-setup[] diff --git a/modules/learn/partials/user-audit-warning.adoc b/modules/learn/partials/user-audit-warning.adoc new file mode 100644 index 0000000000..284b86f1f5 --- /dev/null +++ b/modules/learn/partials/user-audit-warning.adoc @@ -0,0 +1,9 @@ +[IMPORTANT] +.The occurrence of a system id in the audit log. 
+==== +In some instances, +the server will carry out an audited function +using a system identifier for the `user` field instead of the actual user logged into the system. + +This may show up in the audit logs as `"user":"@ns_server"` for example. +==== \ No newline at end of file diff --git a/modules/manage/assets/images/manage-backup-restore/newBucketWithRestoredData.png b/modules/manage/assets/images/manage-backup-restore/newBucketWithRestoredData.png index f64aa73da1..24a7499553 100644 Binary files a/modules/manage/assets/images/manage-backup-restore/newBucketWithRestoredData.png and b/modules/manage/assets/images/manage-backup-restore/newBucketWithRestoredData.png differ diff --git a/modules/manage/assets/images/manage-backup-restore/restoreButton.png b/modules/manage/assets/images/manage-backup-restore/restoreButton.png deleted file mode 100644 index d37e36e91b..0000000000 Binary files a/modules/manage/assets/images/manage-backup-restore/restoreButton.png and /dev/null differ diff --git a/modules/manage/assets/images/manage-backup-restore/restoreDialog.png b/modules/manage/assets/images/manage-backup-restore/restoreDialog.png index 36a158917a..710bf3c1ff 100644 Binary files a/modules/manage/assets/images/manage-backup-restore/restoreDialog.png and b/modules/manage/assets/images/manage-backup-restore/restoreDialog.png differ diff --git a/modules/manage/assets/images/manage-backup-restore/restoreDialogPartiallyComplete.png b/modules/manage/assets/images/manage-backup-restore/restoreDialogPartiallyComplete.png deleted file mode 100644 index a2d5181e1e..0000000000 Binary files a/modules/manage/assets/images/manage-backup-restore/restoreDialogPartiallyComplete.png and /dev/null differ diff --git a/modules/manage/assets/images/manage-settings/auto-compact-defaultNewUI.png b/modules/manage/assets/images/manage-settings/auto-compact-defaultNewUI.png index c0820503e6..0241f2187c 100644 Binary files a/modules/manage/assets/images/manage-settings/auto-compact-defaultNewUI.png and b/modules/manage/assets/images/manage-settings/auto-compact-defaultNewUI.png differ diff --git a/modules/manage/assets/images/manage-settings/index-storage-mode.png b/modules/manage/assets/images/manage-settings/index-storage-mode.png index 09774ffb93..7e52896dd2 100644 Binary files a/modules/manage/assets/images/manage-settings/index-storage-mode.png and b/modules/manage/assets/images/manage-settings/index-storage-mode.png differ diff --git a/modules/manage/assets/images/manage-xdcr/xdcr-add-replication-screen.png b/modules/manage/assets/images/manage-xdcr/xdcr-add-replication-screen.png index 82a4f0b688..47590be2ad 100644 Binary files a/modules/manage/assets/images/manage-xdcr/xdcr-add-replication-screen.png and b/modules/manage/assets/images/manage-xdcr/xdcr-add-replication-screen.png differ diff --git a/modules/manage/examples/migrate-bucket-storage-backend.sh b/modules/manage/examples/migrate-bucket-storage-backend.sh index 0fa7f4679f..6cdf6c0581 100644 --- a/modules/manage/examples/migrate-bucket-storage-backend.sh +++ b/modules/manage/examples/migrate-bucket-storage-backend.sh @@ -58,4 +58,10 @@ curl -X POST -u Administrator:password \ -d 'storageBackend=couchstore' # end::change-backend-couchstore[] +# tag::prereq-rollbackstorage-param[] +curl -v -X POST http://localhost:8091/pools/default/buckets/testbucket -u Administrator -d historyRetentionCollectionDefault=false +# end::prereq-rollbackstorage-param[] +# tag::prereq-rollbackstorage-collection[] +curl -X PATCH -u Administrator 
http://localhost:8091/pools/default/buckets/testbucket/scopes/_default/collections/_default -d history=false +# end::prereq-rollbackstorage-collection[] \ No newline at end of file diff --git a/modules/manage/pages/manage-backup-and-restore/manage-backup-and-restore.adoc b/modules/manage/pages/manage-backup-and-restore/manage-backup-and-restore.adoc index 0c0a1d12a7..b92c33ecc0 100644 --- a/modules/manage/pages/manage-backup-and-restore/manage-backup-and-restore.adoc +++ b/modules/manage/pages/manage-backup-and-restore/manage-backup-and-restore.adoc @@ -11,7 +11,7 @@ The data on a Couchbase-Server cluster can be backed up, restored, and archived by means of either of the following: * The *Backup Service*. -This can be configured by means of the *Backup* UI provided by Couchbase Web Console. +This can be configured by means of the *Backup* UI provided by Couchbase Server Web Console. * The xref:backup-restore:cbbackupmgr.adoc[cbbackupmgr] CLI utility. @@ -24,7 +24,7 @@ An overview of the Backup Service is provided in xref:learn:services-and-indexes === The Backup Service and cbbackupmgr Both the Backup Service and `cbbackupmgr` are included in Couchbase Server Enterprise Edition. -Note that from version 7.0, `cbbackupmgr` is also available in Community Edition, but without support for merge, cloud backup, or collection-level restore. +From version 7.0, `cbbackupmgr` is also available in Couchbase Server Community Edition, but without support for merge, cloud backup, or collection-level restore. The following paragraphs summarize the similarities and differences between the Backup Service and `cbbackupmgr` as provided by Enterprise Edition. @@ -34,20 +34,20 @@ For use of `cbbackupmgr`, the Full Admin or the Data Backup & Restore role must The Backup Service — which can be configured by means of the *Backup* facility of Couchbase Web Console, the Couchbase CLI, and the REST API — allows backup, restore, and archiving to be configured for the local cluster; and also permits restore to be configured for a remote cluster. By contrast, `cbbackupmgr` allows backup, restore, and archiving each to be configured either for the local or for a remote cluster: all available options are listed in xref:backup-restore:enterprise-backup-restore.adoc##version-compatibility[Version Compatibility]. -Whereas `cbbackupmgr` performs a specific backup or merge when executed, the Backup Service can be _scheduled_; so that backups and periodic merges are ongoing. +Whereas `cbbackupmgr` performs a specific backup or merge when executed, the Backup Service can be scheduled so that backups and periodic merges are ongoing. The Backup Service therefore supports additional and modified parameters, to allow scheduling to be configured. -Note that both the Backup Service and `cbbackupmgr` allow _full_ and _incremental_ backups. +Both the Backup Service and `cbbackupmgr` allow full and incremental backups. Unlike the Backup Service, `cbbackupmgr` requires a new repository to be created for each new, full backup (successive `cbbackupmgr` backups to the same repository being incremental). Both allow incremental backups, once created, to be merged, and their data deduplicated. Both use the same backup archive structure; allow the contents of backups to be listed; and allow specific documents to be searched for. Both the Backup Service and `cbbackupmgr` support use of AWS S3 storage. -Note that `cbbackupmgr` is available in both Couchbase Server 7.0 Enterprise Edition (_EE_) and 7.0 Community Edition (_CE_). 
+The `cbbackupmgr` tool is available in both Couchbase Server 7.0 Enterprise Edition (EE) and Couchbase Server Community Edition (CE). However, whereas in EE, `cbbackupmgr` allows backup and restore to be performed with reference to buckets, scopes, and collections; in CE, `cbbackupmgr` allows backup and restore to be performed with reference to buckets only. -For detailed information on how `cbbackupmgr` works (including a detailed description of incremental backup), see the xref:backup-restore:cbbackupmgr.adoc#discussion[Discussion] provided on the page for xref:backup-restore:cbbackupmgr.adoc[cbbackupmgr]. +For detailed information about how `cbbackupmgr` works (including a detailed description of incremental backup), see the xref:backup-restore:cbbackupmgr.adoc#discussion[Discussion] provided on the page for xref:backup-restore:cbbackupmgr.adoc[cbbackupmgr]. The page for xref:backup-restore:cbbackupmgr.adoc[cbbackupmgr] also provides a synopsis of the command, and a description of its basic options. The remainder of the current page describes how to configure and use the Backup Service, using Couchbase Web Console. @@ -59,15 +59,15 @@ For backup, restore, and other related tasks to be scheduled and performed, the The service (as is the case with all other Couchbase services) can be assigned either when a node is initially provisioned as a one-node cluster (as described in xref:manage:manage-nodes/create-cluster.adoc[Create a Cluster]), or when a node is added to an existing cluster (as described in xref:manage:manage-nodes/add-node-and-rebalance.adoc[Add a Node and Rebalance]). Provided that at least one node runs the Backup Service, data for the entire cluster can be backed up, restored, and archived. Locations to be used for saving data must be accessible to all cluster-nodes that are running the Backup Service. -Note also that Couchbase Server must have _read_ and _write_ access to the location. -On Linux, therefore, for a filesystem location, use the `chgrp` command to set the group ID of the folder to `couchbase`; unless a _non-root installation_ has been performed, in which case set the group ID either to the username of the current user, or to a group of which the current user is a member — see xref:install:non-root.adoc[Non-Root Install and Upgrade], for more information. +Note also that Couchbase Server must have read and write access to the location. +On Linux, therefore, for a filesystem location, use the `chgrp` command to set the group ID of the folder to `couchbase`; unless a non-root installation has been performed, in which case set the group ID either to the username of the current user, or to a group of which the current user is a member — see xref:install:non-root.adoc[Non-Root Install and Upgrade], for more information. [#access-the-backup-service-ui] == Access the Backup Service UI To access the Backup Service UI, proceed as follows: -. On Couchbase Web Console, left-click on the *Backup* tab, in the right-hand, vertical navigation bar: +. On Couchbase Web Console, click the *Backup* tab, in the vertical navigation bar: + image::manage-backup-restore/accessBackupTab.png[,100,align=left] + @@ -85,7 +85,7 @@ Currently, all panels are blank. The Backup Service allows backups (and merges) to be scheduled, as _tasks_. This section describes how task-definition and scheduling can be accomplished. -Note that for any given repository, the Backup Service performs one task at a time; with each task maintaining a lock on the repository. 
+For any given repository, the Backup Service performs one task at a time, with each task maintaining a lock on the repository. Therefore, the administrator-defined interval between tasks should always be sufficient to allow each task to run to completion. If a new task is scheduled to start while a previously started task is still running, the new task cannot run. For information, see xref:learn:services-and-indexes/services/backup-service.adoc#avoiding-task-overlap[Avoiding Task Overlap]. @@ -96,11 +96,11 @@ To schedule one or more backups, proceed as follows: When fully defined, the repository will combine the definitions of one or more backup and related activities, scheduled for one or more buckets, targeted at a storage location accessible to all nodes on the cluster. Each repository must have a name unique among repositories on the cluster. + -To add a repository, left-click on the *ADD REPOSITORY* tab, at the upper right of the screen: +To add a repository, click the *ADD REPOSITORY* tab, at the upper right of the screen: + image::manage-backup-restore/addRepositoryTab.png[,140,align=left] + -This brings up the *Select Plan* dialog, which initially appears as follows: +This opens the *Select Plan* dialog, which initially appears as follows: + image::manage-backup-restore/selectPlanDialog.png[,420,align=left] @@ -111,12 +111,12 @@ The *_hourly_backups* plan appears as the default selection. + (For more information, see xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#default-plans[Default Plans], below.) + -Left-click on the control that appears at the right-hand side of the *Select plan* dialog's interactive text-field. +Click the control that appears at the right-hand side of the *Select plan* dialog's interactive text-field. A pull-down menu appears, as follows: + image::manage-backup-restore/selectPlanDialogPullDownMenuInitial.png[,420,align=left] + -Three options are thus provided. +Three options are provided. The first two are *_daily_backups* and *_hourly_backups*. The third option is *+ Create new plan*: select this option: + @@ -127,7 +127,7 @@ This establishes the string *+ Create new plan* within the interactive text fiel image::manage-backup-restore/selectPlanDialog2.png[,420,align=left] . Create a custom plan. -In the *Name* field of the *Select Plan* dialog, enter a name for the plan that is to be created. +In the *Name* field of the *Select Plan* dialog, enter a name for the plan that's to be created. The name must be unique across the cluster, can only use the characters `[`, `]`, `A` to `Z`, `a` to `z`, `_` and `-`; and must not start with either `_` or `-`. + Then, optionally, add a description for the plan in the *Description* field: the description can be up to 140 characters in length. @@ -136,19 +136,20 @@ For example, to specify a plan for hourly backups, the following might be entere image::manage-backup-restore/createPlanDialogWithInitialInput.png[,420,align=left] + Next, specify the services for which data will be backed up. -Left-click on the *Services* control: this expands the dialog, and displays a complete list of Couchbase Services, each being accompanied by a checkbox. +Click *Services* to display the list of Couchbase Services. + image::manage-backup-restore/createPlanServicesListInitial.png[,90,align=left] +
+To specify that only data for the Data and Index Services should be backed up, clear the boxes for all the other services. + -Next, to specify precise details of what should occur when the backup is run, left-click on the *Add Task* control. +Next, to specify precise details of what should occur when the backup is run, click the *Add Task* control. The dialog now expands, to reveal the following fields: + image::manage-backup-restore/createPlanDialogAddTaskFields.png[,420,align=left] + The fields permit the input of data to specify the details of a particular task. -Note that the dialog permits multiple tasks to be specified, by additional left-clickings of the *Add Task* control; and allows tasks selectively to be removed, by left-clickings of the *Remove Task* control. +The dialog permits multiple tasks to be added by clicking the *Add Task* control. +It also allows you to remove tasks by clicking the *Remove Task* control. + In the *Name* field, enter an appropriate name for the task: for example, *hourlyBackup*. + @@ -159,14 +160,14 @@ A pull-down menu appears: + image::manage-backup-restore/periodPullDownMenu.png[,420,align=left] + -From the pull-down menu, select *Hours*, to indicate that the frequency should be determined in units of hours. -(Note that this duly removes from the dialog the day-specification controls associated with *Weekly Calendar*.) +From the pull-down menu, select *Hours*, to set the frequency in units of hours. +This removes from the dialog the day-specification controls associated with *Weekly Calendar*. + In the *Start Time* field, specify a time of day at which the task is to be run. The time of day must be specified as hours and minutes, separated by a colon. -Note that when the frequency-unit specified is *Minutes*, this field takes no input. +When the frequency-unit specified is *Minutes*, this field takes no input. When the frequency-unit specified is *Hours* (as is the case in the current example), only the numbers signifying minutes (those after the colon) are used. -To ensure that the hourly task is performed on the hour, leave these numbers as *00*. +To make sure that the hourly task is performed on the hour, leave these numbers as *00*. + In the *Type* field, specify the task to be performed, by accessing the control at the right-hand side of the field. This displays the following pull-down menu: @@ -178,11 +179,11 @@ Then, in the *Frequency* field, specify the frequency with which the task should The field only accepts integers: these must be between 1 and 200 inclusive. To specify that the task be performed hourly, enter *1*. + -(Note that an overview of all options for task-scheduling is provided below, in the section xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#review-scheduling-options[Review Scheduling Options].) +See xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#review-scheduling-options[Review Scheduling Options] for an overview of all task-scheduling options. + To complete specification of the task, determine whether the backup to be performed is *Full* or *Incremental*. -If it is to be *Full*, check the *Full Backup* checkbox. -If it is to be *Incremental* (as should be the case in the current example), leave the checkbox unchecked. +If it's to be *Full*, select *Full Backup*. +If it's to be *Incremental* (as should be the case in the current example), leave *Full Backup* cleared. + The dialog now appears as follows: + @@ -206,7 +207,7 @@ The *ID* should be a name for the repository.
The name must be unique across the cluster, can only use the characters `[`, `]`, `A` to `Z`, `a` to `z`, `_` and `-`; and must not start with either `_`, `-`, `[`, or `]`. For example, `hourlyBackupRepo`. + -The *Bucket* should be the name of either a _Couchbase_ or an _Ephemeral_ bucket, whose data is to be backed up. +The *Bucket* should be the name of either a Couchbase or an Ephemeral bucket, whose data is to be backed up. Selection can be made with a pull-down menu, accessed by means of the control at the right of the field. If a bucket-name is selected, only data from this bucket is backed up. If the default selection, *All buckets*, is used, data from all buckets on the cluster (including all Couchbase and all Ephemeral buckets) is backed up. @@ -216,14 +217,14 @@ For the current example, the sample bucket `travel-sample` is assumed to have be + The value for *Storage Locations* can be specified as *Filesystem* (the default) or *Cloud*. For the current example, *Filesystem* will be used. -Note that if *Cloud* is selected, allowing AWS S3 storage to be used, the dialog expands, and displays additional options: these are described below, in xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#use-cloud-storage[Use Cloud Storage]. +If *Cloud* is selected, allowing AWS S3 storage to be used, the dialog expands, and displays additional options: these are described below, in xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#use-cloud-storage[Use Cloud Storage]. + The *Location* should be the location of the storage-based archive for the repository. If on the local filesystem, this location must be a pathname accessible to all nodes within the cluster that are running the Backup Service: which is to say, reads from and writes to the location are shared through an NFS mount (or through some other type of shared-folder technology, such as Samba). -Couchbase Server must have _read_ and _write_ access to the location. +Couchbase Server must have read and write access to the location. On Linux, therefore, for a filesystem location, use the `chgrp` command to set the group ID of the folder to `couchbase`; unless a _non-root installation_ has been performed, in which case set the group ID either to the username of the current user, or to a group of which the current user is a member. + -Note that a location should be used for only one repository: when multiple repositories are to be archived, a different location should be used for each. +A location should be used for only one repository: when multiple repositories are to be archived, a different location should be used for each. If appropriate, locations may be specified as subdirectories, within a top-level directory. + When complete, the dialog may look as follows: @@ -239,7 +240,7 @@ The *Backup* screen now appears as follows: image::manage-backup-restore/newRepository.png[,720,align=left] -The newly created repository, *hourlyBackupRepo*, is thus displayed with its associated plan, `HourlyBackupPlan`, with the affected bucket (`travel-sample`) and the next scheduled backup displayed. +The newly created repository, *hourlyBackupRepo*, is displayed with its associated plan, `HourlyBackupPlan`, the affected bucket (`travel-sample`), and the next scheduled backup. Data Service and Index Service data for `travel-sample` will now be backed up to the specified location on the specified schedule.
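+
+The repository can also be inspected programmatically. A minimal sketch using the Backup Service REST API, assuming its default port 8097 and the credentials `Administrator:password` (see the Backup Service API reference for the authoritative paths):
+
+[source,shell]
+----
+# List the active repositories known to the Backup Service
+curl -s -u Administrator:password \
+  http://localhost:8097/api/v1/cluster/self/repository/active
+----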
A repository whose plan is being executed (with data thereby backed up repeatedly, on schedule) is referred to as an _active_ repository. @@ -267,9 +268,9 @@ This displays the *Trigger Backup* dialog, which appears as follows: image::manage-backup-restore/triggerBackup.png[,420,align=left] The immediate backup to be performed will be _incremental_ by default. -To perform a _full_ backup, check the *Perform a full backup* checkbox. +To perform a _full_ backup, select *Perform a full backup*. -Left-click on the *Backup* button, at the lower right of the dialog. +Click the *Backup* button, at the lower right of the dialog. The dialog disappears, and a notification is displayed at the lower left of the console: image::manage-backup-restore/immediateBackupNotification.png[,220,align=left] @@ -297,8 +298,8 @@ The *Inspect Backups* view is selected by default. (Note the left-clicking the *Task History* button displays the *Tasks History* view: this is the same display as that accessed by means of the *Task History button, from the expanded row on the *Repositories* view of the *Backup* screen; and is described in xref:manage:manage-backup-and-restore/manage-backup-and-restore.adoc#inspect-tasks[Inspect Tasks], below.) The main, lower panel of the *Backups* view provides the ID of the repository (in this case, `83f3b752-78e6-49f8-a527-2844c30fbc75`) and its size (here, `235.551MiB`); and also provides a vertically arranged list of all backups that have occurred, with the earliest at the top. -Each backup has its own row; with its start-time, type (_full_ or _incremental_), and size. +Each backup has its own row, with its start-time, type (full or incremental), and size. -To inspect a particular backup in detail, left-click on the control at the left-hand side of the row: +To inspect a particular backup in detail, click the control at the left-hand side of the row: image::manage-backup-restore/examineBackup.png[,360,align=left]
The row expands vertically, as follows:

image::manage-backup-restore/examineBackupExpandedScope.png[,720,align=left]

-Thus, left-clicking on the row for the `inventory` scope has displayed the individual collections within the scope; and thereby shows the mutations and tombstones for each collection.
+Clicking on the row for the `inventory` scope displays the individual collections within the scope, with the mutations and tombstones for each collection.
Collections can be searched for, based on strings entered into the *filter collections* field, which is located to the upper right of the collections panel.

The upper panel of the *Data* screen provides interactive fields labelled *Key* and *Search Path*.
@@ -329,7 +330,9 @@ For example, by accessing the control at the left-hand side of the *Start* field

image::manage-backup-restore/specifyStartingBackupForSearch.png[,280,align=left]

For example, type a known document key into the *Key* field — such as `airline_10`.
-Then, enter the bucket name into the *Search Path* field: note that this requires explicit specification of both _scope_ and _collection_; unless default scope and collection have been used, in which case, explicit specification of the defaults is optional — for example, `travel-sample._default._default`.
+Then, enter the bucket name into the *Search Path* field.
+You must explicitly specify both the scope and collection unless you're using the default scope and collection. In that case, explicitly setting the defaults is optional.
+For example, `travel-sample._default._default`.

When a search is expressed to include all backups of the bucket for the `inventory` scope and `airline` collection, the panels appear as follows:

@@ -340,7 +343,7 @@ The *Examine* screen is now displayed:

image::manage-backup-restore/examineScreen.png[,720,align=left]

-Note that the controls adjacent to the *Diff* button, near the top of the screen, allow different backups to be selected, so that the differences between the document-versions they contain can be individually examined:
+The controls adjacent to the *Diff* button, near the top of the screen, allow different backups to be selected, so that the differences between the document-versions they contain can be individually examined:

image::manage-backup-restore/diffSelector.png[,420,align=left]

@@ -371,7 +374,7 @@ image::manage-backup-restore/deleteBackupConfirmation.png[,420,align=left]

Enter the backup name into the interactive text field, and left-click on *Delete*, to continue with deletion.
The backup is deleted.
-Note that once it has been deleted, it cannot be restored.
+Once it has been deleted, it cannot be restored.

[#inspect-tasks]
== Inspect Tasks
@@ -403,14 +406,14 @@ The selected row is expanded vertically, as follows:

image::manage-backup-restore/expandedTaskRow.png[,480,align=left]

-The details of the task are thus displayed as a JSON document.
+The details of the task are displayed as a JSON document.
The details include counts of items, vBuckets, and bytes received from the operation.
The `node_runs` subdocument provides information specific to each node in the cluster.

[#schedule-merges]
== Schedule Merges

-A _merge_ allows multiple backups to be combined as one; with _deduplication_ occurring.
+A merge allows multiple backups to be combined as one; with deduplication occurring.
Merges are supported for filesystem-based repositories: however, merges are _not_ supported for cloud-based repositories.
If a merge is scheduled for a cloud-based repository, the Backup Service skips the task. @@ -418,7 +421,7 @@ An immediate merge cannot be triggered for a cloud-based repository. Merges can be scheduled as _tasks_, to be applied to backed up data within a defined repository. This section describes how task-definition and scheduling for merges can be accomplished. -Note that for any given repository, the Backup Service performs one task at a time; with each task maintaining a lock on the repository. +For any given repository, the Backup Service performs one task at a time; with each task maintaining a lock on the repository. Therefore, the administrator-defined interval between tasks should always be sufficient to allow each task to run to completion. If a new task is scheduled to start while a previously started task is still running, the new task cannot run. For information, see xref:learn:services-and-indexes/services/backup-service.adoc#avoiding-task-overlap[Avoiding Task Overlap]. @@ -431,7 +434,7 @@ When the *Select Plan* dialog is displayed, choose *+ Create new plan*. . In the redisplayed *Select Plan* dialog, specify a *Name* and a *Description* for the plan. Then, specify the *Services* whose data should be backed up. + -Note that a merge can only be scheduled as part of a plan that also schedules backup: the merge will be applied to backups within the defined repository. +A merge can only be scheduled as part of a plan that also schedules backup: the merge will be applied to backups within the defined repository. . Left-click on *Add Task*, and add a *Backup* task. For example: @@ -445,19 +448,19 @@ For example: + image::manage-backup-restore/mergeTask.png[,420,align=left] + -The *Type* of the task *MergeTask* has thus been specified as *Merge*, with a frequency of four hours. +The *Type* of the task *MergeTask* has been specified as *Merge*, with a frequency of four hours. Note the fields *Merge Offset Start* and *Merge Offset End*, which respectively specify the relative start and end points of each merge that will be performed. An offset start of *0* indicates that each merge will start with backups made on the current day, if such backups exist. An offset end of *2* indicates that each merge will end with backups that were made 2 days before the specified start-day, if such backups exist. If backups were not made every day during the specified period, as many as can be found will be merged. + -Note that a detailed, diagrammatic explanation of *Merge Offset Start* and *Merge Offset End* is provided in xref:learn:services-and-indexes/services/backup-service.adoc#specifying-merge-offsets[Specifying Merge Offsets]. +A detailed, diagrammatic explanation of *Merge Offset Start* and *Merge Offset End* is provided in xref:learn:services-and-indexes/services/backup-service.adoc#specifying-merge-offsets[Specifying Merge Offsets]. + Left-click on the *Next* button: + image::manage-backup-restore/nextButton.png[,140,align=left] -. When the *Create Repository* dialog appears, enter the *ID* of the repository you are creating, the name of the *Bucket* that is being backed up, the appropriate value of *Storage Locations* (here, *Filesystem*), and the on-disk location of the repository-archive. +. When the *Create Repository* dialog appears, enter the *ID* of the repository you're creating, the name of the *Bucket* that is being backed up, the appropriate value of *Storage Locations* (here, *Filesystem*), and the on-disk location of the repository-archive. 
(Note that this on-disk location must be accessible to _all_ Backup Service nodes in the cluster.)
For example:
+
@@ -527,93 +530,154 @@ The details in the expanded row confirm that five backups were merged by the ope
[#restore-backups]
== Restore Backups

-One or more backups can be _restored_ to the cluster; which means that the data in the backups is copied back into the buckets from which it was originally backed up, or into other buckets.
-Proceed as follows:
+You can restore a backup to the same bucket or buckets that you originally backed up or to a different set of buckets.
+You can also restore a backup to a different cluster.
+The buckets you restore data to do not have to use the same xref:learn:buckets-memory-and-storage/storage-engines.adoc[storage engine] as the original buckets.
+You can restore a backup of data from a bucket using the Couchstore storage engine to one using Magma.
+You can also restore a Magma-backed bucket backup to a Couchstore bucket.

-. In the *Repositories* view of the *Backup* screen, select the repository from which data is to be restored, and left-click on the row for the repository, in order to expand it vertically.
-Then, left-click on the *Restore* button:
-+
-image:manage-backup-restore/restoreButton.png[,140,align=left]
-+
-The *Restore* dialog is now displayed:
-+
-image:manage-backup-restore/restoreDialog.png[,420,align=left]
+To restore a backup:

-. Use the *Restore* dialog to specify which backup or backups should be restored.
-In the *Cluster* field, enter the IP address of the cluster at which the data-restoration is targeted.
-Enter username and password for the target cluster in the *User* and *Password* fields, and then use the controls at the right-hand sides of the *Start* and *End* fields to select the first and last backups in the series that is to be restored.
-The dialog now appears as follows:
+. Select menu:Backup[Repositories], then expand the repository containing the data you want to restore.
+. Click btn:[Restore].
+The *Restore* dialog opens:
+
-image:manage-backup-restore/restoreDialogPartiallyComplete.png[,420,align=left]
+image:manage-backup-restore/restoreDialog.png[,420,align=left]

-. Open the *Services* tab, on the *Restore* dialog, and specify the services whose data is to be restored — unchecking the checkbox for each service whose data is not required.
+. In the *Cluster* field, enter the URL of a node in the cluster where you want to restore the data.
+Include the REST API port--by default, 8091 for unencrypted HTTP and 18091 for secure HTTPS connections.
+. Choose the method you want to use to authenticate with the target cluster.
+You can use either Plain (a username and password) or a client certificate and key.
+After making your choice, supply the credentials for the target cluster.
+. In the *Start* and *End* fields, choose the first and last backups in the range you want to restore.
+. If you want to restore users and groups, expand *Users* and click *Restore users and User Groups*.
+Also choose whether the backed-up users and groups overwrite any identically named existing ones.
+. If you want to select which service's data gets restored, expand the *Services* section and select or clear the services you want.
For example:
+
image:manage-backup-restore/restoreUncheckCheckboxes.png[,240,align=left]

-. Open the *Advanced Restore Options* tab, on the *Restore* dialog.
-The dialog expands vertically, revealing the following fields:
+. 
Expand the *Advanced Restore Options* if you want to:
+
-image:manage-backup-restore/restoreAdvancedOptionsInitial.png[,420,align=left]
-+
-These fields allow selection of documents to be restored on the basis of the data they contain.
-Documents that meet the specified criteria are included in the data-restoration; those that do not are omitted from it.
-+
-Use of these fields is optional: if all data in the specified backups is to be restored, leave these fields blank.
-If only some data should be restored, proceed as follows:
-+
-In the *Filter Keys* field, add a _regular expression_ that must be matched by a document's _key_, if the document is to be included in the restoration.
-For example, `^airline` ensures that only a document whose key begins with the string `airline` is included.
+ * Filter what data Couchbase Server restores.
 * Restore a bucket's data to a different bucket.
 * Control how the restore handles xref:learn:data/expiration.adoc[expiration] TTL values.
 * Configure details about the bucket and collections being restored.
+
-In the *Filter Values* field, add a regular expression that must be matched by a _value_ within the document, if the document is to be included in the restoration.
-For example, `MIL*` ensures that only a document that contains at least one key-value pair whose value contains the string `MIL` followed by zero of more characters is to be included in the restoration.
-(See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions[Regular Expressions^] and https://www.regular-expressions.info/[Regular-Expressions.info^], for further information.)
+All the fields in this section are optional.
+See <<advanced_restore_options>> for more information.
+
+. Click btn:[Restore] to start the restore process.
+A green pop-up briefly appears to verify that the restore task has started.
+
+To monitor an ongoing restore, click the btn:[Task History] button in the repository's entry in the *Repositories* tab.
+The active restore task appears under the *Tasks* section.
+
+image:manage-backup-restore/newBucketWithRestoredData.png[,,align=left]
+
+After the restore task finishes, you can see whether it succeeded or failed under the *Results* section.
+
+NOTE: If the restore task completes while you're viewing *Task History*, it does not appear under the *Results* section until you click btn:[Refresh Tasks].
+
+To learn how to restore a backup using the command line, see xref:backup-restore:cbbackupmgr-restore.adoc[].
+
+[#advanced_restore_options]
+=== Advanced Restore Options
+
+Expanding the *Restore* dialog's *Advanced Restore Options* section shows you fields where you can control:
+
+* Data filtering
+* How TTL values are interpreted
+* Whether the restore creates missing buckets or removes some scopes or collections.
+
+Once you expand the *Advanced Restore Options* section, a set of fields appears:
+
+image:manage-backup-restore/restoreAdvancedOptionsInitial.png[,420,align=left]
+
+The fields in this section are:
+
+Filter Keys::
+Lets you enter a regular expression the restore task uses to filter the key values.
+The restore task only restores a document if its key matches the regular expression.
+
-In the *Map Data* field, indicate whether the data is to be restored to its original or to a different bucket.
-If this field is left blank, data is restored to its original bucket: note that this bucket must continue to exist on the cluster.
-If data is to be restored to a different bucket, that bucket must either already have been defined on the cluster, or must be created by means of the *Auto-create bucket* option, described below.
+For example, if you enter `^airline` in this field, then the restore task only restores documents whose key begins with the string `airline`.
+
+Filter Values::
+Lets you enter a regular expression the restore task uses to filter documents based on their data.
+The restore task only restores a document if one of its values matches the regular expression.
+
-For example, if data to be restored from `travel-sample` should be restored to `ts`, enter `travel-sample=ts`.
+For example, if you enter `MIL*` in this field, the restore task only restores a document if it has a value that contains the string `MIL` followed by zero or more characters.
+
+Map Data::
+Lets you have the restore task restore a backed-up bucket's data to a different bucket.
+If you leave this field blank, the restore task restores data into the same bucket from which it was backed up.
+
-Use the *Include Data* and *Exclude Data* fields to indicate the subset of buckets whose data is to be restored.
-For example, if backups to be restored were made when the cluster had four buckets defined, named `bucket1`, `bucket2`, `bucket3`, and `bucket4`, entering `bucket1,bucket4` in the *Include Data* field ensures that only data from `bucket1` and `bucket4` is restored; while entering `bucket2,bucket3` in the *Exclude Data* field ensures that data from `bucket2` and `bucket3` is _not_ restored. Note that these options are intended for use on backups that included all buckets on the cluster: they are not required when the backup was made of one bucket only.
+If you want a bucket's data to be saved in a bucket of a different name, enter the original bucket's name, an equal sign (`=`), and the target bucket's name.
+For example, to restore all data backed up from the `travel-sample` bucket into a bucket named `ts`, enter `travel-sample=ts` into the *Map Data* field.
+
-Note that the *Include Data* and *Exclude Data* fields also allow the _scopes_ and _collections_ within buckets to be specified.
-To specify a scope within a bucket, use the syntax _bucket-name_._scope-name_.
-To specify a collection within a scope within a bucket, use the syntax _bucket-name_._scope-name_._collection-name_.
-For example, entering `bucket1.scope1` in the *Include Data* field would ensure that only data from the scope `scope1` within `bucket1` is restored; while entering `bucket2.scope1.collection1` in the *Exclude Data* field would ensure that data from `collection1`, within `scope1` in `bucket2`, is _not_ be restored.
-(For an overview of scopes and collections, see xref:learn:data/scopes-and-collections.adoc[Scopes and Collections].)
+The target bucket must exist on the target cluster or you must enable <<auto-create-buckets,Auto-create Buckets>>.
+
+Include Data::
+Exclude Data::
+These fields let you limit the restoration to a subset of the buckets, scopes, and collections in the backup.
+The *Include Data* field has the restore task restore just the buckets, collections, and scopes that you list in this field.
+The *Exclude Data* field restores all data in the backup except the buckets or collections you list in this field.
+
-The *Replace TTL* field allows a new _expiration_ value to be established for restored documents.
-The dropdown menu provides the options *none* (the default), which means that no new expiration value is established for any document; *all*, which means that a new expiration value is established for every restored document; and *expired*, which means that a new expiration value is established for every document that has expired.
-The new expiration value must be specified by means of the *Replace TTL-with* field: the value must either be specified as an RFC3339 time stamp (such as `2006-01-02T15:04:05-07:00`); or must be `0`, which means that each affected document is restored with no expiration value established.
-For more information, see xref:learn:data/expiration.adoc[Expiration].
+To include or exclude buckets, add their names in a comma-separated list to the *Include Data* or *Exclude Data* fields.
+For example, suppose the backups you're restoring contain four buckets named `bucket1`, `bucket2`, `bucket3`, and `bucket4`.
+Then entering `bucket1,bucket4` in the *Include Data* field has the restore task restore just the data from `bucket1` and `bucket4`.
+In this case, you could instead enter `bucket2,bucket3` in the *Exclude Data* field to get the same result.
+
-Check the *Force Updates* field to ensure that data restored from the specified backup overwrites the current values on the cluster when the current values are the more recent.
-If the *Force Updates* checkbox is not checked, current values are not overwritten if more recent.
+You can specify a scope to be included in or excluded from the restore by listing its bucket name, followed by a period, and then the scope name.
+Similarly, to include or exclude a collection, specify the names of its bucket, scope, and collection joined by periods.
+For example, to exclude the `route` collection in the `travel-sample` bucket's `inventory` scope, enter `travel-sample.inventory.route` in *Exclude Data*.
+
-Check the *Auto-remove Collections* checkbox to omit from the restoration any scope or collection that has been removed from the cluster since the backup was performed.
-(Note that if a data-containing, administrator-created collection is backed up, but is then deleted from the cluster with all its data, the deleted data will not be restored by the *Restore* operation: however, the empty collection _will_ be restored by the *Restore* operation, unless the *Auto-remove Collections* checkbox is checked, prior to the *Restore* operation.)
+See xref:learn:data/scopes-and-collections.adoc[] for an overview of scopes and collections.
+
+Replace TTL::
+Replace TTL with::
+These fields let you choose how the restore task handles time to live (TTL) values in the documents it's restoring.
+The *Replace TTL* list controls when the restore task applies the date you enter into the *Replace TTL with* field to the documents it's restoring.
+The settings in this list are:
+
-Check the *Auto-create Buckets* checkbox to create any buckets to which the restoration has been mapped that do not yet exist on the target cluster.
+* *none*: The restore task does not change the TTL value stored in the backup.
+If the document's expiration time is in the past, Couchbase Server marks it as deleted soon after the restore task restores it.
+* *expired*: If a document being restored has an expiration date in the past, the restore task sets its TTL to the value you supply in *Replace TTL with*.
+* *all*: The restore task applies the new TTL you supply in *Replace TTL with* to all documents it restores.
+It even applies the new value to restored documents that had a TTL of `0` (no expiration) in the backup.
+
-For example, the *Restore* dialog may now appear as follows:
+The value you supply in the *Replace TTL with* field must be either:
+
-image:manage-backup-restore/restoreDialogComplete.png[,420,align=left]
+* `0`: No TTL value is set for the document.
+The document does not expire unless the bucket or collection containing it has a non-zero `maxTTL` value.
+See xref:learn:data/expiration.adoc[].
+* A string containing an https://www.rfc-editor.org/rfc/rfc3339[RFC3339^] time stamp.
+All documents to which the restore task applies this value will expire on the date and time you set.
+
-Values are thus specified for filtering documents on a basis of both key and value.
-The data to be restored from `travel-sample` is specified to be restored to a bucket named `ts`, which has not previously been created: therefore, the *Auto-create Buckets* checkbox has been checked.
+NOTE: The *Replace TTL with* field does not prevent you from entering a timestamp in the past.
+Entering a date in the past results in any documents that the restore task applies the field's value to being deleted by Couchbase Server soon after restoration.

-. Left-click on *Restore*.
-This triggers the specified restoration.
-The dialog disappears; and a green restore-notification appears, at the lower left of the console.
+Force Updates::
+By default, the restore task does not overwrite an existing document that has a more recent modification time than its backed-up version.
+Select *Force Updates* to have the restore task always overwrite existing documents with the version in the backup, even if the existing document is more recent.

-Subsequent to the operation, its results can be checked; by means of the *Buckets* screen of Couchbase Web Console, which might now appear as follows:
+Auto-remove Collections::
+When checked, the restore task drops scopes and collections that currently exist in buckets but had been dropped prior to the backup's creation.
+The restore task knows which scopes and collections have been dropped because the backup contains the tombstones of these dropped objects.
+For a scope or collection to be dropped when you enable *Auto-remove Collections*, its ID must match the ID of a dropped scope or collection as well as matching its name.
+Just matching the name of a deleted scope or collection is not enough to have the restore task drop it.
++
+NOTE: This option is only useful for situations where you're dropping and recreating buckets.
+For example, suppose you make a backup of a bucket where you had dropped scopes or collections.
+Then, later, you drop the bucket and recreate it and its scopes and collections (including the ones you had previously deleted) in precisely the same order that you had created them in the original bucket.
+In this case, the scopes and collections will have the same IDs that they had in the original bucket and therefore in the backup.
+Finally, if you restore the backup to the bucket with *Auto-remove Collections* selected, the restore task deletes scopes and collections that match the IDs of deleted ones in the backup.

-image:manage-backup-restore/newBucketWithRestoredData.png[,720,align=left]
+[#auto-create-buckets]
+Auto-create Buckets::
+By default, the restore task exits with an error message if a bucket being restored from the backup does not currently exist in the cluster.
+Selecting *Auto-create Buckets* has the restore task create any missing buckets.
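+
+The advanced options above correspond to flags of the `cbbackupmgr restore` command.
+The following is a hedged sketch only; the archive path and repository name are illustrative assumptions, and the flag spellings are to be verified against xref:backup-restore:cbbackupmgr-restore.adoc[]:
+
+[source,console]
+----
+# Sketch: restore from the repository, mapping travel-sample to ts,
+# keeping only documents whose keys start with "airline", and
+# creating the target bucket if it does not exist.
+cbbackupmgr restore --archive /backup/archive --repo hourlyBackupRepo \
+  --cluster http://127.0.0.1:8091 --username Administrator --password password \
+  --map-data travel-sample=ts --filter-keys '^airline' --auto-create-buckets
+----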
-A new bucket, named `ts`, has thus been created.
-Its item-count indicates that it contains only a subset of the documents contained in `travel-sample`, in accordance with the filtering specified for the restore operation.

[#pause-backups]
== Pause Backups
@@ -1042,7 +1106,7 @@ Provider::
This should remain set as `GCP`.

Cloud Bucket::
-The name of the bucket on the `GCP` service you are backing up to.
+The name of the bucket on the `GCP` service you're backing up to.

Cloud Auth Type::
This can be either `ID and Key` or `Instance Metadata Service`.
@@ -1071,7 +1135,7 @@ You will require a different set of options depending on which one cloud authent
[%collapsible]
=====

-If you are using a GCP virtual machine to hold your backup, then you can make use of the GCP VM service account with the `Metadata Service` authorization type.
+If you're using a GCP virtual machine to hold your backup, then you can make use of the GCP VM service account with the `Metadata Service` authorization type.

. Ensure that the service account that you are using on https://cloud.google.com/[Google Cloud] has `Access scopes` set to `Set access for each API`.
+
diff --git a/modules/manage/pages/manage-buckets/migrate-bucket.adoc b/modules/manage/pages/manage-buckets/migrate-bucket.adoc
index fa90d38807..fbb42364b4 100644
--- a/modules/manage/pages/manage-buckets/migrate-bucket.adoc
+++ b/modules/manage/pages/manage-buckets/migrate-bucket.adoc
@@ -103,15 +103,15 @@ If the bucket has a low write workload, Couchbase Server may be able to compact
Magma's default fragmentation threshold is 50%.
Couchbase Server treats this threshold differently than the Couchstore threshold.
-It does not perform a full compaction with the goal of reducing the bucket's fragmentation to 0%.
+Couchbase Server does not perform a full compaction with the goal of reducing the bucket's fragmentation to 0%.
Instead, Couchbase Server compacts a Magma bucket to maintain its fragmentation at the threshold value.
This maintenance of the default 50% fragmentation can result in greater disk use for a Magma-backed bucket verses the Couchstore-backed bucket.
If a bucket you migrated to Magma has higher sustained disk use that interferes with the node's performance, you have two options:

* Reduce the fragmentation threshold of the Magma bucket.
-For example, you could choose to reduce the fragmentation threshold to 30%.
-You should only consider changing the threshold if the bucket's workload is not write-intensive.
+For example, you can choose to reduce the fragmentation threshold to 30%.
+Change the threshold only if the bucket's workload is not write-intensive.
For write-intensive workloads, the best practice for Magma buckets is to leave the fragmentation setting at 50%.
See xref:manage:manage-settings/configure-compact-settings.adoc[] to learn how to change the bucket's database fragmentation setting.
@@ -123,16 +123,43 @@ See the next section for more information.

As you migrate each node's vBuckets to a new storage backend, you may decide that the migration is not meeting your needs.
For example, you may see increased disk usage when moving from Couchstore to Magma as explained in xref:#disk_usage[Disk Use Under Couchstore Verses Magma].
-You can roll back the migration by:
+
+=== Prerequisites
+
+You can roll back a migration from Magma to Couchstore by deactivating history retention on the buckets that use Magma as the storage backend.
+
+Follow these steps to roll back storage from Magma to Couchstore:
+
+. 
Run the following command to deactivate the `historyRetentionCollectionDefault` parameter, which sets the history-retention default for all the collections within the bucket.
++
+[source,console]
+----
+include::manage:example$migrate-bucket-storage-backend.sh[tag=prereq-rollbackstorage-param]
+----
++
+For more information, see the xref:rest-api:rest-bucket-create.adoc#historyretentioncollectiondefault[`historyRetentionCollectionDefault`] parameter details.
++
+. Run the following command for each existing collection to deactivate its history retention.
++
+[source,console]
+----
+include::manage:example$migrate-bucket-storage-backend.sh[tag=prereq-rollbackstorage-collection]
+----
++
+For more information about creating and editing a collection, see xref:rest-api:creating-a-collection.adoc#description[Creating and Editing a Collection]. A rough sketch of the underlying REST calls appears at the end of this procedure.
+
+=== Procedure
+
+You can roll back the migration by doing the following:

. Changing the bucket's backend setting to its original value.
. Force any migrated nodes to rewrite their vBuckets back to the old backend.

-You do not have to perform any steps for nodes you did not migrate.
+Perform the steps only for the nodes you migrated.

-For example, to roll back the migration shown in xref:#perform_migration[Perform a Migration], you would follow these steps:
+For example, to roll back the migration explained in xref:#perform_migration[Perform a Migration], follow these steps:

-. Call the REST API to change the bucket's backend back to Couchstore:
+. Call the REST API to change the bucket's storage backend back to Couchstore:
+
[source,console]
----
@@ -146,7 +173,7 @@ include::manage:example$migrate-bucket-storage-backend.sh[tag=change-backend-cou
include::manage:example$migrate-bucket-storage-backend.sh[tag=get-node-overrides]
----
+
-For the migration shown in xref:#perform_migration[Perform a Migration], the output looks like this:
+For the migration explained in xref:#perform_migration[Perform a Migration], the output appears as follows:
+
[source,json]
----
@@ -160,7 +187,7 @@ null
+
In this case, you must roll back node3 because you migrated it to Magma.

-. For each node that you have already migrated, perform another xref:install:upgrade-procedure-selection.adoc#swap-rebalance[swap rebalance] or a xref:learn:clusters-and-availability/graceful-failover.adoc[graceful failover] followed by a xref:learn:clusters-and-availability/recovery.adoc#full-recovery[full recovery] and xref:learn:clusters-and-availability/rebalance.adoc[rebalance] to roll the vBuckets on the node back to the previous backend.
+. For each node that you have already migrated, perform another xref:install:upgrade-procedure-selection.adoc#swap-rebalance[swap rebalance] or a xref:learn:clusters-and-availability/graceful-failover.adoc[graceful failover], then perform a xref:learn:clusters-and-availability/recovery.adoc#full-recovery[full recovery] and xref:learn:clusters-and-availability/rebalance.adoc[rebalance] to roll back the vBuckets on the node to the previous backend.
+
To roll back node3, follow these steps:
+
@@ -184,4 +211,4 @@ include::manage:example$migrate-bucket-storage-backend.sh[tag=recover-node]
include::manage:example$migrate-bucket-storage-backend.sh[tag=rebalance-cluster]
----

-. Repeat the previous step until all nodes that you'd migrated have rolled back to their original storage backend.
+. Repeat the previous step until you have rolled back all of the migrated nodes to their original storage backend.
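+
+The `include::` directives above pull the exact commands from an example script that is not shown in this diff.
+As a rough, hedged sketch only (the bucket name `travel-sample` and the host are illustrative; the parameter names are documented in xref:rest-api:rest-bucket-create.adoc[]), the two bucket-level calls might look like this:
+
+[source,console]
+----
+# Sketch: prerequisite - turn off the bucket-level history-retention default.
+curl -X POST -u Administrator:password \
+  http://127.0.0.1:8091/pools/default/buckets/travel-sample \
+  -d historyRetentionCollectionDefault=false
+
+# Sketch: switch the bucket's storage backend back to Couchstore.
+curl -X POST -u Administrator:password \
+  http://127.0.0.1:8091/pools/default/buckets/travel-sample \
+  -d storageBackend=couchstore
+----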
diff --git a/modules/manage/pages/manage-logging/manage-logging.adoc b/modules/manage/pages/manage-logging/manage-logging.adoc
index 1f25a142c3..485102e66d 100644
--- a/modules/manage/pages/manage-logging/manage-logging.adoc
+++ b/modules/manage/pages/manage-logging/manage-logging.adoc
@@ -409,7 +409,7 @@ To make a dynamic change, execute a [.cmd]`curl POST` command, using the followi
----
curl -X POST -u adminName:adminPassword HOST:PORT/diag/eval \
-   -d ‘ale:set_loglevel(<log_component>,<logging_level>).’
+   -d 'ale:set_loglevel(<log_component>,<logging_level>).'
----

* `log_component`: The default log level (except `couchdb`) is `debug`; for example `ns_server`.
The available loggers are `ns_server`, `couchdb`, `user`, `Menelaus`, `ns_doctor
+
----
curl -X POST -u Administrator:password http://127.0.0.1:8091/diag/eval \
-  -d 'ale:set_loglevel(ns_server,error).
+  -d 'ale:set_loglevel(ns_server,error).'
----

[#collecting-logs-using-cli]
diff --git a/modules/manage/pages/manage-nodes/list-cluster-nodes.adoc b/modules/manage/pages/manage-nodes/list-cluster-nodes.adoc
index 9be550c1c5..159abb687c 100644
--- a/modules/manage/pages/manage-nodes/list-cluster-nodes.adoc
+++ b/modules/manage/pages/manage-nodes/list-cluster-nodes.adoc
@@ -133,20 +133,29 @@ The method returns a large amount of information, which includes many of the det
The output may be unformatted, and thereby difficult to read until formatting is applied.
The following call passes the result to the https://stedolan.github.io/jq/[jq^] command-line JSON processor for
-formatting, and then uses the standard command-line utility `grep` to reduce the output to available hostnames:
+formatting, and then uses the standard command-line utility `egrep` to reduce the output to available hostnames and otpNode names:

----
curl -u Administrator:password -v -X GET \
-http://10.142.181.101:8091/pools/default | jq '.' | grep hostname
+http://10.142.181.101:8091/pools/default | jq '.' | egrep 'hostname|otpNode'
----

The output is as follows:

----
+"otpNode": "ns_1@10.142.181.101",
"hostname": "10.142.181.101:8091",
+"otpNode": "ns_1@10.142.181.102",
"hostname": "10.142.181.102:8091",
----

+As shown in the example REST API command below, the otpNode value can be used with the `/nodes/<otpNode>` URI to retrieve detailed information about the node, including the storage paths for the services:
+
+----
+curl -u Administrator:password -v -X GET \
+http://10.142.181.101:8091/nodes/ns_1@10.142.181.101 | jq '.'
+----
+
For more information, see xref:rest-api:rest-cluster-get.adoc[Retrieving Cluster Information].

[#next-steps-after-list-nodes]
diff --git a/modules/manage/pages/manage-scopes-and-collections/manage-scopes-and-collections.adoc b/modules/manage/pages/manage-scopes-and-collections/manage-scopes-and-collections.adoc
index af393e5371..fbc32bb4fd 100644
--- a/modules/manage/pages/manage-scopes-and-collections/manage-scopes-and-collections.adoc
+++ b/modules/manage/pages/manage-scopes-and-collections/manage-scopes-and-collections.adoc
@@ -654,7 +654,7 @@
http://localhost:8091/pools/default/buckets/testBucket/scopes/my_scope/collectio
-d maxTTL=0
----
+
-Setting `maxTTL` parameter to `0` manes that documents in the collection use the bucket's `maxTTL` setting.
+Setting the `maxTTL` parameter to `0` means that documents in the collection use the bucket's `maxTTL` setting.
If successful, the operation returns a uid.
+
----
diff --git a/modules/manage/pages/manage-security/manage-auditing.adoc b/modules/manage/pages/manage-security/manage-auditing.adoc
index 5339bcb0d4..21475ba643 100644
--- a/modules/manage/pages/manage-security/manage-auditing.adoc
+++ b/modules/manage/pages/manage-security/manage-auditing.adoc
@@ -225,6 +225,6 @@ Likewise, a list of `disabledUsers` is specified.
See xref:manage:manage-security/manage-auditing.adoc#ignoring-events-by-user[Ignoring Filterable Events By User], above, for information.
Note, however, that when specified using the REST API, local and internal usernames take the `/local`, rather than the `/couchbase` suffix.
The `rotateSize` is specified in bytes, and the `rotateInterval` in seconds.
-The `pruneAge` parameter tells Couchbase Server to automatically delete rotated audit logs after 10800 minutes (1 week).
+The `pruneAge` parameter tells Couchbase Server to automatically delete rotated audit logs after 10800 seconds (3 hours).

See xref:rest-api:rest-auditing.adoc[Configure Auditing], for more detailed information; including use of the `GET /settings/audit` method and URI to retrieve the current audit configuration.
diff --git a/modules/manage/pages/manage-settings/configure-compact-settings.adoc b/modules/manage/pages/manage-settings/configure-compact-settings.adoc
index 4174fe53c0..742970abc2 100644
--- a/modules/manage/pages/manage-settings/configure-compact-settings.adoc
+++ b/modules/manage/pages/manage-settings/configure-compact-settings.adoc
@@ -250,23 +250,17 @@ Formatted, this might appear as follows:
----
{
  "autoCompactionSettings": {
-    "parallelDBAndViewCompaction": true,
-    "allowedTimePeriod": {
-      "fromHour": 0,
-      "toHour": 2,
-      "fromMinute": 0,
-      "toMinute": 0,
-      "abortOutside": false
-    },
+    "parallelDBAndViewCompaction": false,
+    "magmaFragmentationPercentage": 50,
     "databaseFragmentationThreshold": {
       "percentage": 30,
-      "size": 536870912
+      "size": 1073741824
     },
     "viewFragmentationThreshold": {
       "percentage": 30,
-      "size": 536870912
+      "size": 1073741824
     },
-    "indexCompactionMode": "full",
+    "indexCompactionMode": "circular",
     "indexCircularCompaction": {
       "daysOfWeek": "Monday,Wednesday,Friday",
       "interval": {
@@ -281,8 +275,9 @@ Formatted, this might appear as follows:
       "percentage": 30
     }
   },
-  "purgeInterval": 4
+  "purgeInterval": 3
 }
+
----

See xref:rest-api:rest-autocompact-get.adoc[Getting Auto-Compaction Settings], for more information.
@@ -294,6 +289,7 @@ To modify auto-compaction settings, use the `/controller/setAutoCompaction` meth
curl -i -X POST http://10.143.192.101:8091/controller/setAutoCompaction \
-u Administrator:password \
-d databaseFragmentationThreshold[percentage]=30 \
+-d magmaFragmentationPercentage=30 \
-d databaseFragmentationThreshold[size]=1073741824 \
-d viewFragmentationThreshold[percentage]=30 \
-d viewFragmentationThreshold[size]=1073741824 \
diff --git a/modules/manage/pages/manage-settings/general-settings.adoc b/modules/manage/pages/manage-settings/general-settings.adoc
index 9a4cc10478..d52e68556a 100644
--- a/modules/manage/pages/manage-settings/general-settings.adoc
+++ b/modules/manage/pages/manage-settings/general-settings.adoc
@@ -158,11 +158,13 @@ For information, see xref:learn:clusters-and-availability/rebalance.adoc#limitin
=== Data Settings

The settings in this area control the numbers of threads that are allocated _per node_ by Couchbase Server to the _reading_ and _writing_ of data, respectively.
-The maximum thread-allocation to each is _64_, the minimum _4_.
+The maximum thread-allocation to each is _128_, the minimum _4_.
A high thread-allocation may improve performance on systems whose hardware-resources are commensurately supportive (for example, where the number of CPU cores is high).
In particular, a high number of _writer_ threads on such systems may significantly optimize the performance of _durable writes_: see xref:learn:data/durability.adoc[Durability], for information.

+A high number of reader and writer threads benefits disk-based workloads that require high throughput, especially when using high-end disk drives such as NVMe SSDs. This is likely to be the case when using Magma as the storage engine. In this case, it is best to choose the xref:manage:manage-settings/general-settings.adoc#data-settings['Disk i/o optimized'] mode for the reader and writer thread settings.
+
Note, however, that a high thread-allocation might _impair_ some aspects of system-performance on less appropriately resourced nodes.
Consequently, changes to the default thread-allocation should not be made to production systems without prior testing.
@@ -318,7 +320,10 @@ The default cleared setting has an Index Server node rebuild any newly assigned
You cannot enable file-based rebalance when you have enabled Memory Optimized Index Storage.
When you select this option, Couchbase Server copies the index files from one Index Server node to another during a rebalance instead of rebuilding them.
See xref:learn:clusters-and-availability/rebalance.adoc#index-rebalance-methods[Index Rebalance Methods].
-
++
+This feature is disabled by default. You can disable it from the UI or through the REST API. To learn about disabling this feature through the REST API, see xref:rest-api:post-settings-indexes.adoc#disable-file-transfer-based-rebalance[Curl Command to Disable the File Transfer Based Rebalance].
++
+WARNING: Disabling this feature slows down the Rebalance operation.

[#xdcr-maximum-processes]
=== XDCR Maximum Processes
@@ -719,7 +724,7 @@ To establish index settings, use the `/settings/indexes` method.

[source,shell]
----
-include::rest-api:example$post-settings-indexes.sh[]
+include::rest-api:example$post-settings-indexes.sh[tags=gsi-settings]
----

This establishes the storage mode for indexes as `plasma`.
@@ -764,7 +769,7 @@ To set the directory for temporary backfill data, and establish its size-limit,

[source,shell]
----
-include::rest-api:example$query-settings-post-settings.sh[tag=request]
+include::n1ql-rest-settings:example$query-settings-post-settings.sh[tag=request]
----

This specifies that the directory for temporary backfill data should be `/tmp`; and that the maximum size should be 2048 megabytes.
@@ -773,7 +778,7 @@ If successful, this call returns a JSON document featuring all the current query

[source,json]
----
-include::rest-api:example$query-settings-post-settings.jsonc[tags=tmpSpace;ellipsis;access]
+include::n1ql-rest-settings:example$query-settings-post-settings.jsonc[tags=tmpSpace;ellipsis;access]
----

The document's values indicate that the specified values for directory and size have been established; and that the current setting for access-control restricts access to all, with no exceptions.
@@ -782,7 +787,7 @@ To specify particular URLs as allowed and disallowed, use the `/settings/querySe

[source,shell]
----
-include::rest-api:example$query-settings-post-access.sh[tag=request]
+include::n1ql-rest-settings:example$query-settings-post-access.sh[tag=request]
----

A JSON document is specified as the payload for the method.
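+
+As a hedged illustration only (the authoritative payload lives in the example file included above, and the endpoint name and field names are assumptions to be checked against the Query settings API), the access-control call typically has this shape:
+
+[source,console]
+----
+# Sketch: restrict which URLs N1QL CURL() calls may reach.
+# Endpoint and field names are assumptions; verify against the Query settings API.
+curl -X POST -u Administrator:password \
+  http://127.0.0.1:8091/settings/querySettings/curlAllowlist \
+  -H 'Content-Type: application/json' \
+  -d '{"all_access": false, "allowed_urls": ["https://company1.com"], "disallowed_urls": ["https://company2.com"]}'
+----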
@@ -792,10 +797,10 @@ If successful, the call returns a JSON document that confirms the modified setti

[source,json]
----
-include::rest-api:example$query-settings-post-access.jsonc[]
+include::n1ql-rest-settings:example$query-settings-post-access.jsonc[]
----

-For additional information, refer to xref:rest-api:rest-cluster-query-settings.adoc[Cluster Query Settings API].
+For additional information, refer to xref:n1ql-rest-settings:index.adoc[].

[#rebalance-settings-via-rest]
=== Rebalance Settings via REST
diff --git a/modules/manage/pages/manage-xdcr/create-xdcr-replication.adoc b/modules/manage/pages/manage-xdcr/create-xdcr-replication.adoc
index 75f93f1dcf..f3255c82b7 100644
--- a/modules/manage/pages/manage-xdcr/create-xdcr-replication.adoc
+++ b/modules/manage/pages/manage-xdcr/create-xdcr-replication.adoc
@@ -214,6 +214,19 @@ If successful, this provides the following response:
For more information, see xref:rest-api:rest-xdcr-create-replication.adoc[Creating a Replication].
For information on REST-driven configuration of the xref:manage:manage-xdcr/create-xdcr-replication.adoc#xdcr-advanced-settings-pointer[Advanced Settings] described above, see xref:rest-api:rest-xdcr-adv-settings.adoc[Managing Advanced Settings].

+[#create-an-xdcr-replication-with-mobile-as-active]
+== Create an XDCR Replication with mobile=Active
+
+To create or update an XDCR replication with `mobile=Active`, do one of the following:
+
+* Create a new XDCR replication with `mobile=Active`, or update an existing one, as described in the conceptual documentation. For creating a new replication, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc#xdcr-active-active-sgw-greenfield-deployment[Greenfield deployment]; for updating an existing replication, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc#xdcr-active-active-sgw-upgrade[Upgrade an existing setup].
+* Create or update an XDCR replication with the `mobile=Active` option using the REST API, available starting with Couchbase Server 7.6.6. See xref:rest-api:rest-xdcr-create-replication.adoc[Creating a Replication].
+* Create or update an XDCR replication with the `mobile=Active` option from the UI, available starting with Couchbase Server 7.6.6. See xref:manage:manage-xdcr/create-xdcr-replication.adoc#create-an-xdcr-replication-with-the-ui[Create an XDCR Replication with the UI].
+
+The prerequisite for using `mobile=Active` is setting the bucket property `enableCrossClusterVersioning`. For more information about this bucket property, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning].
+
+To enable the bucket property `enableCrossClusterVersioning` using the REST API, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc#modify-enablecrossclusterversioning[Modify the bucket property enableCrossClusterVersioning] or xref:rest-api:rest-bucket-create.adoc#example-enablecrossclusterversioning-edit[Example: Turning on enableCrossClusterVersioning, when Editing].
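+
+As a hedged sketch only (the bucket name is illustrative; the authoritative form is in the bucket-editing example linked above), the property can be turned on when editing an existing bucket:
+
+[source,console]
+----
+# Sketch: enable cross-cluster versioning on an existing bucket.
+curl -X POST -u Administrator:password \
+  http://127.0.0.1:8091/pools/default/buckets/travel-sample \
+  -d enableCrossClusterVersioning=true
+----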
+ [#next-xdcr-steps-after-create-replication] == Next Steps diff --git a/modules/manage/pages/monitor/monitoring-indexes.adoc b/modules/manage/pages/monitor/monitoring-indexes.adoc index a44697f562..b028ffb65d 100644 --- a/modules/manage/pages/monitor/monitoring-indexes.adoc +++ b/modules/manage/pages/monitor/monitoring-indexes.adoc @@ -6,7 +6,7 @@ // Cross references :manage-indexes: xref:manage:manage-indexes/manage-indexes.adoc :index-stats: xref:manage:monitor/ui-monitoring-statistics.adoc#index_stats -:rest-index-stats: xref:rest-api:rest-index-stats.adoc +:rest-index-stats: xref:index-rest-stats:index.adoc :couchbase-cli-server-info: xref:cli:cbcli/couchbase-cli-server-info.adoc [abstract] @@ -143,7 +143,7 @@ Note that there is no CLI support for statistics for a specific index. == Monitor Indexes with the REST API You can monitor all Index-Service statistics, and statistics for a specific index, using the REST API. -Refer to {rest-index-stats}[Index Statistics API]. +Refer to {rest-index-stats}[]. [[related-links]] == Related Links diff --git a/modules/metrics-reference/attachments/cbas_cross_reference.csv b/modules/metrics-reference/attachments/cbas_cross_reference.csv new file mode 100644 index 0000000000..81830dca27 --- /dev/null +++ b/modules/metrics-reference/attachments/cbas_cross_reference.csv @@ -0,0 +1,11 @@ +Couchbase Server pre-7.0,Couchbase Exporter,Couchbase Server 7.0+ +cbas_disk_used,cbcbas_disk_used,cbas_disk_used_bytes_total +failed_at_parser_records_count_total,N/A,cbas_failed_to_parse_records_count +cbas_gc_count,cbcbas_gc_count,cbas_gc_count_total +cbas_gc_time,cbcbas_gc_time,cbas_gc_time_milliseconds_total +cbas_heap_used,cbcbas_heap_used,cbas_heap_memory_used_bytes +incoming_records_count,N/A,cbas_incoming_records_count +cbas_io_reads,cbcbas_io_reads,cbas_io_reads_total +cbas_io_writes,cbcbas_io_writes,cbas_io_writes_total +cbas_system_load_average,cbcbas_system_load_avg,cbas_system_load_average +cbas_thread_count,cbcbas_thread_count,cbas_thread_count \ No newline at end of file diff --git a/modules/metrics-reference/attachments/index_cross_reference.csv b/modules/metrics-reference/attachments/index_cross_reference.csv new file mode 100644 index 0000000000..29336f17c6 --- /dev/null +++ b/modules/metrics-reference/attachments/index_cross_reference.csv @@ -0,0 +1,28 @@ +Couchbase Server pre-7.0,Couchbase Exporter,Couchbase Server 7.0+ +index_memory_quota,cbindex_memory_quota,index_memory_quota +index_memory_used,cbindex_memory_used,index_memory_used_total +index_ram_percent,cbindex_ram_percent,(index_memory_used_total / ignoring(name) index_memory_quota) * 100 +index_remaining_ram,cbindex_remaining_ram,index_memory_quota - ignoring(name) index_memory_used_total +avg_scan_latency,cbindex_avg_scan_latency,index_avg_scan_latency +cache_hit_percent,cbindex_cache_hit_percent,(index_cache_hits * 100) / (index_cache_hits + index_cache_misses) +cache_hits,cbindex_cache_hits,index_cache_hits +cache_misses,cbindex_cache_misses,index_cache_misses +data_size,cbindex_data_size,index_data_size +data_size_on_disk,N/A,index_data_size_on_disk +disk_size,cbindex_disk_size,index_disk_size +frag_percent,cbindex_frag_percent,index_frag_percent +items_count,cbindex_items_count,index_items_count +log_space_on_disk,N/A,index_log_space_on_disk +memory_used,N/A,index_memory_used +num_docs_indexed,cbindex_num_docs_indexed,index_num_docs_indexed +num_docs_pending,N/A,index_num_docs_pending +num_docs_queued,N/A,index_num_docs_queued 
+num_docs_pending+queued,cbindex_num_docs_pending_queued,(index_num_docs_pending + index_num_docs_queued) +num_requests,cbindex_num_requests,index_num_requests +num_rows_returned,cbindex_num_rows_returned,index_num_rows_returned +raw_data_size,N/A,index_raw_data_size +recs_in_mem,N/A,index_recs_in_mem +recs_on_disk,N/A,index_recs_on_disk +index_resident_percent,cbindex_resident_percent,index_resident_percent +scan_bytes_read,N/A,index_scan_bytes_read +total_scan_duration,N/A,index_total_scan_duration \ No newline at end of file diff --git a/modules/metrics-reference/attachments/kv_cross_reference.csv b/modules/metrics-reference/attachments/kv_cross_reference.csv new file mode 100644 index 0000000000..ec1524ea0c --- /dev/null +++ b/modules/metrics-reference/attachments/kv_cross_reference.csv @@ -0,0 +1,576 @@ +Memcached Stats,Couchbase Exporter (Interesting Stats),Couchbase Exporter (Bucket Stats),Couchbase Exporter (Per-Node Bucket Stats),Native Prometheus (7.0+) +evictions,N/A,cbbucketstat_evictions,cbpernodebucket_evictions,kv_memcache_evictions{} +total_items,N/A,N/A,N/A,kv_memcache_total_items{} +reclaimed,N/A,N/A,N/A,kv_memcache_reclaimed{} +engine_maxbytes,N/A,N/A,N/A,kv_memcache_engine_maxbytes{} +ep_access_scanner_enabled,N/A,N/A,N/A,kv_ep_access_scanner_enabled{} +ep_allow_sanitize_value_in_deletion,N/A,N/A,N/A,kv_ep_allow_sanitize_value_in_deletion{} +ep_alog_block_size,N/A,N/A,N/A,kv_ep_alog_block_size{} +ep_alog_max_stored_items,N/A,N/A,N/A,kv_ep_alog_max_stored_items{} +ep_alog_path,N/A,N/A,N/A,kv_ep_alog_path{} +ep_alog_resident_ratio_threshold,N/A,N/A,N/A,kv_ep_alog_resident_ratio_threshold{} +ep_alog_sleep_time,N/A,N/A,N/A,kv_ep_alog_sleep_time{} +ep_alog_task_time,N/A,N/A,N/A,kv_ep_alog_task_time{} +ep_backend,N/A,N/A,N/A,kv_ep_backend{} +ep_backfill_mem_threshold,N/A,N/A,N/A,kv_ep_backfill_mem_threshold{} +ep_bfilter_enabled,N/A,N/A,N/A,kv_ep_bfilter_enabled{} +ep_bfilter_fp_prob,N/A,N/A,N/A,kv_ep_bfilter_fp_prob{} +ep_bfilter_key_count,N/A,N/A,N/A,kv_ep_bfilter_key_count{} +ep_bfilter_residency_threshold,N/A,N/A,N/A,kv_ep_bfilter_residency_threshold{} +ep_bucket_type,N/A,N/A,N/A,kv_ep_bucket_type{} +ep_chk_expel_enabled,N/A,N/A,N/A,kv_ep_chk_expel_enabled{} +ep_chk_max_items,N/A,N/A,N/A,kv_ep_chk_max_items{} +ep_chk_period,N/A,N/A,N/A,kv_ep_chk_period{} +ep_chk_remover_stime,N/A,N/A,N/A,kv_ep_chk_remover_stime{} +ep_collections_drop_compaction_delay,N/A,N/A,N/A,kv_ep_collections_drop_compaction_delay{} +ep_collections_enabled,N/A,N/A,N/A,kv_ep_collections_enabled{} +ep_compaction_exp_mem_threshold,N/A,N/A,N/A,kv_ep_compaction_exp_mem_threshold{} +ep_compaction_write_queue_cap,N/A,N/A,N/A,kv_ep_compaction_write_queue_cap{} +ep_compression_mode,N/A,N/A,N/A,kv_ep_compression_mode{} +ep_conflict_resolution_type,N/A,N/A,N/A,kv_ep_conflict_resolution_type{} +ep_connection_manager_interval,N/A,N/A,N/A,kv_ep_connection_manager_interval{} +ep_couch_bucket,N/A,N/A,N/A,kv_ep_couch_bucket{} +ep_couchstore_file_cache_max_size,N/A,N/A,N/A,kv_ep_couchstore_file_cache_max_size{} +ep_couchstore_mprotect,N/A,N/A,N/A,kv_ep_couchstore_mprotect{} +ep_couchstore_tracing,N/A,N/A,N/A,kv_ep_couchstore_tracing{} +ep_couchstore_write_validation,N/A,N/A,N/A,kv_ep_couchstore_write_validation{} +ep_cursor_dropping_checkpoint_mem_lower_mark,N/A,N/A,N/A,kv_ep_cursor_dropping_checkpoint_mem_lower_mark{} +ep_cursor_dropping_checkpoint_mem_upper_mark,N/A,N/A,N/A,kv_ep_cursor_dropping_checkpoint_mem_upper_mark{} +ep_cursor_dropping_lower_mark,N/A,N/A,N/A,kv_ep_cursor_dropping_lower_mark{} 
+ep_cursor_dropping_upper_mark,N/A,N/A,N/A,kv_ep_cursor_dropping_upper_mark{} +ep_data_traffic_enabled,N/A,N/A,N/A,kv_ep_data_traffic_enabled{} +ep_dbname,N/A,N/A,N/A,kv_ep_dbname{} +ep_dcp_backfill_byte_limit,N/A,N/A,N/A,kv_ep_dcp_backfill_byte_limit{} +ep_dcp_conn_buffer_size,N/A,N/A,N/A,kv_ep_dcp_conn_buffer_size{} +ep_dcp_conn_buffer_size_aggr_mem_threshold,N/A,N/A,N/A,kv_ep_dcp_conn_buffer_size_aggr_mem_threshold{} +ep_dcp_conn_buffer_size_aggressive_perc,N/A,N/A,N/A,kv_ep_dcp_conn_buffer_size_aggressive_perc{} +ep_dcp_conn_buffer_size_max,N/A,N/A,N/A,kv_ep_dcp_conn_buffer_size_max{} +ep_dcp_conn_buffer_size_perc,N/A,N/A,N/A,kv_ep_dcp_conn_buffer_size_perc{} +ep_dcp_consumer_process_buffered_messages_batch_size,N/A,N/A,N/A,kv_ep_dcp_consumer_process_buffered_messages_batch_size{} +ep_dcp_consumer_process_buffered_messages_yield_limit,N/A,N/A,N/A,kv_ep_dcp_consumer_process_buffered_messages_yield_limit{} +ep_dcp_enable_noop,N/A,N/A,N/A,kv_ep_dcp_enable_noop{} +ep_dcp_flow_control_policy,N/A,N/A,N/A,kv_ep_dcp_flow_control_policy{} +ep_dcp_idle_timeout,N/A,N/A,N/A,kv_ep_dcp_idle_timeout{} +ep_dcp_min_compression_ratio,N/A,N/A,N/A,kv_ep_dcp_min_compression_ratio{} +ep_dcp_noop_mandatory_for_v5_features,N/A,N/A,N/A,kv_ep_dcp_noop_mandatory_for_v5_features{} +ep_dcp_noop_tx_interval,N/A,N/A,N/A,kv_ep_dcp_noop_tx_interval{} +ep_dcp_producer_snapshot_marker_yield_limit,N/A,N/A,N/A,kv_ep_dcp_producer_snapshot_marker_yield_limit{} +ep_dcp_scan_byte_limit,N/A,N/A,N/A,kv_ep_dcp_scan_byte_limit{} +ep_dcp_scan_item_limit,N/A,N/A,N/A,kv_ep_dcp_scan_item_limit{} +ep_dcp_takeover_max_time,N/A,N/A,N/A,kv_ep_dcp_takeover_max_time{} +ep_defragmenter_age_threshold,N/A,N/A,N/A,kv_ep_defragmenter_age_threshold{} +ep_defragmenter_chunk_duration,N/A,N/A,N/A,kv_ep_defragmenter_chunk_duration{} +ep_defragmenter_enabled,N/A,N/A,N/A,kv_ep_defragmenter_enabled{} +ep_defragmenter_interval,N/A,N/A,N/A,kv_ep_defragmenter_interval{} +ep_defragmenter_stored_value_age_threshold,N/A,N/A,N/A,kv_ep_defragmenter_stored_value_age_threshold{} +ep_durability_min_level,N/A,N/A,N/A,kv_ep_durability_min_level{} +ep_durability_timeout_task_interval,N/A,N/A,N/A,kv_ep_durability_timeout_task_interval{} +ep_ephemeral_full_policy,N/A,N/A,N/A,kv_ep_ephemeral_full_policy{} +ep_ephemeral_metadata_mark_stale_chunk_duration,N/A,N/A,N/A,kv_ep_ephemeral_metadata_mark_stale_chunk_duration{} +ep_ephemeral_metadata_purge_age,N/A,N/A,N/A,kv_ep_ephemeral_metadata_purge_age{} +ep_ephemeral_metadata_purge_interval,N/A,N/A,N/A,kv_ep_ephemeral_metadata_purge_interval{} +ep_ephemeral_metadata_purge_stale_chunk_duration,N/A,N/A,N/A,kv_ep_ephemeral_metadata_purge_stale_chunk_duration{} +ep_executor_pool_backend,N/A,N/A,N/A,kv_ep_executor_pool_backend{} +ep_exp_pager_enabled,N/A,N/A,N/A,kv_ep_exp_pager_enabled{} +ep_exp_pager_initial_run_time,N/A,N/A,N/A,kv_ep_exp_pager_initial_run_time{} +ep_exp_pager_stime,N/A,N/A,N/A,kv_ep_exp_pager_stime{} +ep_failpartialwarmup,N/A,N/A,N/A,kv_ep_failpartialwarmup{} +ep_flusher_total_batch_limit,N/A,N/A,N/A,kv_ep_flusher_total_batch_limit{} +ep_fsync_after_every_n_bytes_written,N/A,N/A,N/A,kv_ep_fsync_after_every_n_bytes_written{} +ep_getl_default_timeout,N/A,N/A,N/A,kv_ep_getl_default_timeout{} +ep_getl_max_timeout,N/A,N/A,N/A,kv_ep_getl_max_timeout{} +ep_hlc_drift_ahead_threshold_us,N/A,N/A,N/A,kv_ep_hlc_drift_ahead_threshold_us{} +ep_hlc_drift_behind_threshold_us,N/A,N/A,N/A,kv_ep_hlc_drift_behind_threshold_us{} +ep_ht_locks,N/A,N/A,N/A,kv_ep_ht_locks{} +ep_ht_resize_interval,N/A,N/A,N/A,kv_ep_ht_resize_interval{} 
+ep_ht_size,N/A,N/A,N/A,kv_ep_ht_size{} +ep_item_compressor_chunk_duration,N/A,N/A,N/A,kv_ep_item_compressor_chunk_duration{} +ep_item_compressor_interval,N/A,N/A,N/A,kv_ep_item_compressor_interval{} +ep_item_eviction_age_percentage,N/A,N/A,N/A,kv_ep_item_eviction_age_percentage{} +ep_item_eviction_freq_counter_age_threshold,N/A,N/A,N/A,kv_ep_item_eviction_freq_counter_age_threshold{} +ep_item_eviction_policy,N/A,N/A,N/A,kv_ep_item_eviction_policy{} +ep_item_freq_decayer_chunk_duration,N/A,N/A,N/A,kv_ep_item_freq_decayer_chunk_duration{} +ep_item_freq_decayer_percent,N/A,N/A,N/A,kv_ep_item_freq_decayer_percent{} +ep_item_num_based_new_chk,N/A,N/A,N/A,kv_ep_item_num_based_new_chk{} +ep_keep_closed_chks,N/A,N/A,N/A,kv_ep_keep_closed_chks{} +ep_magma_bloom_filter_accuracy,N/A,N/A,N/A,kv_ep_magma_bloom_filter_accuracy{} +ep_magma_bloom_filter_accuracy_for_bottom_level,N/A,N/A,N/A,kv_ep_magma_bloom_filter_accuracy_for_bottom_level{} +ep_magma_checkpoint_every_batch,N/A,N/A,N/A,kv_ep_magma_checkpoint_every_batch{} +ep_magma_checkpoint_interval,N/A,N/A,N/A,kv_ep_magma_checkpoint_interval{} +ep_magma_checkpoint_threshold,N/A,N/A,N/A,kv_ep_magma_checkpoint_threshold{} +ep_magma_delete_frag_ratio,N/A,N/A,N/A,kv_ep_magma_delete_frag_ratio{} +ep_magma_delete_memtable_writecache,N/A,N/A,N/A,kv_ep_magma_delete_memtable_writecache{} +ep_magma_enable_block_cache,N/A,N/A,N/A,kv_ep_magma_enable_block_cache{} +ep_magma_enable_direct_io,N/A,N/A,N/A,kv_ep_magma_enable_direct_io{} +ep_magma_enable_upsert,N/A,N/A,N/A,kv_ep_magma_enable_upsert{} +ep_magma_expiry_frag_threshold,N/A,N/A,N/A,kv_ep_magma_expiry_frag_threshold{} +ep_magma_expiry_purger_interval,N/A,N/A,N/A,kv_ep_magma_expiry_purger_interval{} +ep_magma_flusher_thread_percentage,N/A,N/A,N/A,kv_ep_magma_flusher_thread_percentage{} +ep_magma_fragmentation_percentage,N/A,N/A,N/A,kv_ep_magma_fragmentation_percentage{} +ep_magma_heartbeat_interval,N/A,N/A,N/A,kv_ep_magma_heartbeat_interval{} +ep_magma_initial_wal_buffer_size,N/A,N/A,N/A,kv_ep_magma_initial_wal_buffer_size{} +ep_magma_max_checkpoints,N/A,N/A,N/A,kv_ep_magma_max_checkpoints{} +ep_magma_max_default_storage_threads,N/A,N/A,N/A,kv_ep_magma_max_default_storage_threads{} +ep_magma_max_level_0_ttl,N/A,N/A,N/A,kv_ep_magma_max_level_0_ttl{} +ep_magma_max_recovery_bytes,N/A,N/A,N/A,kv_ep_magma_max_recovery_bytes{} +ep_magma_max_write_cache,N/A,N/A,N/A,kv_ep_magma_max_write_cache{} +ep_magma_mem_quota_ratio,N/A,N/A,N/A,kv_ep_magma_mem_quota_ratio{} +ep_magma_value_separation_size,N/A,N/A,N/A,kv_ep_magma_value_separation_size{} +ep_magma_write_cache_ratio,N/A,N/A,N/A,kv_ep_magma_write_cache_ratio{} +ep_max_checkpoints,N/A,N/A,N/A,kv_ep_max_checkpoints{} +ep_max_failover_entries,N/A,N/A,N/A,kv_ep_max_failover_entries{} +ep_max_item_privileged_bytes,N/A,N/A,N/A,kv_ep_max_item_privileged_bytes{} +ep_max_item_size,N/A,N/A,N/A,kv_ep_max_item_size{} +ep_max_num_bgfetchers,N/A,N/A,N/A,kv_ep_max_num_bgfetchers{} +ep_max_num_shards,N/A,N/A,N/A,kv_ep_max_num_shards{} +ep_max_num_workers,N/A,N/A,N/A,kv_ep_max_num_workers{} +ep_max_size,N/A,cbbucketstat_ep_max_size_bytes,cbpernodebucket_ep_max_size,kv_ep_max_size{} +ep_cache_size,N/A,N/A,N/A,kv_ep_cache_size{} +ep_max_threads,N/A,N/A,N/A,kv_ep_max_threads{} +ep_max_ttl,N/A,N/A,N/A,kv_ep_max_ttl{} +ep_max_vbuckets,N/A,N/A,N/A,kv_ep_max_vbuckets{} +ep_mem_high_wat,N/A,cbbucketstat_ep_mem_high_wat_bytes,cbpernodebucket_ep_mem_high_wat,kv_ep_mem_high_wat{} +ep_mem_low_wat,N/A,cbbucketstat_ep_mem_low_wat_bytes,cbpernodebucket_ep_mem_low_wat,kv_ep_mem_low_wat{} 
+ep_mem_used_merge_threshold_percent,N/A,N/A,N/A,kv_ep_mem_used_merge_threshold_percent{} +ep_min_compression_ratio,N/A,N/A,N/A,kv_ep_min_compression_ratio{} +ep_mutation_mem_threshold,N/A,N/A,N/A,kv_ep_mutation_mem_threshold{} +ep_num_auxio_threads,N/A,N/A,N/A,kv_ep_num_auxio_threads{} +ep_num_nonio_threads,N/A,N/A,N/A,kv_ep_num_nonio_threads{} +ep_num_reader_threads,N/A,N/A,N/A,kv_ep_num_reader_threads{} +ep_num_writer_threads,N/A,N/A,N/A,kv_ep_num_writer_threads{} +ep_pager_active_vb_pcnt,N/A,N/A,N/A,kv_ep_pager_active_vb_pcnt{} +ep_pager_sleep_time_ms,N/A,N/A,N/A,kv_ep_pager_sleep_time_ms{} +ep_persistent_metadata_purge_age,N/A,N/A,N/A,kv_ep_persistent_metadata_purge_age{} +ep_pitr_enabled,N/A,N/A,N/A,kv_ep_pitr_enabled{} +ep_pitr_granularity,N/A,N/A,N/A,kv_ep_pitr_granularity{} +ep_pitr_max_history_age,N/A,N/A,N/A,kv_ep_pitr_max_history_age{} +ep_replication_throttle_cap_pcnt,N/A,N/A,N/A,kv_ep_replication_throttle_cap_pcnt{} +ep_replication_throttle_queue_cap,N/A,N/A,N/A,kv_ep_replication_throttle_queue_cap{} +ep_replication_throttle_threshold,N/A,N/A,N/A,kv_ep_replication_throttle_threshold{} +ep_retain_erroneous_tombstones,N/A,N/A,N/A,kv_ep_retain_erroneous_tombstones{} +ep_rocksdb_bbt_options,N/A,N/A,N/A,kv_ep_rocksdb_bbt_options{} +ep_rocksdb_block_cache_high_pri_pool_ratio,N/A,N/A,N/A,kv_ep_rocksdb_block_cache_high_pri_pool_ratio{} +ep_rocksdb_block_cache_ratio,N/A,N/A,N/A,kv_ep_rocksdb_block_cache_ratio{} +ep_rocksdb_cf_options,N/A,N/A,N/A,kv_ep_rocksdb_cf_options{} +ep_rocksdb_default_cf_optimize_compaction,N/A,N/A,N/A,kv_ep_rocksdb_default_cf_optimize_compaction{} +ep_rocksdb_high_pri_background_threads,N/A,N/A,N/A,kv_ep_rocksdb_high_pri_background_threads{} +ep_rocksdb_low_pri_background_threads,N/A,N/A,N/A,kv_ep_rocksdb_low_pri_background_threads{} +ep_rocksdb_memtables_ratio,N/A,N/A,N/A,kv_ep_rocksdb_memtables_ratio{} +ep_rocksdb_options,N/A,N/A,N/A,kv_ep_rocksdb_options{} +ep_rocksdb_seqno_cf_optimize_compaction,N/A,N/A,N/A,kv_ep_rocksdb_seqno_cf_optimize_compaction{} +ep_rocksdb_stats_level,N/A,N/A,N/A,kv_ep_rocksdb_stats_level{} +ep_rocksdb_uc_max_size_amplification_percent,N/A,N/A,N/A,kv_ep_rocksdb_uc_max_size_amplification_percent{} +ep_rocksdb_write_rate_limit,N/A,N/A,N/A,kv_ep_rocksdb_write_rate_limit{} +ep_sync_writes_max_allowed_replicas,N/A,N/A,N/A,kv_ep_sync_writes_max_allowed_replicas{} +ep_time_synchronization,N/A,N/A,N/A,kv_ep_time_synchronization{} +ep_uuid,N/A,N/A,N/A,kv_ep_uuid{} +ep_warmup,N/A,N/A,N/A,kv_ep_warmup{} +ep_warmup_batch_size,N/A,N/A,N/A,kv_ep_warmup_batch_size{} +ep_warmup_min_items_threshold,N/A,N/A,N/A,kv_ep_warmup_min_items_threshold{} +ep_warmup_min_memory_threshold,N/A,N/A,N/A,kv_ep_warmup_min_memory_threshold{} +ep_xattr_enabled,N/A,N/A,N/A,kv_ep_xattr_enabled{} +ep_storage_age,N/A,N/A,N/A,kv_ep_storage_age_seconds{} +ep_storage_age_highwat,N/A,N/A,N/A,kv_ep_storage_age_highwat_seconds{} +ep_num_workers,N/A,N/A,N/A,kv_ep_num_workers{} +ep_bucket_priority,N/A,N/A,N/A,kv_ep_bucket_priority{} +ep_total_enqueued,N/A,N/A,N/A,kv_ep_total_enqueued{} +ep_total_deduplicated,N/A,N/A,N/A,kv_ep_total_deduplicated{} +ep_expired_access,N/A,N/A,N/A,kv_ep_expired_access{} +ep_expired_compactor,N/A,N/A,N/A,kv_ep_expired_compactor{} +ep_expired_pager,N/A,N/A,N/A,kv_ep_expired_pager{} +ep_queue_size,N/A,cbbucketstat_ep_queue_size,cbpernodebucket_ep_queue_size,kv_ep_queue_size{} +ep_diskqueue_items,N/A,cbbucketstat_ep_diskqueue_items,cbpernodebucket_ep_diskqueue_items,kv_ep_diskqueue_items{} +ep_commit_num,N/A,N/A,N/A,kv_ep_commit_num{} 
+ep_commit_time,N/A,N/A,N/A,kv_ep_commit_time_seconds{} +ep_commit_time_total,N/A,N/A,N/A,kv_ep_commit_time_total_seconds{} +ep_item_begin_failed,N/A,N/A,N/A,kv_ep_item_begin_failed{} +ep_item_commit_failed,N/A,cbbucketstat_ep_item_commit_failed,cbpernodebucket_ep_item_commit_failed,kv_ep_item_commit_failed{} +ep_item_flush_expired,N/A,N/A,N/A,kv_ep_item_flush_expired{} +ep_item_flush_failed,N/A,N/A,N/A,kv_ep_item_flush_failed{} +ep_flusher_state,N/A,N/A,N/A,kv_ep_flusher_state{} +ep_flusher_todo,N/A,cbbucketstat_ep_flusher_todo,cbpernodebucket_ep_flusher_todo,kv_ep_flusher_todo{} +ep_total_persisted,N/A,N/A,N/A,kv_ep_total_persisted{} +ep_uncommitted_items,N/A,N/A,N/A,kv_ep_uncommitted_items{} +ep_chk_persistence_timeout,N/A,N/A,N/A,kv_ep_chk_persistence_timeout_seconds{} +ep_vbucket_del,N/A,N/A,N/A,kv_ep_vbucket_del{} +ep_vbucket_del_fail,N/A,N/A,N/A,kv_ep_vbucket_del_fail{} +ep_flush_duration_total,N/A,N/A,N/A,kv_ep_flush_duration_total_seconds{} +ep_persist_vbstate_total,N/A,N/A,N/A,kv_ep_persist_vbstate_total{} +mem_used,N/A,N/A,N/A,kv_mem_used_bytes{} +mem_used_estimate,N/A,N/A,N/A,kv_mem_used_estimate_bytes{} +ep_mem_low_wat_percent,N/A,N/A,N/A,kv_ep_mem_low_wat_percent_ratio{} +ep_mem_high_wat_percent,N/A,N/A,N/A,kv_ep_mem_high_wat_percent_ratio{} +bytes,N/A,N/A,N/A,kv_total_memory_used_bytes{} +ep_kv_size,N/A,cbbucketstat_ep_kv_size,cbpernodebucket_ep_kv_size,kv_ep_key_value_size_bytes{} +ep_blob_num,N/A,N/A,N/A,kv_ep_blob_num{} +ep_blob_overhead,N/A,N/A,N/A,"kv_memory_overhead_bytes{for=""blobs""}" +ep_value_size,N/A,N/A,N/A,"kv_memory_used_bytes{for=""blobs""}" +ep_storedval_size,N/A,N/A,N/A,"kv_memory_used_bytes{for=""storedvalues""}" +ep_storedval_overhead,N/A,N/A,N/A,"kv_memory_overhead_bytes{for=""storedvalues""}" +ep_storedval_num,N/A,N/A,N/A,kv_ep_storedval_num{} +ep_overhead,N/A,cbbucketstat_ep_overhead,cbpernodebucket_ep_overhead,kv_total_memory_overhead_bytes{} +ep_item_num,N/A,N/A,N/A,kv_ep_item_num{} +ep_oom_errors,N/A,cbbucketstat_ep_oom_errors,cbpernodebucket_ep_oom_errors,kv_ep_oom_errors{} +ep_tmp_oom_errors,N/A,cbbucketstat_ep_tmp_oom_errors,cbpernodebucket_ep_tmp_oom_errors,kv_ep_tmp_oom_errors{} +ep_mem_tracker_enabled,N/A,N/A,N/A,kv_ep_mem_tracker_enabled{} +ep_bg_fetched,cbnode_interestingstats_ep_bg_fetched,cbbucketstat_ep_bg_fetched,cbpernodebucket_ep_bg_fetched,kv_ep_bg_fetched{} +ep_bg_meta_fetched,N/A,N/A,N/A,kv_ep_bg_meta_fetched{} +ep_bg_remaining_items,N/A,N/A,N/A,kv_ep_bg_remaining_items{} +ep_bg_remaining_jobs,N/A,N/A,N/A,kv_ep_bg_remaining_jobs{} +ep_num_pager_runs,N/A,N/A,N/A,kv_ep_num_pager_runs{} +ep_num_expiry_pager_runs,N/A,N/A,N/A,kv_ep_num_expiry_pager_runs{} +ep_num_freq_decayer_runs,N/A,N/A,N/A,kv_ep_num_freq_decayer_runs{} +ep_items_expelled_from_checkpoints,N/A,N/A,N/A,kv_ep_items_expelled_from_checkpoints{} +ep_items_rm_from_checkpoints,N/A,N/A,N/A,kv_ep_items_rm_from_checkpoints{} +ep_num_value_ejects,N/A,cbbucketstat_ep_num_value_ejects,cbpernodebucket_ep_num_value_ejects,kv_ep_num_value_ejects{} +ep_num_eject_failures,N/A,N/A,N/A,kv_ep_num_eject_failures{} +ep_num_not_my_vbuckets,N/A,N/A,N/A,kv_ep_num_not_my_vbuckets{} +ep_pending_ops,N/A,N/A,N/A,kv_ep_pending_ops{} +ep_pending_ops_total,N/A,N/A,N/A,kv_ep_pending_ops_total{} +ep_pending_ops_max,N/A,N/A,N/A,kv_ep_pending_ops_max{} +ep_pending_ops_max_duration,N/A,N/A,N/A,kv_ep_pending_ops_max_duration_seconds{} +ep_pending_compactions,N/A,N/A,N/A,kv_ep_pending_compactions{} +ep_rollback_count,N/A,N/A,N/A,kv_ep_rollback_count{} 
+ep_vbucket_del_max_walltime,N/A,N/A,N/A,kv_ep_vbucket_del_max_walltime_seconds{} +ep_vbucket_del_avg_walltime,N/A,N/A,N/A,kv_ep_vbucket_del_avg_walltime_seconds{} +ep_bg_num_samples,N/A,N/A,N/A,kv_ep_bg_num_samples{} +ep_bg_min_wait,N/A,N/A,N/A,kv_ep_bg_min_wait_seconds{} +ep_bg_max_wait,N/A,N/A,N/A,kv_ep_bg_max_wait_seconds{} +ep_bg_wait_avg,N/A,N/A,N/A,kv_ep_bg_wait_avg_seconds{} +ep_bg_min_load,N/A,N/A,N/A,kv_ep_bg_min_load_seconds{} +ep_bg_max_load,N/A,N/A,N/A,kv_ep_bg_max_load_seconds{} +ep_bg_load_avg,N/A,N/A,N/A,kv_ep_bg_load_avg_seconds{} +ep_bg_wait,N/A,N/A,N/A,kv_ep_bg_wait_seconds{} +ep_bg_load,N/A,N/A,N/A,kv_ep_bg_load_seconds{} +ep_degraded_mode,N/A,N/A,N/A,kv_ep_degraded_mode{} +ep_num_access_scanner_runs,N/A,N/A,N/A,kv_ep_num_access_scanner_runs{} +ep_num_access_scanner_skips,N/A,N/A,N/A,kv_ep_num_access_scanner_skips{} +ep_access_scanner_last_runtime,N/A,N/A,N/A,kv_ep_access_scanner_last_runtime_seconds{} +ep_access_scanner_num_items,N/A,N/A,N/A,kv_ep_access_scanner_num_items{} +ep_access_scanner_task_time,N/A,N/A,N/A,kv_ep_access_scanner_task_time{} +ep_expiry_pager_task_time,N/A,N/A,N/A,kv_ep_expiry_pager_task_time{} +ep_startup_time,N/A,N/A,N/A,kv_ep_startup_time_seconds{} +ep_warmup_thread,N/A,N/A,N/A,kv_ep_warmup_thread{} +ep_warmup_time,N/A,N/A,N/A,kv_ep_warmup_time_seconds{} +ep_warmup_oom,N/A,N/A,N/A,kv_ep_warmup_oom{} +ep_warmup_dups,N/A,N/A,N/A,kv_ep_warmup_dups{} +ep_num_ops_get_meta,N/A,cbbucketstat_ep_num_ops_get_meta,cbpernodebucket_ep_num_ops_get_meta,"kv_ops{op=""get_meta""}" +ep_num_ops_set_meta,N/A,cbbucketstat_ep_num_ops_set_meta,cbpernodebucket_ep_num_ops_set_meta,"kv_ops{op=""set_meta""}" +ep_num_ops_del_meta,N/A,cbbucketstat_ep_num_ops_del_meta,cbpernodebucket_ep_num_ops_del_meta,"kv_ops{op=""del_meta""}" +ep_num_ops_set_meta_res_fail,N/A,N/A,N/A,"kv_ops_failed{op=""set_meta""}" +ep_num_ops_del_meta_res_fail,N/A,N/A,N/A,"kv_ops_failed{op=""del_meta""}" +ep_num_ops_set_ret_meta,N/A,cbbucketstat_ep_num_ops_set_ret_meta,cbpernodebucket_ep_num_ops_set_ret_meta,"kv_ops{op=""set_ret_meta""}" +ep_num_ops_del_ret_meta,N/A,cbbucketstat_ep_num_ops_del_ret_meta,cbpernodebucket_ep_num_ops_del_ret_meta,"kv_ops{op=""del_ret_meta""}" +ep_num_ops_get_meta_on_set_meta,N/A,N/A,N/A,"kv_ops{op=""get_meta_for_set_meta""}" +ep_workload_pattern,N/A,N/A,N/A,kv_ep_workload_pattern{} +ep_defragmenter_num_visited,N/A,N/A,N/A,kv_ep_defragmenter_num_visited{} +ep_defragmenter_num_moved,N/A,N/A,N/A,kv_ep_defragmenter_num_moved{} +ep_defragmenter_sv_num_moved,N/A,N/A,N/A,kv_ep_defragmenter_sv_num_moved{} +ep_item_compressor_num_visited,N/A,N/A,N/A,kv_ep_item_compressor_num_visited{} +ep_item_compressor_num_compressed,N/A,N/A,N/A,kv_ep_item_compressor_num_compressed{} +ep_cursor_dropping_lower_threshold,N/A,N/A,N/A,kv_ep_cursor_dropping_lower_threshold_bytes{} +ep_cursor_dropping_upper_threshold,N/A,N/A,N/A,kv_ep_cursor_dropping_upper_threshold_bytes{} +ep_cursors_dropped,N/A,N/A,N/A,kv_ep_cursors_dropped{} +ep_cursor_memory_freed,N/A,N/A,N/A,kv_ep_cursor_memory_freed_bytes{} +ep_data_write_failed,N/A,N/A,cbpernodebucket_ep_data_write_failed,kv_ep_data_write_failed{} +ep_data_read_failed,N/A,N/A,cbpernodebucket_ep_data_read_failed,kv_ep_data_read_failed{} +ep_io_document_write_bytes,N/A,N/A,N/A,kv_ep_io_document_write_bytes_bytes{} +ep_io_total_read_bytes,N/A,N/A,N/A,kv_ep_io_total_read_bytes_bytes{} +ep_io_total_write_bytes,N/A,N/A,N/A,kv_ep_io_total_write_bytes_bytes{} +ep_io_compaction_read_bytes,N/A,N/A,N/A,kv_ep_io_compaction_read_bytes_bytes{} 
+ep_io_compaction_write_bytes,N/A,N/A,N/A,kv_ep_io_compaction_write_bytes_bytes{} +ep_io_bg_fetch_read_count,N/A,N/A,N/A,kv_ep_io_bg_fetch_read_count{} +ep_bg_fetch_avg_read_amplification,N/A,N/A,N/A,kv_ep_bg_fetch_avg_read_amplification_ratio{} +ep_magma_compactions,N/A,N/A,N/A,kv_ep_magma_compactions{} +ep_magma_flushes,N/A,N/A,N/A,kv_ep_magma_flushes{} +ep_magma_ttl_compactions,N/A,N/A,N/A,kv_ep_magma_ttl_compactions{} +ep_magma_filecount_compactions,N/A,N/A,N/A,kv_ep_magma_filecount_compactions{} +ep_magma_writer_compactions,N/A,N/A,N/A,kv_ep_magma_writer_compactions{} +ep_magma_readamp,N/A,N/A,N/A,kv_ep_magma_readamp_ratio{} +ep_magma_readamp_get,N/A,N/A,N/A,kv_ep_magma_readamp_get_ratio{} +ep_magma_read_bytes,N/A,N/A,N/A,kv_ep_magma_read_bytes_bytes{} +ep_magma_read_bytes_compact,N/A,N/A,N/A,kv_ep_magma_read_bytes_compact_bytes{} +ep_magma_read_bytes_get,N/A,N/A,N/A,kv_ep_magma_read_bytes_get_bytes{} +ep_magma_bytes_outgoing,N/A,N/A,N/A,kv_ep_magma_bytes_outgoing_bytes{} +ep_magma_readio,N/A,N/A,N/A,kv_ep_magma_readio{} +ep_magma_readioamp,N/A,N/A,N/A,kv_ep_magma_readioamp_ratio{} +ep_magma_bytes_per_read,N/A,N/A,N/A,kv_ep_magma_bytes_per_read_ratio{} +ep_magma_writeamp,N/A,N/A,N/A,kv_ep_magma_writeamp_ratio{} +ep_magma_bytes_incoming,N/A,N/A,N/A,kv_ep_magma_bytes_incoming_bytes{} +ep_magma_write_bytes,N/A,N/A,N/A,kv_ep_magma_write_bytes_bytes{} +ep_magma_write_bytes_compact,N/A,N/A,N/A,kv_ep_magma_write_bytes_compact_bytes{} +ep_magma_logical_data_size,N/A,N/A,N/A,kv_ep_magma_logical_data_size_bytes{} +ep_magma_logical_disk_size,N/A,N/A,N/A,kv_ep_magma_logical_disk_size_bytes{} +ep_magma_fragmentation,N/A,N/A,N/A,kv_ep_magma_fragmentation_ratio{} +ep_magma_total_disk_usage,N/A,N/A,N/A,kv_ep_magma_total_disk_usage_bytes{} +ep_magma_wal_disk_usage,N/A,N/A,N/A,kv_ep_magma_wal_disk_usage_bytes{} +ep_magma_block_cache_mem_used,N/A,N/A,N/A,kv_ep_magma_block_cache_mem_used_bytes{} +ep_magma_write_cache_mem_used,N/A,N/A,N/A,kv_ep_magma_write_cache_mem_used_bytes{} +ep_magma_wal_mem_used,N/A,N/A,N/A,kv_ep_magma_wal_mem_used_bytes{} +ep_magma_table_meta_mem_used,N/A,N/A,N/A,kv_ep_magma_table_meta_mem_used_bytes{} +ep_magma_buffer_mem_used,N/A,N/A,N/A,kv_ep_magma_buffer_mem_used_bytes{} +ep_magma_bloom_filter_mem_used,N/A,N/A,N/A,kv_ep_magma_bloom_filter_mem_used_bytes{} +ep_magma_index_resident_ratio,N/A,N/A,N/A,kv_ep_magma_index_resident_ratio_ratio{} +ep_magma_block_cache_hits,N/A,N/A,N/A,kv_ep_magma_block_cache_hits{} +ep_magma_block_cache_misses,N/A,N/A,N/A,kv_ep_magma_block_cache_misses{} +ep_magma_block_cache_hit_ratio,N/A,N/A,N/A,kv_ep_magma_block_cache_hit_ratio_ratio{} +ep_magma_tables_created,N/A,N/A,N/A,kv_ep_magma_tables_created{} +ep_magma_tables_deleted,N/A,N/A,N/A,kv_ep_magma_tables_deleted{} +ep_magma_tables,N/A,N/A,N/A,kv_ep_magma_tables{} +ep_magma_syncs,N/A,N/A,N/A,kv_ep_magma_syncs{} +ep_rocksdb_kMemTableTotal,N/A,N/A,N/A,kv_ep_rocksdb_kMemTableTotal_bytes{} +ep_rocksdb_kMemTableUnFlushed,N/A,N/A,N/A,kv_ep_rocksdb_kMemTableUnFlushed_bytes{} +ep_rocksdb_kTableReadersTotal,N/A,N/A,N/A,kv_ep_rocksdb_kTableReadersTotal_bytes{} +ep_rocksdb_kCacheTotal,N/A,N/A,N/A,kv_ep_rocksdb_kCacheTotal_bytes{} +ep_rocksdb_default_kSizeAllMemTables,N/A,N/A,N/A,kv_ep_rocksdb_default_kSizeAllMemTables_bytes{} +ep_rocksdb_seqno_kSizeAllMemTables,N/A,N/A,N/A,kv_ep_rocksdb_seqno_kSizeAllMemTables_bytes{} +ep_rocksdb_block_cache_data_hit_ratio,N/A,N/A,N/A,kv_ep_rocksdb_block_cache_data_hit_ratio_ratio{} 
+ep_rocksdb_block_cache_index_hit_ratio,N/A,N/A,N/A,kv_ep_rocksdb_block_cache_index_hit_ratio_ratio{} +ep_rocksdb_block_cache_filter_hit_ratio,N/A,N/A,N/A,kv_ep_rocksdb_block_cache_filter_hit_ratio_ratio{} +ep_rocksdb_default_kTotalSstFilesSize,N/A,N/A,N/A,kv_ep_rocksdb_default_kTotalSstFilesSize_bytes{} +ep_rocksdb_seqno_kTotalSstFilesSize,N/A,N/A,N/A,kv_ep_rocksdb_seqno_kTotalSstFilesSize_bytes{} +ep_rocksdb_scan_totalSeqnoHits,N/A,N/A,N/A,kv_ep_rocksdb_scan_totalSeqnoHits{} +ep_rocksdb_scan_oldSeqnoHits,N/A,N/A,N/A,kv_ep_rocksdb_scan_oldSeqnoHits{} +ep_db_data_size,N/A,N/A,N/A,kv_ep_db_data_size_bytes{} +ep_db_file_size,N/A,N/A,N/A,kv_ep_db_file_size_bytes{} +ep_db_history_file_size,N/A,N/A,N/A,kv_ep_db_history_file_size_bytes{} +ep_db_prepare_size,N/A,N/A,N/A,kv_ep_db_prepare_size_bytes{} +bg_wait,N/A,N/A,N/A,kv_bg_wait_seconds{} +bg_load,N/A,N/A,N/A,kv_bg_load_seconds{} +pending_ops,N/A,N/A,N/A,kv_pending_ops_seconds{} +access_scanner,N/A,N/A,N/A,kv_access_scanner_seconds{} +checkpoint_remover,N/A,N/A,N/A,kv_checkpoint_remover_seconds{} +item_pager,N/A,N/A,N/A,kv_item_pager_seconds{} +expiry_pager,N/A,N/A,N/A,kv_expiry_pager_seconds{} +storage_age,N/A,N/A,N/A,kv_storage_age_seconds{} +set_with_meta,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""set_with_meta""}" +get_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""get""}" +store_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""store""}" +arith_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""arith""}" +get_stats_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""get_stats""}" +get_vb_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""get_vb""}" +set_vb_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""set_vb""}" +del_vb_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""del_vb""}" +chk_persistence_cmd,N/A,N/A,N/A,"kv_cmd_time_taken_seconds{op=""chk_persistence""}" +notify_io,N/A,N/A,N/A,kv_notify_io_seconds{} +disk_insert,N/A,N/A,N/A,"kv_disk_seconds{op=""insert""}" +disk_update,N/A,N/A,N/A,"kv_disk_seconds{op=""update""}" +disk_del,N/A,N/A,N/A,"kv_disk_seconds{op=""del""}" +disk_vb_del,N/A,N/A,N/A,"kv_disk_seconds{op=""vb_del""}" +disk_commit,N/A,N/A,N/A,"kv_disk_seconds{op=""commit""}" +item_alloc_sizes,N/A,N/A,N/A,kv_item_alloc_sizes_bytes{} +bg_batch_size,N/A,N/A,N/A,kv_bg_batch_size{} +persistence_cursor_get_all_items,N/A,N/A,N/A,"kv_cursor_get_all_items_time_seconds{cursor_type=""persistence""}" +dcp_cursors_get_all_items,N/A,N/A,N/A,"kv_cursor_get_all_items_time_seconds{cursor_type=""dcp""}" +sync_write_commit_majority,N/A,N/A,N/A,"kv_sync_write_commit_duration_seconds{level=""majority""}" +sync_write_commit_majority_and_persist_on_master,N/A,N/A,N/A,"kv_sync_write_commit_duration_seconds{level=""majority_and_persist_on_master""}" +sync_write_commit_persist_to_majority,N/A,N/A,N/A,"kv_sync_write_commit_duration_seconds{level=""persist_to_majority""}" +uptime,N/A,N/A,N/A,kv_uptime_seconds{} +stat_reset,N/A,N/A,N/A,kv_stat_reset{} +time,N/A,N/A,N/A,kv_time_seconds{} +version,N/A,N/A,N/A,kv_version{} +memcached_version,N/A,N/A,N/A,kv_memcached_version{} +daemon_connections,N/A,N/A,N/A,kv_daemon_connections{} +curr_connections,N/A,cbbucketstat_curr_connections,cbpernodebucket_curr_connections,kv_curr_connections{} +system_connections,N/A,N/A,N/A,kv_system_connections{} +total_connections,N/A,N/A,N/A,kv_total_connections{} +connection_structures,N/A,N/A,N/A,kv_connection_structures{} +cmd_set,N/A,N/A,N/A,"kv_ops{op=""set""}" +cmd_flush,N/A,N/A,N/A,"kv_ops{op=""flush""}" +cmd_lock,N/A,N/A,N/A,"kv_ops{op=""lock""}" 
+cmd_subdoc_lookup,N/A,N/A,N/A,"kv_subdoc_ops{op=""lookup""}" +cmd_subdoc_mutation,N/A,N/A,N/A,"kv_subdoc_ops{op=""mutation""}" +bytes_subdoc_lookup_total,N/A,N/A,N/A,kv_subdoc_lookup_searched_bytes{} +bytes_subdoc_lookup_extracted,N/A,N/A,N/A,kv_subdoc_lookup_extracted_bytes{} +bytes_subdoc_mutation_total,N/A,N/A,N/A,kv_subdoc_mutation_updated_bytes{} +bytes_subdoc_mutation_inserted,N/A,N/A,N/A,kv_subdoc_mutation_inserted_bytes{} +cmd_total_sets,N/A,N/A,N/A,kv_cmd_total_sets{} +cmd_total_gets,N/A,N/A,N/A,kv_cmd_total_gets{} +cmd_total_ops,N/A,N/A,N/A,kv_cmd_total_ops{} +cmd_mutation,N/A,N/A,N/A,kv_cmd_mutation{} +cmd_lookup,N/A,N/A,N/A,kv_cmd_lookup{} +auth_cmds,N/A,N/A,N/A,kv_auth_cmds{} +auth_errors,N/A,N/A,N/A,kv_auth_errors{} +get_hits,cbnode_interestingstats_get_hits,cbbucketstat_get_hits,cbpernodebucket_get_hits,"kv_ops{result=""hit"",op=""get""}" +get_misses,N/A,cbbucketstat_get_misses,cbpernodebucket_get_misses,"kv_ops{result=""miss"",op=""get""}" +delete_misses,N/A,cbbucketstat_delete_misses,cbpernodebucket_delete_misses,"kv_ops{result=""miss"",op=""delete""}" +delete_hits,N/A,cbbucketstat_delete_hits,cbpernodebucket_delete_hits,"kv_ops{result=""hit"",op=""delete""}" +incr_misses,N/A,cbbucketstat_incr_misses,cbpernodebucket_incr_misses,"kv_ops{result=""miss"",op=""incr""}" +incr_hits,N/A,cbbucketstat_incr_hits,cbpernodebucket_incr_hits,"kv_ops{result=""hit"",op=""incr""}" +decr_misses,N/A,cbbucketstat_decr_misses,cbpernodebucket_decr_misses,"kv_ops{result=""miss"",op=""decr""}" +decr_hits,N/A,cbbucketstat_decr_hits,cbpernodebucket_decr_hits,"kv_ops{result=""hit"",op=""decr""}" +cas_misses,N/A,cbbucketstat_cas_misses,cbpernodebucket_cas_misses,"kv_ops{result=""miss"",op=""cas""}" +cas_hits,N/A,cbbucketstat_cas_hits,cbpernodebucket_cas_hits,"kv_ops{result=""hit"",op=""cas""}" +cas_badval,N/A,cbbucketstat_cas_badval,cbpernodebucket_cas_bad_val,"kv_ops{result=""badval"",op=""cas""}" +bytes_read,N/A,cbbucketstat_read_bytes,cbpernodebucket_bytes_read,kv_read_bytes{} +bytes_written,N/A,cbbucketstat_written_bytes,cbpernodebucket_bytes_written,kv_written_bytes{} +rejected_conns,N/A,N/A,N/A,kv_rejected_conns{} +threads,N/A,N/A,N/A,kv_threads{} +conn_yields,N/A,N/A,N/A,kv_conn_yields{} +iovused_high_watermark,N/A,N/A,N/A,kv_iovused_high_watermark{} +msgused_high_watermark,N/A,N/A,N/A,kv_msgused_high_watermark{} +lock_errors,N/A,N/A,N/A,kv_lock_errors{} +cmd_lookup_10s_count,N/A,N/A,N/A,kv_cmd_lookup_10s_count{} +cmd_lookup_10s_duration_us,N/A,N/A,N/A,kv_cmd_lookup_10s_duration_seconds{} +cmd_mutation_10s_count,N/A,N/A,N/A,kv_cmd_mutation_10s_count{} +cmd_mutation_10s_duration_us,N/A,N/A,N/A,kv_cmd_mutation_10s_duration_seconds{} +total_resp_errors,N/A,N/A,N/A,kv_total_resp_errors{} +enabled,N/A,N/A,N/A,kv_audit_enabled{} +dropped_events,N/A,N/A,N/A,kv_audit_dropped_events{} +vb_active_num,N/A,cbbucketstat_vbuckets_active_num,cbpernodebucket_vb_active_num,"kv_num_vbuckets{state=""active""}" +vb_replica_num,N/A,cbbucketstat_vbuckets_replica_num,cbpernodebucket_vb_replica_num,"kv_num_vbuckets{state=""replica""}" +vb_pending_num,N/A,cbbucketstat_vbuckets_pending_num,cbpernodebucket_vb_pending_num,"kv_num_vbuckets{state=""pending""}" +vb_dead_num,N/A,N/A,N/A,"kv_num_vbuckets{state=""dead""}" +vb_active_curr_items,N/A,N/A,N/A,"kv_vb_curr_items{state=""active""}" +vb_replica_curr_items,cbnode_interestingstats_vb_replica_curr_items,cbbucketstat_vbuckets_replica_curr_items,cbpernodebucket_vb_replica_curr_items,"kv_vb_curr_items{state=""replica""}" 
+vb_pending_curr_items,N/A,cbbucketstat_vbuckets_pending_curr_items,cbpernodebucket_vb_pending_curr_items,"kv_vb_curr_items{state=""pending""}" +vb_active_hp_vb_req_size,N/A,N/A,N/A,"kv_num_high_pri_requests{state=""active""}" +vb_replica_hp_vb_req_size,N/A,N/A,N/A,"kv_num_high_pri_requests{state=""replica""}" +vb_pending_hp_vb_req_size,N/A,N/A,N/A,"kv_num_high_pri_requests{state=""pending""}" +vb_active_num_non_resident,cbnode_interestingstats_vb_active_number_non_resident,cbbucketstat_vbuckets_active_num_non_resident,cbpernodebucket_vb_active_num_non_resident,"kv_vb_num_non_resident{state=""active""}" +vb_replica_num_non_resident,N/A,cbbucketstat_vbuckets_replica_num_non_resident,cbpernodebucket_vb_replica_num_non_resident,"kv_vb_num_non_resident{state=""replica""}" +vb_pending_num_non_resident,N/A,cbbucketstat_vbuckets_pending_num_non_resident,cbpernodebucket_vb_pending_num_non_resident,"kv_vb_num_non_resident{state=""pending""}" +vb_active_perc_mem_resident,N/A,cbbucketstat_vbuckets_active_resident_items_ratio,cbpernodebucket_vb_active_resident_items_ratio,"kv_vb_perc_mem_resident_ratio{state=""active""}" +vb_replica_perc_mem_resident,N/A,cbbucketstat_vbuckets_replica_resident_items_ratio,cbpernodebucket_vb_replica_resident_items_ratio,"kv_vb_perc_mem_resident_ratio{state=""replica""}" +vb_pending_perc_mem_resident,N/A,cbbucketstat_vbuckets_pending_resident_items_ratio,cbpernodebucket_vb_pending_resident_items_ratio,"kv_vb_perc_mem_resident_ratio{state=""pending""}" +vb_active_eject,N/A,cbbucketstat_vbuckets_active_eject,cbpernodebucket_vb_active_eject,"kv_vb_eject{state=""active""}" +vb_replica_eject,N/A,cbbucketstat_vbuckets_replica_eject,cbpernodebucket_vb_replica_eject,"kv_vb_eject{state=""replica""}" +vb_pending_eject,N/A,cbbucketstat_vbuckets_pending_eject,cbpernodebucket_vb_pending_eject,"kv_vb_eject{state=""pending""}" +vb_active_expired,N/A,N/A,N/A,"kv_vb_expired{state=""active""}" +vb_replica_expired,N/A,N/A,N/A,"kv_vb_expired{state=""replica""}" +vb_pending_expired,N/A,N/A,N/A,"kv_vb_expired{state=""pending""}" +vb_active_meta_data_memory,N/A,cbbucketstat_vbuckets_active_meta_data_memory,cbpernodebucket_vb_active_meta_data_memory,"kv_vb_meta_data_memory_bytes{state=""active""}" +vb_replica_meta_data_memory,N/A,cbbucketstat_vbuckets_replica_meta_data_memory,cbpernodebucket_vb_replica_meta_data_memory,"kv_vb_meta_data_memory_bytes{state=""replica""}" +vb_pending_meta_data_memory,N/A,cbbucketstat_vbuckets_pending_meta_data_memory,cbpernodebucket_vb_pending_meta_data_memory,"kv_vb_meta_data_memory_bytes{state=""pending""}" +vb_active_meta_data_disk,N/A,N/A,N/A,"kv_vb_meta_data_disk_bytes{state=""active""}" +vb_replica_meta_data_disk,N/A,N/A,N/A,"kv_vb_meta_data_disk_bytes{state=""replica""}" +vb_pending_meta_data_disk,N/A,N/A,N/A,"kv_vb_meta_data_disk_bytes{state=""pending""}" +vb_active_checkpoint_memory,N/A,N/A,N/A,"kv_vb_checkpoint_memory_bytes{state=""active""}" +vb_replica_checkpoint_memory,N/A,N/A,N/A,"kv_vb_checkpoint_memory_bytes{state=""replica""}" +vb_pending_checkpoint_memory,N/A,N/A,N/A,"kv_vb_checkpoint_memory_bytes{state=""pending""}" +vb_active_checkpoint_memory_unreferenced,N/A,N/A,N/A,"kv_vb_checkpoint_memory_unreferenced_bytes{state=""active""}" +vb_replica_checkpoint_memory_unreferenced,N/A,N/A,N/A,"kv_vb_checkpoint_memory_unreferenced_bytes{state=""replica""}" +vb_pending_checkpoint_memory_unreferenced,N/A,N/A,N/A,"kv_vb_checkpoint_memory_unreferenced_bytes{state=""pending""}" 
+vb_active_checkpoint_memory_overhead,N/A,N/A,N/A,"kv_vb_checkpoint_memory_overhead_bytes{state=""active""}" +vb_replica_checkpoint_memory_overhead,N/A,N/A,N/A,"kv_vb_checkpoint_memory_overhead_bytes{state=""replica""}" +vb_pending_checkpoint_memory_overhead,N/A,N/A,N/A,"kv_vb_checkpoint_memory_overhead_bytes{state=""pending""}" +vb_active_ht_memory,N/A,N/A,N/A,"kv_vb_ht_memory_bytes{state=""active""}" +vb_replica_ht_memory,N/A,N/A,N/A,"kv_vb_ht_memory_bytes{state=""replica""}" +vb_pending_ht_memory,N/A,N/A,N/A,"kv_vb_ht_memory_bytes{state=""pending""}" +vb_active_itm_memory,N/A,cbbucketstat_vbuckets_active_itm_memory,cbpernodebucket_vb_active_itm_memory,"kv_vb_itm_memory_bytes{state=""active""}" +vb_replica_itm_memory,N/A,cbbucketstat_vbuckets_replica_itm_memory,cbpernodebucket_vb_replica_itm_memory,"kv_vb_itm_memory_bytes{state=""replica""}" +vb_pending_itm_memory,N/A,cbbucketstat_vbuckets_pending_itm_memory,cbpernodebucket_vb_pending_itm_memory,"kv_vb_itm_memory_bytes{state=""pending""}" +vb_active_itm_memory_uncompressed,N/A,N/A,N/A,"kv_vb_itm_memory_uncompressed_bytes{state=""active""}" +vb_replica_itm_memory_uncompressed,N/A,N/A,N/A,"kv_vb_itm_memory_uncompressed_bytes{state=""replica""}" +vb_pending_itm_memory_uncompressed,N/A,N/A,N/A,"kv_vb_itm_memory_uncompressed_bytes{state=""pending""}" +vb_active_ops_create,N/A,cbbucketstat_vbuckets_active_ops_create,cbpernodebucket_vb_active_ops_create,"kv_vb_ops_create{state=""active""}" +vb_replica_ops_create,N/A,cbbucketstat_vbuckets_replica_ops_create,cbpernodebucket_vb_replica_ops_create,"kv_vb_ops_create{state=""replica""}" +vb_pending_ops_create,N/A,cbbucketstat_vbuckets_pending_ops_create,cbpernodebucket_vb_pending_ops_create,"kv_vb_ops_create{state=""pending""}" +vb_active_ops_update,N/A,cbbucketstat_vbuckets_active_ops_update,cbpernodebucket_vb_active_ops_update,"kv_vb_ops_update{state=""active""}" +vb_replica_ops_update,N/A,cbbucketstat_vbuckets_replica_ops_update,cbpernodebucket_vb_replica_ops_update,"kv_vb_ops_update{state=""replica""}" +vb_pending_ops_update,N/A,cbbucketstat_vbuckets_pending_ops_update,cbpernodebucket_vb_pending_ops_update,"kv_vb_ops_update{state=""pending""}" +vb_active_ops_delete,N/A,N/A,N/A,"kv_vb_ops_delete{state=""active""}" +vb_replica_ops_delete,N/A,N/A,N/A,"kv_vb_ops_delete{state=""replica""}" +vb_pending_ops_delete,N/A,N/A,N/A,"kv_vb_ops_delete{state=""pending""}" +vb_active_ops_get,N/A,N/A,N/A,"kv_vb_ops_get{state=""active""}" +vb_replica_ops_get,N/A,N/A,N/A,"kv_vb_ops_get{state=""replica""}" +vb_pending_ops_get,N/A,N/A,N/A,"kv_vb_ops_get{state=""pending""}" +vb_active_ops_reject,N/A,N/A,N/A,"kv_vb_ops_reject{state=""active""}" +vb_replica_ops_reject,N/A,N/A,N/A,"kv_vb_ops_reject{state=""replica""}" +vb_pending_ops_reject,N/A,N/A,N/A,"kv_vb_ops_reject{state=""pending""}" +vb_active_queue_size,N/A,cbbucketstat_vbuckets_active_queue_size,cbpernodebucket_vb_active_queue_size,"kv_vb_queue_size{state=""active""}" +vb_replica_queue_size,N/A,cbbucketstat_vbuckets_replica_queue_size,cbpernodebucket_vb_replica_queue_size,"kv_vb_queue_size{state=""replica""}" +vb_pending_queue_size,N/A,cbbucketstat_vbuckets_pending_queue_size,cbpernodebucket_vb_pending_queue_size,"kv_vb_queue_size{state=""pending""}" +vb_active_queue_memory,N/A,N/A,N/A,"kv_vb_queue_memory_bytes{state=""active""}" +vb_replica_queue_memory,N/A,N/A,N/A,"kv_vb_queue_memory_bytes{state=""replica""}" +vb_pending_queue_memory,N/A,N/A,N/A,"kv_vb_queue_memory_bytes{state=""pending""}" 
+vb_active_queue_age,N/A,cbbucketstat_vbuckets_active_queue_age,cbpernodebucket_vb_active_queue_age,"kv_vb_queue_age_seconds{state=""active""}" +vb_replica_queue_age,N/A,cbbucketstat_vbuckets_replica_queue_age,cbpernodebucket_vb_replica_queue_age,"kv_vb_queue_age_seconds{state=""replica""}" +vb_pending_queue_age,N/A,cbbucketstat_vbuckets_pending_queue_age,cbpernodebucket_vb_pending_queue_age,"kv_vb_queue_age_seconds{state=""pending""}" +vb_active_queue_pending,N/A,N/A,N/A,"kv_vb_queue_pending_bytes{state=""active""}" +vb_replica_queue_pending,N/A,N/A,N/A,"kv_vb_queue_pending_bytes{state=""replica""}" +vb_pending_queue_pending,N/A,N/A,N/A,"kv_vb_queue_pending_bytes{state=""pending""}" +vb_active_queue_fill,N/A,cbbucketstat_vbuckets_active_queue_fill,cbpernodebucket_vb_active_queue_fill,"kv_vb_queue_fill{state=""active""}" +vb_replica_queue_fill,N/A,cbbucketstat_vbuckets_replica_queue_fill,cbpernodebucket_vb_replica_queue_fill,"kv_vb_queue_fill{state=""replica""}" +vb_pending_queue_fill,N/A,cbbucketstat_vbuckets_pending_queue_fill,cbpernodebucket_vb_pending_queue_fill,"kv_vb_queue_fill{state=""pending""}" +vb_active_queue_drain,N/A,cbbucketstat_vbuckets_active_queue_drain,cbpernodebucket_vb_active_queue_drain,"kv_vb_queue_drain{state=""active""}" +vb_replica_queue_drain,N/A,cbbucketstat_vbuckets_replica_queue_drain,cbpernodebucket_vb_replica_queue_drain,"kv_vb_queue_drain{state=""replica""}" +vb_pending_queue_drain,N/A,cbbucketstat_vbuckets_pending_queue_drain,cbpernodebucket_vb_pending_queue_drain,"kv_vb_queue_drain{state=""pending""}" +vb_active_rollback_item_count,N/A,N/A,N/A,"kv_vb_rollback_item_count{state=""active""}" +vb_replica_rollback_item_count,N/A,N/A,N/A,"kv_vb_rollback_item_count{state=""replica""}" +vb_pending_rollback_item_count,N/A,N/A,N/A,"kv_vb_rollback_item_count{state=""pending""}" +curr_items,cbnode_interestingstats_curr_items,N/A,cbpernodebucket_curr_items,kv_curr_items{} +curr_temp_items,N/A,N/A,N/A,kv_curr_temp_items{} +curr_items_tot,cbnode_interestingstats_curr_items_tot,cbbucketstat_curr_items_tot,cbpernodebucket_curr_items_tot,kv_curr_items_tot{} +vb_active_sync_write_accepted_count,N/A,N/A,N/A,"kv_vb_sync_write_accepted_count{state=""active""}" +vb_replica_sync_write_accepted_count,N/A,N/A,N/A,"kv_vb_sync_write_accepted_count{state=""replica""}" +vb_active_sync_write_committed_count,N/A,N/A,N/A,"kv_vb_sync_write_committed_count{state=""active""}" +vb_replica_sync_write_committed_count,N/A,N/A,N/A,"kv_vb_sync_write_committed_count{state=""replica""}" +vb_active_sync_write_aborted_count,N/A,N/A,N/A,"kv_vb_sync_write_aborted_count{state=""active""}" +vb_replica_sync_write_aborted_count,N/A,N/A,N/A,"kv_vb_sync_write_aborted_count{state=""replica""}" +ep_vb_total,N/A,cbbucketstat_ep_vbuckets,cbpernodebucket_ep_vb_total,kv_ep_vb_total{} +ep_total_new_items,N/A,N/A,N/A,kv_ep_total_new_items{} +ep_total_del_items,N/A,N/A,N/A,kv_ep_total_del_items{} +ep_diskqueue_memory,N/A,N/A,N/A,kv_ep_diskqueue_memory_bytes{} +ep_diskqueue_fill,N/A,cbbucketstat_ep_diskqueue_fill,cbpernodebucket_ep_diskqueue_fill,kv_ep_diskqueue_fill{} +ep_diskqueue_drain,N/A,cbbucketstat_ep_diskqueue_drain,cbpernodebucket_ep_diskqueue_drain,kv_ep_diskqueue_drain{} +ep_diskqueue_pending,N/A,N/A,N/A,kv_ep_diskqueue_pending{} +ep_meta_data_memory,N/A,cbbucketstat_ep_meta_data_memory,cbpernodebucket_ep_meta_data_memory,kv_ep_meta_data_memory_bytes{} +ep_meta_data_disk,N/A,N/A,N/A,kv_ep_meta_data_disk_bytes{} +ep_checkpoint_memory,N/A,N/A,N/A,kv_ep_checkpoint_memory_bytes{} 
+ep_checkpoint_memory_unreferenced,N/A,N/A,N/A,kv_ep_checkpoint_memory_unreferenced_bytes{} +ep_checkpoint_memory_overhead,N/A,N/A,N/A,kv_ep_checkpoint_memory_overhead_bytes{} +ep_total_cache_size,N/A,N/A,N/A,kv_ep_total_cache_size_bytes{} +rollback_item_count,N/A,N/A,N/A,kv_rollback_item_count{} +ep_num_non_resident,N/A,cbbucketstat_ep_num_non_resident,cbpernodebucket_ep_num_non_resident,kv_ep_num_non_resident{} +ep_chk_persistence_remains,N/A,N/A,N/A,kv_ep_chk_persistence_remains{} +ep_active_hlc_drift,N/A,cbbucketstat_ep_active_hlc_drift,cbpernodebucket_ep_active_hlc_drift,"kv_ep_hlc_drift_seconds{state=""active""}" +ep_active_hlc_drift_count,N/A,N/A,cbpernodebucket_ep_active_hlc_drift_count,"kv_ep_hlc_drift_count{state=""active""}" +ep_replica_hlc_drift,N/A,cbbucketstat_ep_replica_hlc_drift,cbpernodebucket_ep_replica_hlc_drift,"kv_ep_hlc_drift_seconds{state=""replica""}" +ep_replica_hlc_drift_count,N/A,N/A,cbpernodebucket_ep_replica_hlc_drift_count,"kv_ep_hlc_drift_count{state=""replica""}" +ep_active_ahead_exceptions,N/A,cbbucketstat_ep_active_ahead_exceptions,cbpernodebucket_ep_active_ahead_exceptions,"kv_ep_ahead_exceptions{state=""active""}" +ep_active_behind_exceptions,N/A,N/A,N/A,"kv_ep_behind_exceptions{state=""active""}" +ep_replica_ahead_exceptions,N/A,cbbucketstat_ep_replica_ahead_exceptions,cbpernodebucket_ep_replica_ahead_exceptions,"kv_ep_ahead_exceptions{state=""replica""}" +ep_replica_behind_exceptions,N/A,N/A,N/A,"kv_ep_behind_exceptions{state=""replica""}" +ep_clock_cas_drift_threshold_exceeded,N/A,cbbucketstat_ep_clock_cas_drift_threshold_exceeded,cbpernodebucket_ep_clock_cas_drift_threshold_exceeded,kv_ep_clock_cas_drift_threshold_exceeded{} +vb_active_auto_delete_count,N/A,N/A,N/A,"kv_vb_auto_delete_count{state=""active""}" +vb_replica_auto_delete_count,N/A,N/A,N/A,"kv_vb_auto_delete_count{state=""replica""}" +vb_pending_auto_delete_count,N/A,N/A,N/A,"kv_vb_auto_delete_count{state=""pending""}" +vb_active_ht_tombstone_purged_count,N/A,N/A,N/A,"kv_vb_ht_tombstone_purged_count{state=""active""}" +vb_replica_ht_tombstone_purged_count,N/A,N/A,N/A,"kv_vb_ht_tombstone_purged_count{state=""replica""}" +vb_pending_ht_tombstone_purged_count,N/A,N/A,N/A,"kv_vb_ht_tombstone_purged_count{state=""pending""}" +vb_active_seqlist_count,N/A,N/A,N/A,"kv_vb_seqlist_count{state=""active""}" +vb_replica_seqlist_count,N/A,N/A,N/A,"kv_vb_seqlist_count{state=""replica""}" +vb_pending_seqlist_count,N/A,N/A,N/A,"kv_vb_seqlist_count{state=""pending""}" +vb_active_seqlist_deleted_count,N/A,N/A,N/A,"kv_vb_seqlist_deleted_count{state=""active""}" +vb_replica_seqlist_deleted_count,N/A,N/A,N/A,"kv_vb_seqlist_deleted_count{state=""replica""}" +vb_pending_seqlist_deleted_count,N/A,N/A,N/A,"kv_vb_seqlist_deleted_count{state=""pending""}" +vb_active_seqlist_purged_count,N/A,N/A,N/A,"kv_vb_seqlist_purged_count{state=""active""}" +vb_replica_seqlist_purged_count,N/A,N/A,N/A,"kv_vb_seqlist_purged_count{state=""replica""}" +vb_pending_seqlist_purged_count,N/A,N/A,N/A,"kv_vb_seqlist_purged_count{state=""pending""}" +vb_active_seqlist_read_range_count,N/A,N/A,N/A,"kv_vb_seqlist_read_range_count{state=""active""}" +vb_replica_seqlist_read_range_count,N/A,N/A,N/A,"kv_vb_seqlist_read_range_count{state=""replica""}" +vb_pending_seqlist_read_range_count,N/A,N/A,N/A,"kv_vb_seqlist_read_range_count{state=""pending""}" +vb_active_seqlist_stale_count,N/A,N/A,N/A,"kv_vb_seqlist_stale_count{state=""active""}" +vb_replica_seqlist_stale_count,N/A,N/A,N/A,"kv_vb_seqlist_stale_count{state=""replica""}" 
+vb_pending_seqlist_stale_count,N/A,N/A,N/A,"kv_vb_seqlist_stale_count{state=""pending""}" +vb_active_seqlist_stale_value_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_value_bytes{state=""active""}" +vb_replica_seqlist_stale_value_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_value_bytes{state=""replica""}" +vb_pending_seqlist_stale_value_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_value_bytes{state=""pending""}" +vb_active_seqlist_stale_metadata_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_metadata_bytes{state=""active""}" +vb_replica_seqlist_stale_metadata_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_metadata_bytes{state=""replica""}" +vb_pending_seqlist_stale_metadata_bytes,N/A,N/A,N/A,"kv_vb_seqlist_stale_metadata_bytes{state=""pending""}" +manifest_uid,N/A,N/A,N/A,kv_manifest_uid{} +force,N/A,N/A,N/A,kv_manifest_force{} +name,N/A,N/A,N/A,kv_collection_name{} +scope_name,N/A,N/A,N/A,kv_collection_scope_name{} +maxTTL,N/A,N/A,N/A,kv_collection_maxTTL_seconds{} +name,N/A,N/A,N/A,kv_scope_name{} +collections,N/A,N/A,N/A,kv_scope_collection_count{} +collections_mem_used,N/A,N/A,N/A,kv_collection_mem_used_bytes{} +items,N/A,N/A,N/A,kv_collection_item_count{} +disk_size,N/A,N/A,N/A,kv_collection_disk_size_bytes{} +ops_store,N/A,N/A,N/A,"kv_collection_ops{op=""store""}" +ops_delete,N/A,N/A,N/A,"kv_collection_ops{op=""delete""}" +ops_get,N/A,N/A,N/A,"kv_collection_ops{op=""get""}" +N/A,N/A,cbbucketstat_couch_docs_fragmentation,cbpernodebucket_couch_docs_fragmentation,((kv_ep_db_file_size_bytes - kv_ep_db_history_file_size_bytes) - kv_ep_db_data_size_bytes) / (kv_ep_db_file_size_bytes - kv_ep_db_history_file_size_bytes) \ No newline at end of file diff --git a/modules/metrics-reference/attachments/n1ql_cross_reference.csv b/modules/metrics-reference/attachments/n1ql_cross_reference.csv new file mode 100644 index 0000000000..6aa6001a8f --- /dev/null +++ b/modules/metrics-reference/attachments/n1ql_cross_reference.csv @@ -0,0 +1,20 @@ +Couchbase Server pre-7.0,Couchbase Exporter,Couchbase Server 7.0+ +query_active_requests,cbquery_active_requests,n1ql_active_requests +query_avg_req_time,cbquery_avg_req_time,n1ql_request_time / n1ql_requests +query_avg_response_size,cbquery_avg_response_size,n1ql_result_size / n1ql_requests +query_avg_result_count,cbquery_avg_result_count,n1ql_result_count / n1ql_requests +query_avg_svc_time,cbquery_avg_svc_time,n1ql_service_time / n1ql_requests +query_errors,cbquery_errors,n1ql_errors +query_invalid_requests,cbquery_invalid_requests,n1ql_invalid_requests +query_queued_requests,cbquery_queued_requests,n1ql_queued_requests +query_request_time,cbquery_request_time,n1ql_request_time +query_requests,cbquery_requests,n1ql_requests +query_requests_1000ms,cbquery_requests_1000ms,n1ql_requests_1000ms +query_requests_250ms,cbquery_requests_250ms,n1ql_requests_250ms +query_requests_5000ms,cbquery_requests_5000ms,n1ql_requests_5000ms +query_requests_500ms,cbquery_requests_500ms,n1ql_requests_500ms +query_result_count,cbquery_result_count,n1ql_result_count +query_result_size,cbquery_result_size,n1ql_result_size +query_selects,cbquery_selects,n1ql_selects +query_service_time,cbquery_service_time,n1ql_service_time +query_warnings,cbquery_warnings,n1ql_warnings \ No newline at end of file diff --git a/modules/metrics-reference/attachments/xdcr_cross_reference.csv b/modules/metrics-reference/attachments/xdcr_cross_reference.csv new file mode 100644 index 0000000000..96369bb250 --- /dev/null +++ b/modules/metrics-reference/attachments/xdcr_cross_reference.csv @@ -0,0 +1,54 @@ +Couchbase Server pre-7.0,Couchbase 
Server 7.0+ +add_docs_cas_changed,xdcr_add_docs_cas_changed_total +add_docs_written,xdcr_add_docs_written_total +changes_left,xdcr_changes_left_total +data_merged,xdcr_data_merged_bytes +data_replicated,xdcr_data_replicated_bytes +datapool_failed_gets,xdcr_datapool_failed_gets_total +dcp_datach_length,xdcr_dcp_datach_length_total +dcp_dispatch_time,xdcr_dcp_dispatch_time_seconds +deletion_docs_cas_changed,xdcr_deletion_docs_cas_changed_total +deletion_docs_written,xdcr_deletion_docs_written_total +deletion_failed_cr_source,xdcr_deletion_failed_cr_source_total +deletion_filtered,xdcr_deletion_filtered_total +deletion_received_from_dcp,xdcr_deletion_received_from_dcp_total +deletion_target_docs_skipped,xdcr_deletion_target_docs_skipped_total +docs_checked,xdcr_docs_checked_total +docs_cloned,xdcr_docs_cloned_total +docs_failed_cr_source,xdcr_docs_failed_cr_source_total +docs_filtered,xdcr_docs_filtered_total +docs_merge_cas_changed,xdcr_docs_merge_cas_changed_total +docs_merged,xdcr_docs_merged_total +docs_opt_repd,xdcr_docs_opt_repd_total +docs_processed,xdcr_docs_processed_total +docs_received_from_dcp,xdcr_docs_received_from_dcp +docs_rep_queue,xdcr_docs_rep_queue_total +docs_unable_to_filter,xdcr_docs_unable_to_filter_total +docs_written,xdcr_docs_written_total +expiry_docs_merge_failed,xdcr_expiry_docs_merge_failed_total +expiry_docs_merged,xdcr_expiry_docs_merged_total +expiry_docs_written,xdcr_expiry_docs_written_total +expiry_failed_cr_source,xdcr_expiry_failed_cr_source_total +expiry_filtered,xdcr_expiry_filtered_total +expiry_merge_cas_changed,xdcr_expiry_merge_cas_changed_total +expiry_received_from_dcp,xdcr_expiry_received_from_dcp_total +expiry_stripped,xdcr_expiry_stripped_total +expiry_target_docs_skipped,xdcr_expiry_target_docs_skipped_total +num_checkpoints,xdcr_num_checkpoints_total +num_failedckpts,xdcr_num_failedckpts_total +resp_wait_time,xdcr_resp_wait_time_seconds +set_docs_cas_changed,xdcr_set_docs_cas_changed_total +set_docs_written,xdcr_set_docs_written_total +set_failed_cr_source,xdcr_set_failed_cr_source_total +set_filtered,xdcr_set_filtered_total +set_received_from_dcp,xdcr_set_received_from_dcp_total +set_target_docs_skipped,xdcr_set_target_docs_skipped_total +size_rep_queue,xdcr_size_rep_queue_bytes +target_docs_skipped,xdcr_target_docs_skipped_total +throttle_latency,xdcr_throttle_latency_seconds +throughput_throttle_latency,xdcr_throughput_throttle_latency_seconds +time_committing,xdcr_time_committing_seconds +wtavg_docs_latency,xdcr_wtavg_docs_latency_seconds +wtavg_get_doc_latency,xdcr_wtavg_get_doc_latency_seconds +wtavg_merge_latency,xdcr_wtavg_merge_latency_seconds +wtavg_meta_latency,xdcr_wtavg_meta_latency_seconds \ No newline at end of file diff --git a/modules/metrics-reference/pages/analytics-service-metrics-cross-reference.adoc b/modules/metrics-reference/pages/analytics-service-metrics-cross-reference.adoc new file mode 100644 index 0000000000..6d857c24ff --- /dev/null +++ b/modules/metrics-reference/pages/analytics-service-metrics-cross-reference.adoc @@ -0,0 +1,15 @@ += Analytics Service Metrics Cross Reference +:description: A cross-referenced table of the metrics provided by the Analytics Service as named by various generations of reporting tools. + +[abstract] +{description} + +See xref:analytics-service-metrics.adoc[] for a full description of all the Analytics Service metrics. + +The following table lets you look up a metric name you may know from an alternative supported or legacy reporting tool.
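+For example, once you have looked up a 7.0+ name in the table below, you can check it against a live cluster through the same range endpoint used elsewhere in this reference.
+The following sketch assumes a node at `localhost:8091` with example credentials, and uses `cbas_heap_memory_used_bytes` purely as a stand-in for whichever metric you looked up:
+
+[source, shell]
+----
+# Retrieve an Analytics Service metric by its 7.0+ name.
+curl -X GET --location "http://localhost:8091/pools/default/stats/range/cbas_heap_memory_used_bytes" \
+    --basic --user Administrator:password
+----
+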
+ +.Analytics Service Metrics Cross Reference +[%header, format=csv] +|=== +include::attachment$cbas_cross_reference.csv[] +|=== \ No newline at end of file diff --git a/modules/metrics-reference/pages/analytics-service-metrics.adoc b/modules/metrics-reference/pages/analytics-service-metrics.adoc index d4e1cd439f..143bdcc1f6 100644 --- a/modules/metrics-reference/pages/analytics-service-metrics.adoc +++ b/modules/metrics-reference/pages/analytics-service-metrics.adoc @@ -6,6 +6,8 @@ The following Analytics Service metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +See xref:analytics-service-metrics-cross-reference.adoc[] if you are looking for a metric name you know from an alternative supported or legacy tool. + [template,attachment$cbas_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/pages/backup-service-metrics.adoc b/modules/metrics-reference/pages/backup-service-metrics.adoc index 5ceaa4bc12..4a01c9b86a 100644 --- a/modules/metrics-reference/pages/backup-service-metrics.adoc +++ b/modules/metrics-reference/pages/backup-service-metrics.adoc @@ -6,6 +6,9 @@ The following Backup Service metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +include::partial$histogram-sidebar.adoc[] + + [template,attachment$backup_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/pages/data-service-metrics-cross-reference.adoc b/modules/metrics-reference/pages/data-service-metrics-cross-reference.adoc new file mode 100644 index 0000000000..00b5ab948f --- /dev/null +++ b/modules/metrics-reference/pages/data-service-metrics-cross-reference.adoc @@ -0,0 +1,15 @@ += Data Service Metrics Cross Reference +:description: A cross-referenced table of the metrics provided by the Data Service as named by various generations of reporting tools. + +[abstract] +{description} + +See xref:data-service-metrics.adoc[] for a full description of all the Data Service metrics. + +The following table lets you look up a metric name you may know from an alternative supported or legacy reporting tool. + +.Data Service Metrics Cross Reference +[%header, format=csv] +|=== +include::attachment$kv_cross_reference.csv[] +|=== diff --git a/modules/metrics-reference/pages/data-service-metrics.adoc b/modules/metrics-reference/pages/data-service-metrics.adoc index 6be34ffc03..ccc1e4a9a2 100644 --- a/modules/metrics-reference/pages/data-service-metrics.adoc +++ b/modules/metrics-reference/pages/data-service-metrics.adoc @@ -6,7 +6,13 @@ The following Data Service metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +See xref:data-service-metrics-cross-reference.adoc[] if you are looking for a metric name you know from an alternative supported or legacy tool. + +include::partial$histogram-sidebar.adoc[] + [template,attachment$kv_metrics_metadata.json] -- include::partial$metrics.hbs[] -- + + diff --git a/modules/metrics-reference/pages/index-service-metrics-cross-reference.adoc b/modules/metrics-reference/pages/index-service-metrics-cross-reference.adoc new file mode 100644 index 0000000000..23cc2c9c73 --- /dev/null +++ b/modules/metrics-reference/pages/index-service-metrics-cross-reference.adoc @@ -0,0 +1,15 @@ += Index Service Metrics Cross Reference +:description: A cross-referenced table of the metrics provided by the Index Service as named by various generations of reporting tools.
+ +[abstract] +{description} + +See xref:index-service-metrics.adoc[] for a full description of all the Index Service metrics. + +The following table lets you look up a metric name you may know from an alternative supported or legacy reporting tool. + +.Index Service Metrics Cross Reference +[%header, format=csv] +|=== +include::attachment$index_cross_reference.csv[] +|=== \ No newline at end of file diff --git a/modules/metrics-reference/pages/index-service-metrics.adoc b/modules/metrics-reference/pages/index-service-metrics.adoc index 5b9a273bf5..f240b6c671 100644 --- a/modules/metrics-reference/pages/index-service-metrics.adoc +++ b/modules/metrics-reference/pages/index-service-metrics.adoc @@ -6,6 +6,8 @@ The following Index-Service metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +See xref:index-service-metrics-cross-reference.adoc[] if you are looking for a metric name you know from an alternative supported or legacy tool. + [template,attachment$index_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/pages/ns-server-metrics.adoc b/modules/metrics-reference/pages/ns-server-metrics.adoc index 119a7a4025..d8ebb3ffc7 100644 --- a/modules/metrics-reference/pages/ns-server-metrics.adoc +++ b/modules/metrics-reference/pages/ns-server-metrics.adoc @@ -6,6 +6,8 @@ The following Cluster Manager metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +include::partial$histogram-sidebar.adoc[] + [template,attachment$cm_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/pages/query-service-metrics-cross-reference.adoc b/modules/metrics-reference/pages/query-service-metrics-cross-reference.adoc new file mode 100644 index 0000000000..ea92b56eb2 --- /dev/null +++ b/modules/metrics-reference/pages/query-service-metrics-cross-reference.adoc @@ -0,0 +1,15 @@ += Query Service Metrics Cross Reference +:description: A cross-referenced table of the metrics provided by the Query Service as named by various generations of reporting tools. + +[abstract] +{description} + +See xref:query-service-metrics.adoc[] for a full description of all the Query Service metrics. + +The following table lets you look up a metric name you may know from an alternative supported or legacy reporting tool. + +.Query Service Metrics Cross Reference +[%header, format=csv] +|=== +include::attachment$n1ql_cross_reference.csv[] +|=== \ No newline at end of file diff --git a/modules/metrics-reference/pages/query-service-metrics.adoc b/modules/metrics-reference/pages/query-service-metrics.adoc index 8697460d7f..2dcbd9d7e4 100644 --- a/modules/metrics-reference/pages/query-service-metrics.adoc +++ b/modules/metrics-reference/pages/query-service-metrics.adoc @@ -6,6 +6,22 @@ The following Query Service metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +As a brief introduction, here is how to build a REST command for the `n1ql_active_requests` metric. + +''' +.Using curl to retrieve Query Service metrics + +Run the following command from your shell to get the total number of active requests.
+ +[source, shell] +---- +curl -X GET --location "http://localhost:8091/pools/default/stats/range/n1ql_active_requests" \ + --basic --user Administrator:password +---- + +''' +See xref:query-service-metrics-cross-reference.adoc[] if you are looking for a metric name you know from an alternative supported or legacy tool. + [template,attachment$n1ql_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/pages/xdcr-metrics-cross-reference.adoc b/modules/metrics-reference/pages/xdcr-metrics-cross-reference.adoc new file mode 100644 index 0000000000..e98e2560cd --- /dev/null +++ b/modules/metrics-reference/pages/xdcr-metrics-cross-reference.adoc @@ -0,0 +1,15 @@ += XDCR Metrics Cross Reference +:description: A cross-referenced table of the metrics provided by XDCR as named by various generations of reporting tools. + +[abstract] +{description} + +See xref:xdcr-metrics.adoc[] for a full description of all the XDCR metrics. + +The following table lets you look up a metric name you may know from an alternative supported or legacy reporting tool. + +.XDCR Metrics Cross Reference +[%header, format=csv] +|=== +include::attachment$xdcr_cross_reference.csv[] +|=== \ No newline at end of file diff --git a/modules/metrics-reference/pages/xdcr-metrics.adoc b/modules/metrics-reference/pages/xdcr-metrics.adoc index 4c27b2b189..8c77edb3c7 100644 --- a/modules/metrics-reference/pages/xdcr-metrics.adoc +++ b/modules/metrics-reference/pages/xdcr-metrics.adoc @@ -6,6 +6,8 @@ The following XDCR metrics can be queried by means of the REST APIs described in xref:rest-api:rest-statistics.adoc[Statistics]. +See xref:xdcr-metrics-cross-reference.adoc[] if you are looking for a metric name you know from an alternative supported or legacy tool. + [template,attachment$goxdcr_metrics_metadata.json] -- include::partial$metrics.hbs[] diff --git a/modules/metrics-reference/partials/histogram-sidebar.adoc b/modules/metrics-reference/partials/histogram-sidebar.adoc new file mode 100644 index 0000000000..0439095f5c --- /dev/null +++ b/modules/metrics-reference/partials/histogram-sidebar.adoc @@ -0,0 +1,12 @@ +[sidebar] +.Histograms +**** +Note that each histogram metric generates three time series, +with the following suffixes: + +* `_count` +* `_sum` +* `_bucket` + +Please refer to https://prometheus.io/docs/practices/histograms/[Prometheus Histograms and Summaries] for more information. +**** diff --git a/modules/n1ql/pages/n1ql-rest-api/admin.adoc b/modules/n1ql/pages/n1ql-rest-api/admin.adoc deleted file mode 100644 index b107c826f1..0000000000 --- a/modules/n1ql/pages/n1ql-rest-api/admin.adoc +++ /dev/null @@ -1,17 +0,0 @@ -= Query Admin REST API - -//// -These partials are created automatically by Swagger2Markup.
-Refer to https://github.com/couchbaselabs/cb-swagger -//// - -include::partial$n1ql-rest-api/admin/overview.adoc[tag=body] -include::partial$n1ql-rest-api/admin/paths.adoc[] -include::partial$n1ql-rest-api/admin/definitions.adoc[] -include::partial$n1ql-rest-api/admin/security.adoc[] - -== See Also - -* [[queryCleanupClientAttempts]][[queryCleanupLostAttempts]][[queryCleanupWindow]][[queryCompletedLimit]][[queryCompletedMaxPlanSize]][[queryCompletedThreshold]][[queryLogLevel]][[queryMaxParallelism]][[queryMemoryQuota]][[queryNodeQuota]][[queryNodeQuotaValPercent]][[queryNumAtrs]][[queryNumCpus]][[queryN1qlFeatCtrl]][[queryPipelineBatch]][[queryPipelineCap]][[queryPreparedLimit]][[queryScanCap]][[queryTimeout]][[queryTxTimeout]][[queryUseCBO]][[queryUseReplica]]For cluster-level settings, see the xref:rest-api:rest-cluster-query-settings.adoc#_settings[Cluster Query Settings API]. - -* [[atrcollection_req]][[client_context_id]][[controls_req]][[max_parallelism_req]][[memory_quota_req]][[numatrs_req]][[pipeline_batch_req]][[pipeline_cap_req]][[pretty_req]][[profile_req]][[scan_cap_req]][[timeout_req]][[tximplicit]][[use_cbo_req]][[use_replica_req]]For request-level parameters, see the xref:n1ql:n1ql-rest-api/index.adoc#_request_parameters[Query Service REST API]. \ No newline at end of file diff --git a/modules/n1ql/pages/n1ql-rest-api/functions.adoc b/modules/n1ql/pages/n1ql-rest-api/functions.adoc deleted file mode 100644 index b3f49b3d57..0000000000 --- a/modules/n1ql/pages/n1ql-rest-api/functions.adoc +++ /dev/null @@ -1,12 +0,0 @@ -= Query Functions REST API -:page-edition: Enterprise Edition - -//// -These partials are created automatically by Swagger2Markup. -Refer to https://github.com/couchbaselabs/cb-swagger -//// - -include::partial$n1ql-rest-api/functions/overview.adoc[tag=body] -include::partial$n1ql-rest-api/functions/paths.adoc[] -include::partial$n1ql-rest-api/functions/definitions.adoc[] -include::partial$n1ql-rest-api/functions/security.adoc[] \ No newline at end of file diff --git a/modules/n1ql/pages/n1ql-rest-api/index.adoc b/modules/n1ql/pages/n1ql-rest-api/index.adoc deleted file mode 100644 index 18667a2857..0000000000 --- a/modules/n1ql/pages/n1ql-rest-api/index.adoc +++ /dev/null @@ -1,17 +0,0 @@ -= Query Service REST API - -//// -These partials are created automatically by Swagger2Markup. -Refer to https://github.com/couchbaselabs/cb-swagger -//// - -include::partial$n1ql-rest-api/query/overview.adoc[tag=body] -include::partial$n1ql-rest-api/query/paths.adoc[] -include::partial$n1ql-rest-api/query/definitions.adoc[] -include::partial$n1ql-rest-api/query/security.adoc[] - -== See Also - -* [[queryMaxParallelism]][[queryMemoryQuota]][[queryNumAtrs]][[queryPipelineBatch]][[queryPipelineCap]][[queryScanCap]][[queryTimeout]][[queryTxTimeout]][[queryUseCBO]][[queryUseReplica]]For cluster-level settings, see the xref:rest-api:rest-cluster-query-settings.adoc#_settings[Cluster Query Settings API]. - -* [[atrcollection-srv]][[controls-srv]][[max-parallelism-srv]][[memory-quota-srv]][[numatrs-srv]][[pipeline-batch-srv]][[pipeline-cap-srv]][[pretty-srv]][[profile-srv]][[scan-cap-srv]][[timeout-srv]][[txtimeout-srv]][[use-cbo-srv]][[use-replica-srv]]For node-level settings, see the xref:n1ql:n1ql-rest-api/admin.adoc#_settings[Admin REST API]. 
\ No newline at end of file diff --git a/modules/rebalance-reference/pages/rebalance-reference.adoc b/modules/rebalance-reference/pages/rebalance-reference.adoc index 9a5735570b..ce0d8a5a2b 100644 --- a/modules/rebalance-reference/pages/rebalance-reference.adoc +++ b/modules/rebalance-reference/pages/rebalance-reference.adoc @@ -16,7 +16,7 @@ On conclusion of the rebalance, the report can be accessed in any of the followi * By means of the REST API, as described in xref:rest-api:rest-get-cluster-tasks.adoc[Getting Cluster Tasks]. -* By accessing the directory `/opt/couchbase/var/lib/couchbase/logs/reblance` on _any_ of the cluster nodes. +* By accessing the directory `/opt/couchbase/var/lib/couchbase/logs/rebalance` on _any_ of the cluster nodes. A rebalance report is maintained here for (up to) the last _five_ rebalances performed. Each report is provided as a `*.json` file, whose name indicates the time at which the report was run — for example, `rebalance_report_2020-03-17T11:10:17Z.json`. + diff --git a/modules/release-notes/pages/relnotes.adoc b/modules/release-notes/pages/relnotes.adoc index d606d44a39..e7fd70e6c1 100644 --- a/modules/release-notes/pages/relnotes.adoc +++ b/modules/release-notes/pages/relnotes.adoc @@ -1,7 +1,20 @@ = Release Notes for Couchbase Server 7.6 :page-aliases: analytics:releasenote -:description: Couchbase Server 7.6.0 introduces multiple new features and fixes, as well as some deprecations and removals. +:description: Couchbase Server 7.6.x introduces multiple new features and fixes, as well as some deprecations and removals. :page-toclevels: 2 +:stem: + +include::partial$docs-server-7.6.8-release-note.adoc[] + +include::partial$docs-server-7.6.7-release-note.adoc[] + +include::partial$docs-server-7.6.6-release-note.adoc[] + +include::partial$docs-server-7.6.5-release-note.adoc[] + +include::partial$docs-server-7.6.4-release-note.adoc[] + +include::partial$docs-server-7.6.3-release-note.adoc[] include::partial$docs-server-7.6.2-release-note.adoc[] diff --git a/modules/release-notes/partials/docs-server-7.6.0-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.0-release-note.adoc index 95d4a6f547..4bf6ec31b6 100644 --- a/modules/release-notes/partials/docs-server-7.6.0-release-note.adoc +++ b/modules/release-notes/partials/docs-server-7.6.0-release-note.adoc @@ -15,6 +15,7 @@ deprecated in Couchbase Server{nbsp}7.6: ** Amazon Linux 2 LTS x86 & ARM ** Microsoft Windows Server 2019 ** Ubuntu 20.04 LTS x86 & ARM +** SUSE Linux Enterprise Server Version 12 ** MacOS 12 (Monterey) x86 & ARM * We are removing these platforms from support (they are already deprecated): @@ -255,6 +256,43 @@ include::partial$index-upgrade-issue.adoc[] |=== +==== Query Service + +[#table-known-issues-760-query-service,cols="10,40,40"] +|=== +|Issue | Description | Workaround + +// tag::MB-64966[] + +| https://jira.issues.couchbase.com/browse/MB-64966[MB-64966] +| Scope-level user-defined functions (UDFs), sequences, and histogram data are stored in the `bucket/_system/_query` collection. + +``FLUSH``ing the bucket will delete the scoped UDF entries. +When the UDFs are executed, they will work until they are evicted from the cache. +After that, execution will always result in an error. + +Sequence metadata is also stored in the `bucket/_system/_query` collection. +``FLUSH``ing the bucket will delete the entries. Once evicted from the cache, sequences will return errors when executed.
 
 ==== Search Service
 [#table-known-issues-760-search-service, cols="10,40,40"]
 |===
diff --git a/modules/release-notes/partials/docs-server-7.6.1-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.1-release-note.adoc
index 72fd276df7..8d34d7c7df 100644
--- a/modules/release-notes/partials/docs-server-7.6.1-release-note.adoc
+++ b/modules/release-notes/partials/docs-server-7.6.1-release-note.adoc
@@ -48,6 +48,17 @@ include::partial$index-upgrade-issue.adoc[]
 
 |===
 
+==== Query Service
+
+[#table-known-issues-761-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+include::partial$docs-server-7.6.0-release-note.adoc[tag="MB-64966"]
+
+|===
+
+
 ==== Search Service
 [#table-known-issues-761-search-service, cols="10,40,40"]
 |===
diff --git a/modules/release-notes/partials/docs-server-7.6.2-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.2-release-note.adoc
index e72ed404f3..a3fb18e74f 100644
--- a/modules/release-notes/partials/docs-server-7.6.2-release-note.adoc
+++ b/modules/release-notes/partials/docs-server-7.6.2-release-note.adoc
@@ -61,7 +61,7 @@ The upgrade corrupted existing indexes, requiring you to drop and rebuild them.
 
 |===
 
-[#query_services_762]
+
 ==== Query Service
 [#table-fixed-issues-762-query-service,cols="10,40,40"]
 |===
@@ -80,6 +80,11 @@ Such requests don't affect other regular processing beyond occupying a servicer,
 Forcibly restarting the Query service will clear the issue.
 | Issue resolved.
 
+| https://issues.couchbase.com/browse/MB-61564[MB-61564]
+| An incorrect return code was given when attempting to create an index that already exists.
+The system returned the error: `200: index already exists`.
+| Issue resolved: the system now returns `409: index already exists`.
+
 | https://issues.couchbase.com/browse/MB-61764[MB-61764]
 | In rare cases, Couchbase Server could report an `IndexOutOfBoundsException` error if a {sqlpp} query contained a subquery with an `IN` clause.
 | Issue resolved.
@@ -189,14 +194,31 @@ and will not always retry when receiving them.
 
 This release contains the following known issues:
 
+==== Query Service
+
+[#table-known-issues-762-query-service, cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+// tag::MB-63414[]
+| https://jira.issues.couchbase.com/browse/MB-63414[MB-63414]
+| If `memory_quota` is set and Query uses a non-covered array index, queries can fail with a `Request has exceeded memory quota` error.
+| Disable the memory quota or https://www.couchbase.com/support/working-with-technical-support/[contact support] for alternatives.
+// end::MB-63414[]
+
+include::partial$docs-server-7.6.0-release-note.adoc[tag="MB-64966"]
+
+|===
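+
+As a hedged illustration of the first workaround, assuming default ports and administrator credentials, the cluster-level quota can be removed by setting `queryMemoryQuota` to `0` via the Query Settings REST API:
+
+[source,shell]
+----
+# A queryMemoryQuota of 0 disables the per-request memory quota.
+curl -X POST -u Administrator:password \
+  http://localhost:8091/settings/querySettings \
+  -d queryMemoryQuota=0
+----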
 
 ==== Index Service
 
 [#table-known-issues-762-index-service, cols="10,40,40"]
 |===
 |Issue | Description | Workaround
 
+// tag::MB-62220[]
 | https://issues.couchbase.com/browse/MB-62220[MB-62220]
 | Dropped replicas are not rebuilt during swap rebalance.
 | Drop and then recreate the indexes.
+// end::MB-62220[]
 
 |===
diff --git a/modules/release-notes/partials/docs-server-7.6.3-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.3-release-note.adoc
new file mode 100644
index 0000000000..48e3bbcb21
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.3-release-note.adoc
@@ -0,0 +1,137 @@
+== Release 7.6.3 (September 2024)
+
+Couchbase Server 7.6.3 was released in September 2024.
+This maintenance release contains new features and fixes several known issues.
+
+For detailed information on new features and enhancements, please see xref:introduction:whats-new.adoc[].
+
+[#deprecated-7-6-3]
+=== Deprecated Platforms
+
+The use of x86 processors that do not support Advanced Vector Extensions 2 (AVX2) is deprecated in version 7.6.3.
+These instructions are available in most Intel processors produced since 2013 and AMD processors produced since 2015.
+See xref:install:pre-install.adoc#x86-processors[System Requirements] for details.
+
+
+[#fixed-issues-763]
+=== Fixed Issues
+
+This release contains the following fixes:
+
+==== Cluster Manager
+
+[#table-fixed-issues-763-cluster-manager,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63070[MB-63070]
+| An issue occurred where `cbcollect_info` did not process some `` tags, especially when they were present at the end of the file or inside binary data.
+As a result, some logs that should have been redacted were not redacted.
+| Issue resolved.
+
+|===
+
+==== Storage Engine
+[#table-fixed-issues-763-storage-engine,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62984[MB-62984]
+| When a Couchbase Data Service experienced a hard failover or crashed with data loss, DCP clients (like those used with Elasticsearch and Kafka) sometimes received incomplete or partial snapshots of the data.
+This incomplete data sometimes led to rare situations where the Data Service crashed repeatedly.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63261[MB-63261]
+| An issue caused by a race condition in the index recovery code path sometimes resulted in an item count mismatch and wrong query results.
+Prior to Release 7.6.0, this issue sometimes occurred during Indexer restart.
+However, as part of the file-based rebalance process introduced in 7.6.0, a recovery of the index is performed after the index is moved, which increases the likelihood that this race condition might be reached.
+| Issue resolved.
+
+|===
+
+
+==== Query Service
+[#table-fixed-issues-763-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63024[MB-63024]
+a|
+A query sometimes returned the error: `Nested aggregates not allowed` under the following conditions:
+
+ * the query contains in its `FROM` clause a subquery used as the inner side of a join
+ * the subquery is used as the inner side of a nested-loop join in the query plan
+ * the subquery only accesses a single keyspace (i.e., no joins/nests/unnests)
+ * the subquery contains one or more aggregates (e.g., `COUNT`) in its projection
+ * the subquery contains a `GROUP BY` clause, and the `GROUP BY` list does not match the index key order of any secondary indexes defined
+ * a secondary index exists that allows index group/aggregate pushdown for the subquery
+ * the cost-based optimizer is `ON` (which is the default), and a previous `UPDATE STATISTICS`/`ANALYZE` command has been run on the keyspace referenced in the subquery.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63060[MB-63060]
+| An issue occurred when migrating nested UDFs to any 7.6.x release from a previous version.
+In cases where an inline UDF called another UDF, query nodes became unresponsive.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63078[MB-63078]
+a| When upgrading to a 7.6.x release, if buckets were loaded by Query nodes before they were fully migrated to the 7.6.x format, Query continually reloaded the bucket on access, leading to degraded performance.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63147[MB-63147]
+| An issue occurred with user-defined functions (UDFs) that contained queries defined with a common-table expression (CTE).
+If the CTE contained references to the UDF arguments, the execution of the UDF returned an error indicating “correlation reference `` is not allowed.”
+| Issue resolved.
+
+|===
+
+==== Index Service
+[#table-fixed-issues-763-index-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63193[MB-63193]
+| An issue in the rebalancer code caused several partitions to miss streaming mutations when partitioned indexes were moved during file-based rebalance.
+This issue resulted in corrupt indexes, as not all data was processed.
+The issue occurred only with partitioned indexes when file-based rebalance was enabled.
+| Issue resolved.
+
+|===
+
+
+==== Analytics Service
+[#table-fixed-issues-763-analytics-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62923[MB-62923]
+| An issue with the HTTP client lifecycle resulted in leaked TCP connections between the Analytics service processes during internal credentials rotation, eventually leading to ephemeral port exhaustion.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-62949[MB-62949]
+| When a query encountered a failure, the details of the exception could get masked by subsequent exceptions, leading to the loss of the root cause of the query failure.
+| Issue resolved.
+|===
+
+
+[#known-issues-763]
+=== Known Issues
+
+This release contains the following known issues:
+
+==== Query Service
+
+[#table-known-issues-763-query-service, cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+| https://jira.issues.couchbase.com/browse/MB-63414[MB-63414]
+| If `memory_quota` is set and Query uses a non-covered array index, queries can fail with a `Request has exceeded memory quota` error.
+a| Disable the memory quota or https://www.couchbase.com/support/working-with-technical-support/[contact support] for alternatives.
+
+NOTE: This issue is fixed on Capella.
+
+include::partial$docs-server-7.6.0-release-note.adoc[tag="MB-64966"]
+
+|===
+
+==== Index Service
+[#table-known-issues-763-index-service, cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+include::partial$docs-server-7.6.2-release-note.adoc[tag="MB-62220"]
+
+|===
diff --git a/modules/release-notes/partials/docs-server-7.6.4-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.4-release-note.adoc
new file mode 100644
index 0000000000..a184233b4f
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.4-release-note.adoc
@@ -0,0 +1,214 @@
+== Release 7.6.4 (December 2024)
+
+Couchbase Server 7.6.4 was released in December 2024.
+This maintenance release contains new features and fixes several known issues.
+
+For detailed information on new features and enhancements, please see xref:introduction:whats-new.adoc[].
+
+=== Deprecated Platforms
+
+Windows 10 is deprecated in Couchbase Server 7.6.4.
+A future version will remove support for this operating system.
+
+[#fixed-issues-764]
+=== Fixed Issues
+
+This release contains the following fixes:
+
+==== Cluster Manager
+
+[#table-fixed-issues-764-cluster-manager,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62219[MB-62219]
+| The cluster manager was unable to rotate the root certificate when a new root used the same private key as the old root.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-62413[MB-62413]
+| When a username is extracted from a client certificate, the candidate username is checked for its existence in Couchbase Server.
+If such a user doesn't exist, the algorithm now doesn't stop, but tries to extract another candidate username.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63981[MB-63981]
+| The `/prometheus_sd_config` endpoint provides a new option, `clusterLabels`, which specifies that the cluster name and cluster UUID be returned.
+Prometheus will use them as labels in time series data.
+| This provides a method to guarantee uniqueness for stats with the same name gathered from multiple clusters.
+
+|===
+
+==== Storage Engine
+[#table-fixed-issues-764-storage-engine,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63261[MB-63261]
+| An issue caused by a race condition in the index recovery code path could result in an item count mismatch and wrong query results.
+
+Prior to Release 7.6.0, this issue could occur during an Indexer restart.
+However, as part of the file-based rebalance process introduced in 7.6.0, a recovery of the index is performed after the index is moved, which increases the likelihood that the race condition might be reached.
+| The race condition has been addressed and the issue is resolved.
+
+|===
+
+==== XDCR
+[#table-fixed-issues-764-xdcr,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62383[MB-62383]
+| In rare situations, a document with a very large CAS could be inserted into a bucket.
+It was possible for XDCR to replicate this document with a malformed CAS to other clusters.
+| XDCR now has built-in guardrails to prevent documents with a malformed CAS beyond a certain drift threshold from being replicated, and notifies the user via the UI and logs if this situation occurs.
+
+| https://jira.issues.couchbase.com/browse/MB-62410[MB-62410]
+| Connection pre-check did not support all inputs normally accepted by XDCR.
+| XDCR connection pre-check now supports alternate addresses and DNS SRV, and works identically to XDCR remote cluster references.
+
+| https://jira.issues.couchbase.com/browse/MB-63738[MB-63738]
+| The XDCR dashboard on the source cluster now shows a UI alert when a pipeline is stuck due to `EACCESS` errors while writing to the target cluster.
+| Issue resolved.
+
+|===
+
+
+==== Query Service
+[#table-fixed-issues-764-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62932[MB-62932]
+| The execution of prepared statements containing (`*`) expressions in the projection could return incorrect results in scenarios where the encoded plans of such statements needed to be decoded before execution.
+
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63016[MB-63016]
+| If an index was defined with a `CASE` statement as an index key, the `CASE` statement contained a not-equal expression, and the same `CASE` statement was used in the projection clause of a query, then the covering index scan was not performed properly.
+| The issue is fixed, and a covering index scan is now used appropriately.
+
+| https://jira.issues.couchbase.com/browse/MB-63069[MB-63069]
+| In versions `7.6.2` and `7.6.3`, queries with a `FROM` clause containing an `UNNEST`, and an `ORDER BY` with a `LIMIT` or `LIMIT + OFFSET` clause, could in some cases return incorrect results when the `ORDER` could not be pushed down to the indexer.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63414[MB-63414]
+| If `memory_quota` was set and Query used a non-covered array index, queries could fail with a `Request has exceeded memory quota` error.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-64155[MB-64155]
+a| In previous versions of Couchbase Server, a clear delimiter between a numeric constant and a keyword was not required.
+
+| From version `7.6.4+`, a statement containing text such as `… 1AND …` will now correctly generate a syntax error indicating it is an invalid number.
+
+Correct such instances by inserting a space between the numeric constant and the following token, e.g. `… 1 AND …`.
+
+| https://jira.issues.couchbase.com/browse/MB-63518[MB-63518]
+a| An API issue has been resolved where attempting to set the completed-stream-size to zero when it was already zero via the administrative REST API would:
+
+* Prevent stream completion.
+
+* Block access to the `system:vitals` keyspace or `vitals` endpoint.
+
+* Require a full Query process restart on affected nodes as the only previous workaround.
+
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63420[MB-63420]
+a| A server panic was resolved when processing an aggregate function with a less common reference structure, such as `MAX([d.date, d][1])`.
+The more standard form `MAX([d.date, d])[1]` was not impacted.
+The fix ensures stable query processing for this edge case.
+
+| Issue resolved.
+
+|===
+
+==== Eventing Service
+[#table-fixed-issues-764-eventing-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63014[MB-63014]
+| In earlier versions of Couchbase, during Eventing function deployment or resumption, KV service disruptions (like network partitions) could leave LCB instances in an unrecoverable, unhealthy state.
+This caused the Eventing function to become permanently stuck, with subsequent operations failing to return control, rendering the function non-functional.
+
+| This issue has been resolved.
+Couchbase Eventing now actively monitors LCB instance statuses and implements a "lazy" retry mechanism for bootstrapping unhealthy instances.
+The retry process continues until the operation times out, which is dynamically determined by the script timeout.
+This ensures that JavaScript code remains responsive, with mutations timing out gracefully if bootstrap attempts fail, instead of causing a complete function lockup.
+
+
+|===
+
+==== Index Service
+[#table-fixed-issues-764-index-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-61387[MB-61387]
+| To speed up the initial process of building an index, the Index service has an optimization that skips checking for existing entries and directly adds new ones.
+This optimization is crucial for the initial build, but should not be used for updates to existing indexes.
+Unfortunately, in a rare sequence of events, all indexes might be enabled for this optimization, leading to duplicate entries in the storage layer and, in turn, incorrect results.
+| The optimization is now only enabled for those indexes that are undergoing an initial build.
+
+| https://issues.couchbase.com/browse/MB-62220[MB-62220]
+| An optimization is in place that avoids unnecessary index movements when a swap rebalance is performed.
+A minor bug in this optimization interfered with rebuilding user-dropped or lost replicas during such a rebalance, because new incoming nodes were not considered when placing lost indexes.
+| Index placement now uses a sorted pseudo-random order that gives higher priority to new incoming nodes, followed by old nodes staying in the cluster, when placing lost indexes.
+Hence, lost replicas can be repaired.
+
+| https://jira.issues.couchbase.com/browse/MB-62919[MB-62919]
+| After enabling GSI shard-based rebalance from the Web UI and saving the settings, it was not possible to disable the same setting by unticking the box.
+The box was greyed out and could not be interacted with.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-63276[MB-63276]
+| An issue in the rebalancer caused several partitions to miss streaming mutations when partitioned indexes were moved during file-based rebalances.
+This issue resulted in corrupt indexes, as not all data was processed.
+
+The issue occurred only with partitioned indexes with file-based rebalances enabled.
+| Issue resolved.
+
+|===
+
+
+==== Search Service
+[#table-fixed-issues-764-search-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62427[MB-62427]
+a| In index definitions from version 6.x, the `segmentVersion` parameter is absent; it previously defaulted to v11.
+With 7.6, the default was updated to v16, so when `segmentVersion` is absent, the v16 code takes effect.
+
+An issue in the v16 code while interpreting data with no vector fields caused an alignment panic.
+This panic is fixed in 7.6.4, allowing 6.x index definitions to work correctly in 7.6.4.
+
+* This problem occurs only for index definitions that survive an upgrade from 6.x to 7.x and then 7.6.x without being “re-built”, meaning they still use the v11 file format.
+
+* Indexes that were introduced in server version 7.x or later, or re-built after the cluster was fully upgraded to 7.x (which have a `segmentVersion: 15` setting within their index definition), will not encounter this issue, because they invoke the v15 code.
+
+| In summary, version 7.6.4 now supports upgrades of 6.x index definitions.
+However, Couchbase recommends upgrading indexes to the 15+ segment version format for improvements in indexing footprint and performance.
+
+Issue resolved.
+
+|===
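+
+For reference, the segment version appears in the `store` settings of an index definition. A minimal sketch of the relevant fragment (values assumed):
+
+[source,json]
+----
+"store": {
+  "indexType": "scorch",
+  "segmentVersion": 16
+}
+----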
+
+==== Backup Service
+[#table-fixed-issues-764-backup-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-61013[MB-61013]
+| The Backup service incorrectly allowed backups to be created in the data directory, where the server could delete the backup files.
+| The server no longer allows archives to be created in the data directory.
+
+|===
+
+[#known-issues-764]
+=== Known Issues
+
+This release contains the following known issues:
+
+==== Query Service
+
+[#table-known-issues-764-query-service, cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+include::partial$docs-server-7.6.0-release-note.adoc[tag="MB-64966"]
+
+|===
+
+
diff --git a/modules/release-notes/partials/docs-server-7.6.5-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.5-release-note.adoc
new file mode 100644
index 0000000000..275c4b4a45
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.5-release-note.adoc
@@ -0,0 +1,103 @@
+== Release 7.6.5 (January 2025)
+
+Couchbase Server 7.6.5 was released in January 2025.
+This maintenance release contains fixes for several known issues.
+
+For detailed information on new features and enhancements, see xref:introduction:whats-new.adoc[].
+
+[#fixed-issues-765]
+=== Fixed Issues
+
+This release contains the following fixes:
+
+
+==== Data Service
+
+[#table-fixed-issues-765-data-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+
+| https://jira.issues.couchbase.com/browse/MB-64353[MB-64353]
+| Hard failover of ephemeral buckets could lead to the rollback of DCP clients back to zero.
+
+| Fixed in version `7.6.3`.
+
+Upgrading to `7.6.5` will enable the fix.
+
+| https://jira.issues.couchbase.com/browse/MB-64742[MB-64742]
+a| A bug in the plasma tracking statistics incorrectly marked stale recovery points in the recovery log as valid data.
+This caused two problems:
+
+* At low mutation rates, the log cleaning process ran slowly and couldn't effectively trim the recovery point history.
+* At higher mutation rates, the system worked around this issue because mutations would increase the fragmentation ratio enough to trigger the log cleaner, which could then trim the recovery point history despite the tracking statistics bug.
+
+| The system now marks only the latest recovery point that exists in both the recovery log and data log.
+This change effectively limits the recovery point history list to a single entry in the recovery log.
+The plasma tracking statistics have been fixed to correctly identify older recovery points as stale data in the recovery log.
+These improvements allow the log cleaner to run efficiently even at low mutation rates.
+
+
+|===
+
+==== Query Service
+
+[#table-fixed-issues-765-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-64684[MB-64684]
+a| An issue occurred when an inline user-defined function (UDF) included a query with a nested `ARRAY` construct, and the innermost `ARRAY` construct referred to both:
+
+* a UDF argument, and
+* the binding variable of an outer `ARRAY` construct.
+
+In these cases, UDF creation or execution could fail.
+
+| Issue resolved.
+
+|===
+
+
+==== Search Service
+
+[#table-fixed-issues-765-search-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+| https://jira.issues.couchbase.com/browse/MB-64652[MB-64652]
+a| An issue in versioning caused an online upgrade-rebalance operation to fail, on account of a planning algorithm improvement.
+The bug also impacted the failover-recovery online upgrade path.
+
+NOTE: This issue only surfaced when an upgrade was attempted over a system with search indexes from `7.6.[0–3]` to `7.6.4`.
+
+| The issue has been resolved in version `7.6.5`.
+Our recommendation is to avoid the issue by moving directly to this version.
+
+| https://jira.issues.couchbase.com/browse/MB-64764[MB-64764]
+| If an index was created during a rebalance, and the rebalance was stopped before plans were computed for that index, then no plans or partitions were created for the index until a subsequent rebalance was completed.
+
+In `7.6.4` we introduced a change to compute the plans for indexes created during a rebalance, even when the rebalance stops midway.
+This inadvertently caused a regression where a subsequent rebalance could get stuck and fail continuously on timeouts.
+
+| In `7.6.5` we reverted the change: if the user aborts an ongoing rebalance, then plans for an index introduced during the operation may not be deployed until a subsequent rebalance operation is triggered and completed.
+
+|===
+
+
+[#known-issues-765]
+=== Known Issues
+
+This release contains the following known issues:
+
+==== Query Service
+
+[#table-known-issues-765-query-service, cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+include::partial$docs-server-7.6.0-release-note.adoc[tag="MB-64966"]
+
+|===
+
+
diff --git a/modules/release-notes/partials/docs-server-7.6.6-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.6-release-note.adoc
new file mode 100644
index 0000000000..6a1e8d21fa
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.6-release-note.adoc
@@ -0,0 +1,192 @@
+== Release 7.6.6 (May 2025)
+
+Couchbase Server 7.6.6 was released in May 2025.
+This maintenance release contains fixes for several known issues.
+
+For detailed information on new features and enhancements, see xref:introduction:whats-new.adoc[].
+
+[#fixed-issues-766]
+=== Fixed Issues
+
+This release contains the following fixes:
+
+==== Storage Engine
+
+[#table-fixed-issues-766-storage-engine,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-64353[MB-64353]
+a| Hard failover of ephemeral buckets could lead to the rollback of DCP clients back to zero.
+There is no workaround for this; the only solution is to upgrade to a version with the fix.
+
+7.6.3 is the oldest release with the fix.
+
+*Details:*
+
+* Hard failover of an ephemeral bucket.
+
+* Replica promoted to active.
+
+* DCP client reconnects to the new active.
+
+* DCP clients roll back to zero and have to re-stream all the mutations from the new active.
+
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-64742[MB-64742]
+| An issue in the plasma tracking statistics flagged stale recovery points in the recovery log as valid data.
+This made log cleaning run slowly at a low mutation rate, and therefore unable to trim the recovery point history effectively.
+
+At a higher mutation rate, the mutations alone could push the fragmentation ratio high enough to start the log cleaner, which was therefore able to trim the recovery point history despite the issue with the tracking statistics.
+
+| Plasma now marks the latest version of the recovery point that exists in both the recovery log and the data log, effectively keeping the recovery point history list in the recovery log to a single entry.
+In addition, the plasma tracking statistics have been fixed so that older recovery points are no longer treated as valid data in the recovery log.
+
+This allows the log cleaner to run even at a low mutation rate.
+
+| https://jira.issues.couchbase.com/browse/MB-65738[MB-65738]
+| An optimization to increase the memory residency of active plasma items caused an issue: with this optimization, in-memory compression of plasma pages could, under certain conditions, result in high CPU usage.
+
+Prior to Release `7.6.0`, this issue did not occur, as the optimization was not present.
+
+| Issue resolved.
+
+
+|===
+
+
+==== Data Service
+
+[#table-fixed-issues-766-data-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63827[MB-63827]
+| DCP connection metrics for connection names that do not conform to the server format were not aggregated by connection type when exposed to Prometheus, potentially producing a large number of time series.
+
+| The metrics are now aggregated and exposed with `connection_type="_unknown"`.
+
+
+|===
+
+==== XDCR
+
+[#table-fixed-issues-766-xdcr,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-62309[MB-62309]
+| Tombstones were not replicated if the binary filter was turned on.
+| Issue resolved.
+
+| https://jira.issues.couchbase.com/browse/MB-64563[MB-64563]
+| If users do not want a replication to be automatically deleted upon source or target bucket deletion and/or recreation, they can set the `skipReplSpecAutoGc` replication setting to `true` upon replication creation.
+
+In situations where the replication would otherwise have been deleted, it is automatically paused instead, and a persistent error message is logged to the UI.
+
+Users are expected to manually execute the recovery action by deleting the replication and re-creating a new one, if necessary.
+
+| Issue resolved.
+
+
+|===
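+
+As a hedged sketch of how `skipReplSpecAutoGc` might be supplied at creation time, assuming the standard replication-creation endpoint and hypothetical bucket and cluster names:
+
+[source,shell]
+----
+# Create a replication that is paused, rather than deleted, on bucket deletion/recreation.
+curl -X POST -u Administrator:password \
+  http://localhost:8091/controller/createReplication \
+  -d fromBucket=source_bucket \
+  -d toCluster=remote_cluster \
+  -d toBucket=target_bucket \
+  -d replicationType=continuous \
+  -d skipReplSpecAutoGc=true
+----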
+
+==== Query Service
+
+[#table-fixed-issues-766-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-65055[MB-65055]
+| In a cluster with multiple query nodes, performing a rolling upgrade to version `7.6.x` from an earlier version (such as `7.2`) could cause prepared statements containing a `WITH` clause to malfunction on the upgraded query node.
+
+| Issue resolved.
+
+|===
+
+==== Index Service
+
+[#table-fixed-issues-766-index-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-65012[MB-65012]
+| When shard affinity is enabled, the indexer internally maps all replicas of an index to an alternateID.
+If such an alternateID existed on a node outside the specified list of nodes, instead of preventing replica creation, the system created a new replica with the same alternateID on different nodes.
+This led to rebalance failures on the affected nodes.
+
+| When `ALTER INDEX` is used with the `replica_count` action, the `nodes` clause is no longer allowed when shard affinity is enabled.
+
+
+|===
+
+==== Search Service
+
+[#table-fixed-issues-766-search-service,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-63246[MB-63246]
+| A Couchbase cluster upgrade to 7.6.2 caused data ingestion failure for legacy indexes using `gocouchbase`.
+A `gocbcore`-specific check (https://github.com/couchbase/cbft/blob/f81ec8920f9b022f7d605feee1907eb4c664379a/pindex_bleve.go#L2374[see here]) was incompatible with the `gocouchbase` feed, resulting in ingestion errors.
+
+| Issue resolved.
+
+|===
+
+[#known-issues-766]
+=== Known Issues
+
+This release contains the following known issues:
+
+==== Query Service
+
+[#table-known-issues-766-query-service,cols="10,40,40"]
+|===
+|Issue | Description | Workaround
+
+| https://jira.issues.couchbase.com/browse/MB-64966[MB-64966]
+a| Scope-level user-defined functions (UDFs), sequences, and histogram data are stored in the `bucket/_system/_query` collection.
+
+``FLUSH``ing the bucket will delete the scoped UDF entries.
+The UDFs will continue to work until they are evicted from the cache; after that, executing them will always result in an error.
+
+Sequence metadata is also stored in the `bucket/_system/_query` collection.
+``FLUSH``ing the bucket will delete these entries; once evicted from the cache, sequences will return errors when executed.
+
+``FLUSH``ing the bucket will also delete statistics data stored in the `bucket/_system/_query` collection.
+If stale statistics stored in the cache are used, a non-optimal plan could be generated.
+
+
+a| For user-defined functions, you can do one of the following:
+
+* Recreate the scoped user-defined functions after the `FLUSH`.
+* Use global UDFs instead of scoped UDFs. (Global UDFs are not stored in the `_query` collection.)
+* Create scoped UDFs on a bucket that does not have `FLUSH` enabled, and then reference them via a fully qualified name (`bucket.scope.UDFname`).
+
+For sequence metadata:
+
+* After the `FLUSH`, recreate the sequence, or
+* Create sequences on a bucket that does not have `FLUSH` enabled, and then reference them via a fully qualified name (`bucket.scope.sequenceName`).
+
+For statistical data, rerun `UPDATE STATISTICS` on all the indexes on every scope and collection in the bucket that was flushed.
+
+
+|===
+
diff --git a/modules/release-notes/partials/docs-server-7.6.7-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.7-release-note.adoc
new file mode 100644
index 0000000000..3d664823b8
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.7-release-note.adoc
@@ -0,0 +1,29 @@
+
+
+[#release-767]
+== Release 7.6.7 (August 2025)
+
+Couchbase Server 7.6.7 was released in August 2025.
+This maintenance release contains fixes for known issues.
+
+=== Fixed Issues
+
+==== Storage Engine
+
+[#table-fixed-issues-767-storage-engine,cols="10,40,40"]
+|===
+|Issue | Description | Resolution
+
+// tag::MB-67762[]
+
+a| https://jira.issues.couchbase.com/browse/MB-67762[MB-67762]
+
+a| An integer overflow bug existed in buckets using the Magma storage engine.
+This bug could potentially cause a subset of mutations to become invisible to read operations after approximately 500 billion to 2 trillion mutations per bucket.
+
+a| For details, please refer to https://jira.issues.couchbase.com/browse/MB-67762[MB-67762].
+
+// end::MB-67762[]
+|===
+
diff --git a/modules/release-notes/partials/docs-server-7.6.8-release-note.adoc b/modules/release-notes/partials/docs-server-7.6.8-release-note.adoc
new file mode 100644
index 0000000000..cdd6709297
--- /dev/null
+++ b/modules/release-notes/partials/docs-server-7.6.8-release-note.adoc
@@ -0,0 +1,170 @@
+
+[#release-768]
+== Release 7.6.8 (November 2025)
+
+Couchbase Server 7.6.8 was released in November 2025.
+This maintenance release contains fixes for known issues.
+
+=== Fixed Issues
+
+
+==== Cluster Manager
+[#table-fixed-issues-768-cluster-manager, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+
+|https://jira.issues.couchbase.com/browse/MB-67106/[MB-67106]
+
+a|*Metric Rename – Cache Miss Ratio → Get Miss Ratio*
+The _Cache Miss Ratio_ metric name suggested it measured memory or disk caching performance.
+In reality, as of version 7.6.2, it was redefined to measure the proportion of read (`get`) operations that fail because the requested key is not present in the bucket at all.
+To eliminate confusion, the metric has been renamed to *Get Miss Ratio*.
+
+| Issue resolved.
+
+
+|https://jira.issues.couchbase.com/browse/MB-67758/[MB-67758]
+
+a|Prior to the 7.6.8 release, the auto-failover pop-up alerts, as documented in link:https://docs.couchbase.com/server/current/manage/manage-settings/configure-alerts.html#saving-and-testing-the-alert-configuration[Saving and Testing the Alert Configuration], did not work.
+
+| Issue resolved.
+
+
+|===
+
+
+
+==== Data Service
+[#table-fixed-issues-768-data-service, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+
+|https://jira.issues.couchbase.com/browse/MB-66876/[MB-66876]
+
+a|The Magma storage engine did not compress some documents during persistence.
+To resolve this, `magma_min_value_block_size_threshold` has been adjusted to `INT_MAX` for version 7.6.x, ensuring that block compression applies consistently for documents larger than 64 KiB.
+This change is not required for version 8.0.0, due to differences in configuration management.
+Couchbase recommends that if `magma_per_document_compression_enabled` is set to `false`, `magma_min_value_block_size_threshold` should also be set to `infinity` to maintain compression behavior.
+
+| Issue resolved.
+
+
+|https://jira.issues.couchbase.com/browse/MB-67106/[MB-67106]
+
+a|*Metric Rename – Cache Miss Ratio → Get Miss Ratio*
+The _Cache Miss Ratio_ metric name suggested it measured memory or disk caching performance.
+In reality, as of version 7.6.2, it was redefined to measure the proportion of read (`get`) operations that fail because the requested key is not present in the bucket at all.
+To eliminate this confusion, the metric has been renamed to *Get Miss Ratio*.
+
+| Issue resolved.
+
+
+|===
+
+
+
+
+
+==== XDCR
+[#table-fixed-issues-768-xdcr, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+
+|https://jira.issues.couchbase.com/browse/MB-66649/[MB-66649]
+
+a|When modifying filter expressions or mapping configurations (explicit/migration), race conditions could prevent proper replication from the source.
+This could result in source bucket documents failing to replicate to the target bucket.
+To address this issue, the system now displays an alert when it detects replication resuming from stale checkpoints.
+This alert notifies users to delete and recreate the affected replication.
+
+IMPORTANT: Do not dismiss this alert by pausing and resuming the replication, as this will hide the warning without resolving the underlying issue.
+
+| Issue resolved.
+
+
+|===
+
+
+
+==== Query Service
+[#table-fixed-issues-768-query-service, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+
+|https://jira.issues.couchbase.com/browse/MB-66703/[MB-66703]
+
+a|Added a plan version to prepared statements, and logic to trigger a reprepare when outdated plans are detected.
+This avoids duplicate `OFFSET` execution in mixed-mode clusters due to changes in `ORDER BY` and `LIMIT`/`OFFSET` handling.
+
+| Issue resolved.
+
+
+|https://jira.issues.couchbase.com/browse/MB-67849/[MB-67849]
+
+a|The recent builds of Couchbase Server versions 7.2.7 through 8.1.0 and Enterprise Analytics 2.1.0 include updates to the `go_json` library to address `int64` overflow issues reported under MB-67849.
+The updates involve implementing proper checks for `int64` overflow to properly represent large numbers (greater than 2^63^ or less than -2^63^).
+
+| Issue resolved.
+
+
+|===
+
+
+
+
+==== Index Service
+[#table-fixed-issues-768-index-service, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+| https://jira.issues.couchbase.com/browse/MB-66434[MB-66434]
+| In certain scenarios where `shard affinity` has been toggled on or off, or a previous rebalance was canceled midway, the Alternate Shard IDs for some replicas may be reset.
+During a later rebalance where shard affinity is enabled and `optimise index placement` is turned off, the replica repair process assigns a shard to the lost replica based on the Alternate Shard ID of its surviving counterpart.
+The existing replicas whose Alternate Shard IDs were reset are also re-assigned the same Alternate Shard ID.
+
+However, since index movement is turned off (due to `optimise index placement` being off), these replicas remain on their original nodes.
+As a result, the same Alternate Shard ID can end up being present on multiple nodes.
+This violates the planning invariant that requires that each Alternate Shard ID exist on one node only.
+Violation of this invariant led to a panic during rebalance.
+
+| This complex condition is now handled in the later stages of planning, ensuring that whenever non-moving indexes are also assigned the Alternate Shard ID, they are not considered in the planning of moving indexes.
+
+This prevents the occurrence of panics during a rebalance.
+
+|===
+
+
+
+
+
+==== Tools
+[#table-fixed-issues-768-tools, cols='10,40,40']
+|===
+|Issue | Description | Resolution
+
+
+|https://jira.issues.couchbase.com/browse/MB-60676/[MB-60676]
+
+a|Previously, in clusters with the Backup service running on multiple nodes, it was possible for more than one task on a repository to be attempted at the same time, causing all but one of these tasks to fail.
+If multiple tasks are scheduled to run at the same time, we now skip all but the first one. + +| Issue resolved + + +|=== + + + diff --git a/modules/rest-api/examples/fts-sample-index-def-response.jsonc b/modules/rest-api/examples/fts-sample-index-def-response.jsonc deleted file mode 100644 index 11734b8da2..0000000000 --- a/modules/rest-api/examples/fts-sample-index-def-response.jsonc +++ /dev/null @@ -1,167 +0,0 @@ -{ - "status": "ok", - "indexDef": { - "type": "fulltext-index", - "name": "color-test", - "uuid": "6ea521a918bd3837", - "sourceType": "gocbcore", - "sourceName": "vector-sample", - "sourceUUID": "614177a67bdfbd2823c5f9c3e62f5991", - "planParams": { - "maxPartitionsPerPIndex": 1024, - "indexPartitions": 1 - }, - "params": { - "doc_config": { - "docid_prefix_delim": "", - "docid_regexp": "", - "mode": "scope.collection.type_field", - "type_field": "type" - }, - "mapping": { - "analysis": {}, - "default_analyzer": "standard", - "default_datetime_parser": "dateTimeOptional", - "default_field": "_all", - "default_mapping": { - "dynamic": false, - "enabled": false - }, - "default_type": "_default", - "docvalues_dynamic": false, - "index_dynamic": false, - "store_dynamic": false, - "type_field": "_type", - "types": { - "color.rgb": { - "dynamic": false, - "enabled": true, - "properties": { - "color": { - "dynamic": false, - "enabled": true, - "fields": [ - { - "analyzer": "en", - "docvalues": true, - "include_in_all": true, - "include_term_vectors": true, - "index": true, - "name": "color", - "store": true, - "type": "text" - } - ] - }, - "colorvect_dot": { - "dynamic": false, - "enabled": true, - "fields": [ - { - "dims": 3, - "index": true, - "name": "colorvect_dot", - "similarity": "dot_product", - "type": "vector", - "vector_index_optimized_for": "recall" - } - ] - } - } - } - } - }, - "store": { - "indexType": "scorch", - "segmentVersion": 16 - } - }, - "sourceParams": {} - }, - "planPIndexes": [ - { - "name": "vector-sample.color.color-test_6ea521a918bd3837_4c1c5584", - "uuid": "1543820346544e08", - "indexType": "fulltext-index", - "indexName": "vector-sample.color.color-test", - "indexUUID": "6ea521a918bd3837", - "sourceType": "gocbcore", - "sourceName": "vector-sample", - "sourceUUID": "614177a67bdfbd2823c5f9c3e62f5991", - "sourcePartitions": 
"0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476,477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,563,564,565,566,567,568,569,570,571,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588,589,590,591,592,593,594,595,596,597,598,599,600,601,602,603,604,605,606,607,608,609,610,611,612,613,614,615,616,617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644,645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700,701,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728,729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756,757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840,841,842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,859,860,861,862,863,864,865,866,867,868,869,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,894,895,896,897,898,899,900,901,902,903,904,905,906,907,908,909,910,911,912,913,914,915,
916,917,918,919,920,921,922,923,924,925,926,927,928,929,930,931,932,933,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952,953,954,955,956,957,958,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023", - "nodes": { - "b7d460b7d4145482ac132dfa23727c5c": { - "canRead": true, - "canWrite": true, - "priority": 0 - } - }, - "indexParams": { - "doc_config": { - "docid_prefix_delim": "", - "docid_regexp": "", - "mode": "scope.collection.type_field", - "type_field": "type" - }, - "mapping": { - "analysis": {}, - "default_analyzer": "standard", - "default_datetime_parser": "dateTimeOptional", - "default_field": "_all", - "default_mapping": { - "dynamic": false, - "enabled": false - }, - "default_type": "_default", - "docvalues_dynamic": false, - "index_dynamic": false, - "store_dynamic": false, - "type_field": "_type", - "types": { - "color.rgb": { - "dynamic": false, - "enabled": true, - "properties": { - "color": { - "dynamic": false, - "enabled": true, - "fields": [ - { - "analyzer": "en", - "docvalues": true, - "include_in_all": true, - "include_term_vectors": true, - "index": true, - "name": "color", - "store": true, - "type": "text" - } - ] - }, - "colorvect_dot": { - "dynamic": false, - "enabled": true, - "fields": [ - { - "dims": 3, - "index": true, - "name": "colorvect_dot", - "similarity": "dot_product", - "type": "vector", - "vector_index_optimized_for": "recall" - } - ] - } - } - } - } - }, - "store": { - "indexType": "scorch", - "segmentVersion": 16 - } - } - } - ], - "warnings": [] -} \ No newline at end of file diff --git a/modules/rest-api/examples/fts-sample-success-response-vector-search.jsonc b/modules/rest-api/examples/fts-sample-success-response-vector-search.jsonc deleted file mode 100644 index c526ec35a0..0000000000 --- a/modules/rest-api/examples/fts-sample-success-response-vector-search.jsonc +++ /dev/null @@ -1,49 +0,0 @@ -{ - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "fields": ["*"], - "query": { - "match_none": "" - }, - "knn": [ - { - "k": 2, - "field": "colorvect_dot", - "vector": [ 0.707106781186548, 0, 0.707106781186548 ] - } - ] - }, - "hits": [ - { - "index": "vector-sample.color.color-test_4d6a4a2f00f48fa2_4c1c5584", - "id": "#FF00FF", - "score": 0.9999999403953552, - "sort": [ - "_score" - ], - "fields": { - "color": "magenta / fuchsia" - } - }, - { - "index": "vector-sample.color.color-test_4d6a4a2f00f48fa2_4c1c5584", - "id": "#B000B0", - "score": 0.9999999403953552, - "sort": [ - "_score" - ], - "fields": { - "color": "dark lavender" - } - } - ], - "total_hits": 2, - "cost": 0, - "max_score": 0.9999999403953552, - "took": 4608572, - "facets": null -} \ No newline at end of file diff --git a/modules/rest-api/examples/fts-sample-success-response.jsonc b/modules/rest-api/examples/fts-sample-success-response.jsonc deleted file mode 100644 index 65546b0d83..0000000000 --- a/modules/rest-api/examples/fts-sample-success-response.jsonc +++ /dev/null @@ -1,262 +0,0 @@ -{ - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "conjuncts": [ - { - "match": "location", - "field": "reviews.content", - "prefix_length": 0, - "fuzziness": 0, - "operator": "or" - }, - { - "match_phrase": "nice view", - "field": "reviews.content" - } - ] - }, - 
"size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "reviews.content" - ] - }, - "fields": null, - "facets": null, - "explain": true, - "sort": [ - "reviews.Ratings.Cleanliness", - { - "by": "field", - "field": "reviews.Ratings.Cleanliness", - "type": "number" - }, - "-_score", - "-_id" - ], - "includeLocations": false, - "score": "none", - "search_after": null, - "search_before": null - }, - "hits": [ - { - "index": "travel-sample.inventory.travel-test_53373d2948c55e82_4c1c5584", - "id": "hotel_7388", - "score": 0, - "explanation": { - "value": 0, - "message": "sum of:", - "children": [ - { - "value": 0, - "message": "product of:", - "children": [ - { - "value": 0, - "message": "sum of:", - "children": [ - { - "value": 0, - "message": "weight(reviews.content:location^1.000000 in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0.5320504947307548, - "message": "queryWeight(reviews.content:location^1.000000), product of:", - "children": [ - { - "value": 1, - "message": "boost" - }, - { - "value": 1.4291903588638628, - "message": "idf(docFreq=596, maxDocs=917)" - }, - { - "value": 0.3722740581273647, - "message": "queryNorm" - } - ] - }, - { - "value": 0, - "message": "fieldWeight(reviews.content:location in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0, - "message": "tf(termFreq(reviews.content:location)=0" - }, - { - "value": 0, - "message": "fieldNorm(field=reviews.content, doc=\u0000\u0000\u0000\u0000\u0000\u0000\u0003\n)" - }, - { - "value": 1.4291903588638628, - "message": "idf(docFreq=596, maxDocs=917)" - } - ] - } - ] - } - ] - }, - { - "value": 1, - "message": "coord(1/1)" - } - ] - }, - { - "value": 0, - "message": "sum of:", - "children": [ - { - "value": 0, - "message": "weight(reviews.content:view^1.000000 in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0.6867550119496617, - "message": "queryWeight(reviews.content:view^1.000000), product of:", - "children": [ - { - "value": 1, - "message": "boost" - }, - { - "value": 1.8447565629585312, - "message": "idf(docFreq=393, maxDocs=917)" - }, - { - "value": 0.3722740581273647, - "message": "queryNorm" - } - ] - }, - { - "value": 0, - "message": "fieldWeight(reviews.content:view in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0, - "message": "tf(termFreq(reviews.content:view)=0" - }, - { - "value": 0, - "message": "fieldNorm(field=reviews.content, doc=\u0000\u0000\u0000\u0000\u0000\u0000\u0003\n)" - }, - { - "value": 1.8447565629585312, - "message": "idf(docFreq=393, maxDocs=917)" - } - ] - } - ] - }, - { - "value": 0, - "message": "weight(reviews.content:nice^1.000000 in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0.4952674273751292, - "message": "queryWeight(reviews.content:nice^1.000000), product of:", - "children": [ - { - "value": 1, - "message": "boost" - }, - { - "value": 1.3303839377539577, - "message": "idf(docFreq=658, maxDocs=917)" - }, - { - "value": 0.3722740581273647, - "message": "queryNorm" - } - ] - }, - { - "value": 0, - "message": "fieldWeight(reviews.content:nice in \u0000\u0000\u0000\u0000\u0000\u0000\u0003\n), product of:", - "children": [ - { - "value": 0, - "message": "tf(termFreq(reviews.content:nice)=0" - }, - { - "value": 0, - "message": "fieldNorm(field=reviews.content, doc=\u0000\u0000\u0000\u0000\u0000\u0000\u0003\n)" - }, - { - "value": 
1.3303839377539577, - "message": "idf(docFreq=658, maxDocs=917)" - } - ] - } - ] - } - ] - } - ] - }, - "locations": { - "reviews.content": { - "location": [ - { - "pos": 312, - "start": 1641, - "end": 1649, - "array_positions": [ - 4 - ] - } - ], - "nice": [ - { - "pos": 165, - "start": 840, - "end": 844, - "array_positions": [ - 2 - ] - } - ], - "view": [ - { - "pos": 166, - "start": 845, - "end": 849, - "array_positions": [ - 2 - ] - } - ] - } - }, - "fragments": { - "reviews.content": [ - "…at\u0026#39;s her name checked us in, very friendly and knowlegeable of the area. I would stay here again get area and right at the street car stop. nice resturants in walking distance. \u003cmark\u003enice\u003c/mark\u003e \u003cmark\u003eview\u003c/mark\u003e of the city o…" - ] - }, - "sort": [ - "􏿿􏿿􏿿", - "􏿿􏿿􏿿", - "_score", - "hotel_7388" - ] - }, - - {...} - ], - "total_hits": 27, - "cost": 108906, - "max_score": 0, - "took": 14964461, - "facets": null -} \ No newline at end of file diff --git a/modules/rest-api/examples/post-settings-indexes.sh b/modules/rest-api/examples/post-settings-indexes.sh index 5a140580ef..b25536440c 100644 --- a/modules/rest-api/examples/post-settings-indexes.sh +++ b/modules/rest-api/examples/post-settings-indexes.sh @@ -1,3 +1,4 @@ +# tag::gsi-settings[] curl -v -X POST http://127.0.0.1:8091/settings/indexes \ -u Administrator:password \ -d indexerThreads=4 \ @@ -7,3 +8,8 @@ curl -v -X POST http://127.0.0.1:8091/settings/indexes \ -d redistributeIndexes=false \ -d numReplica=0 \ -d enablePageBloomFilter=false +# end::gsi-settings[] + +# tag::disable-ftb-rebalance[] +curl -X POST http://:8091/settings/indexes -d enableShardAffinity=false -u Administrator: +# end::disable-ftb-rebalance[] \ No newline at end of file diff --git a/modules/rest-api/pages/post-settings-indexes.adoc b/modules/rest-api/pages/post-settings-indexes.adoc index 5b852fa5b6..9c7667c043 100644 --- a/modules/rest-api/pages/post-settings-indexes.adoc +++ b/modules/rest-api/pages/post-settings-indexes.adoc @@ -142,7 +142,7 @@ The following example sets the global secondary index settings. [source#example-curl,bash] ---- -include::example$post-settings-indexes.sh[] +include::example$post-settings-indexes.sh[tags=gsi-settings] ---- == Sample Response @@ -155,3 +155,13 @@ include::example$post-settings-indexes.jsonc[] ---- *401*:: This response code returns an empty body. + +[#disable-file-transfer-based-rebalance] +== Curl Command to Disable the File Transfer Based Rebalance + +The following command disables the File Transfer Based Rebalance (`enableShardAffinity`) feature in the xref:manage:manage-settings/general-settings.adoc#index-storage-mode[Index Storage Mode]. + +[source#example-curl,bash] +---- +include::example$post-settings-indexes.sh[tag=disable-ftb-rebalance] +---- diff --git a/modules/rest-api/pages/rest-auditing.adoc b/modules/rest-api/pages/rest-auditing.adoc index c4e6074814..697d00d9a7 100644 --- a/modules/rest-api/pages/rest-auditing.adoc +++ b/modules/rest-api/pages/rest-auditing.adoc @@ -12,6 +12,8 @@ GET /settings/audit POST /settings/audit GET /settings/audit/descriptors + +GET /settings/audit/nonFilterableDescriptors ---- [#description] @@ -23,7 +25,8 @@ This includes the enablement and disablement of auditing, the establishment of a The `GET` method used with the `/settings/audit/descriptors` URI returns a document that lists _filterable_ audit events; providing an _id_, _name_, _module_, and _description_ for each. 
diff --git a/modules/rest-api/pages/rest-auditing.adoc b/modules/rest-api/pages/rest-auditing.adoc
index c4e6074814..697d00d9a7 100644
--- a/modules/rest-api/pages/rest-auditing.adoc
+++ b/modules/rest-api/pages/rest-auditing.adoc
@@ -12,6 +12,8 @@
GET /settings/audit

POST /settings/audit

GET /settings/audit/descriptors
+
+GET /settings/audit/nonFilterableDescriptors
----

[#description]
@@ -23,7 +25,8 @@ This includes the enablement and disablement of auditing, the establishment of a
The `GET` method used with the `/settings/audit/descriptors` URI returns a document that lists _filterable_ audit events, providing an _id_, _name_, _module_, and _description_ for each.
The _id_ can be used in interpreting the output from `GET /settings/audit`, and in determining input-values for `POST /settings/audit`.
A _filterable_ event is an event that can be individually disabled, even when event-auditing for the node is enabled.
-Events that are not filterable are not included in the list returned by `GET /settings/audit/descriptors`.
+Events that are not filterable are not included in the list returned by `GET /settings/audit/descriptors`.
+
+Events that are not filterable can be retrieved by using the `GET` method with the `/settings/audit/nonFilterableDescriptors` URI.

Auditing can be configured by the *Full Admin* and the *Local User Security Admin* roles.
The auditing configuration can be read by the *Full Admin*, the *Local User Security Admin*, and the *Read-Only Admin* roles.
@@ -55,16 +58,17 @@ When auditing is enabled, all _non-filterable_ events are audited; and none can

* The `logPath` parameter specifies the pathname of the directory to which the `audit.log` file is written.

[#pruneAge]
-* The `pruneAge` parameter sets the number of minutes Couchbase Server keeps rotated audit logs.
+* The `pruneAge` parameter sets the number of seconds Couchbase Server keeps rotated audit logs.
When set to the minimum value 0 (the default), Couchbase Server does not prune rotated audit logs.
-If set to a value greater than 0, Couchbase Server deletes rotated audit logs that are older than this value in minutes.
-The maximum value for this setting is 35791394 (4085 years).
+If set to a value greater than 0, Couchbase Server deletes rotated audit logs that are older than this value in seconds.
+The maximum value for this setting is 35791394 (approximately 414 days).

* The `rotateInterval` parameter specifies the maximum time-period that is to elapse between log-rotations.
-Its value must be a number of seconds, in the range of 900 (15 minutes) to 604800 (7 days), inclusive.
+Its value must be a number of seconds, and must be a multiple of 60.
+The value must also be in the range of 900 (15 minutes) to 604800 (7 days), inclusive.

* The `rotateSize` parameter specifies the maximum size to which the `audit.log` file is permitted to grow, before being rotated.
-Its value must be a number of bytes, in the range of 0 to 524288000 (500 MB), inclusive.
+Its value must be a number of bytes, in the range of 1048576 to 524288000 (1 MiB to 500 MiB), inclusive.
+The default is 20971520 (20 MiB).

* The `disabled` parameter indicates which individual _filterable_ events are disabled.
Its value must be one or more filterable-event ids, specified as a comma-separated list, without spaces.
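+
+As an illustrative sketch (the event ids below are placeholders rather than real descriptor ids, and the node address follows the examples on this page), a call that combines these parameters might look as follows, with `pruneAge` and `rotateInterval` both given in seconds:
+
+[source,shell]
+----
+# Keep rotated audit logs for 7 days (604800 seconds), rotate every
+# hour (3600 is a multiple of 60), and disable two filterable events.
+curl -v -X POST -u Administrator:password \
+http://localhost:8091/settings/audit \
+-d auditdEnabled=true \
+-d pruneAge=604800 \
+-d rotateInterval=3600 \
+-d disabled=8243,8255
+----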
@@ -121,7 +125,7 @@ Each member is an object of two elements, which are the _name_ of the disabled u

* _logPath_: The current value of the pathname to which the `audit.log` file is being written.

-* _pruneAge_: The number of minutes Couchbase Server keeps rotated audit logs. The value `0` means Couchbase Server does not automatically prune these logs.
+* _pruneAge_: The number of seconds Couchbase Server keeps rotated audit logs. The value `0` means Couchbase Server does not automatically prune these logs.

* _rotateInterval_: An integer that is the number of seconds in the maximum time-period on whose elapse the log file is rotated.

@@ -223,6 +227,57 @@ The output thus provides a list of `disabled` filterable-event ids.
It confirms that event auditing is enabled, and lists `disabledUsers`: the list shown contains one local user, and two internal.
The current `logpath`, `rotateInterval`, and `rotateSize` are also provided.

+=== List of Non-filterable Audit Events
+
+The next example uses the `nonFilterableDescriptors` URI to retrieve the list of non-filterable audit events.
+
+[source,shell]
+----
+curl -v -X GET -u Administrator:password \
+http://localhost:8091/settings/audit/nonFilterableDescriptors | jq
+----
+
+A successful call returns a JSON result similar to the following, containing a list of the audit events that cannot be filtered.
+
+----
+[
+  {
+    "id": 4096,
+    "name": "configured audit daemon",
+    "module": "auditd",
+    "description": "loaded configuration file for audit daemon"
+  },
+  {
+    "id": 4097,
+    "name": "shutting down audit daemon",
+    "module": "auditd",
+    "description": "The audit daemon is being shutdown"
+  },
+  {
+    "id": 8192,
+    "name": "login success",
+    "module": "ns_server",
+    "description": "Successful login to couchbase cluster"
+  },
+  {
+    "id": 8193,
+    "name": "login failure",
+    "module": "ns_server",
+    "description": "Unsuccessful attempt to login to couchbase cluster"
+  },
+  {
+    "id": 8194,
+    "name": "delete user",
+    "module": "ns_server",
+    "description": "User was deleted"
+  },
+
+// Truncated . . .
+
+]
+----
+
=== Change the Event-Auditing Configuration

The following call can be used to modify the event-auditing configuration for the node:
diff --git a/modules/rest-api/pages/rest-autocompact-per-bucket.adoc b/modules/rest-api/pages/rest-autocompact-per-bucket.adoc
index a99bc9c491..5bd81a3155 100644
--- a/modules/rest-api/pages/rest-autocompact-per-bucket.adoc
+++ b/modules/rest-api/pages/rest-autocompact-per-bucket.adoc
@@ -153,7 +153,7 @@ The values assigned to validly specified parameters will be applied.
For example, specifying `-d purgeIntervalg=11` leaves the `purgeInterval` at its current value.

* An invalid parameter-specification may result in the value being established as `"undefined"`.
-For example, specifying `-d allowedTimePeriod[toMinut3e]=10` results in a setting such as the following:
+For example, specifying `-d allowedTimePeriod[toMinute]=10` results in a setting such as the following:

----
"allowedTimePeriod": {
diff --git a/modules/rest-api/pages/rest-bucket-create.adoc b/modules/rest-api/pages/rest-bucket-create.adoc
index 76320237cc..9c70a88871 100644
--- a/modules/rest-api/pages/rest-bucket-create.adoc
+++ b/modules/rest-api/pages/rest-bucket-create.adoc
@@ -78,6 +78,7 @@ curl -X POST -u :
    -d allowedTimePeriod[toHour]=
    -d allowedTimePeriod[toMinute]=
    -d allowedTimePeriod[abortOutside]=[ true | false ]
+    -d versionPruningWindowHrs=
----

All parameters are described in the following subsections.
@@ -1090,6 +1091,51 @@ This parameter is ignored if `autoCompactionDefined` is `false` (which is its de
See the examples provided above, in xref:rest-api:rest-bucket-create.adoc#example-databasefragmentationthresholdpercentage-create[Example: Specifying a Data Fragmentation Threshold as a Percentage, when Creating] and xref:rest-api:rest-bucket-create.adoc#example-databasefragmentationthresholdpercentage-edit[Example: Specifying a Data Fragmentation Threshold as a Percentage, when Editing].

+[#enablecrossclusterversioning]
+=== enableCrossClusterVersioning
+
+Enabling cross-cluster versioning is a prerequisite for certain XDCR features.
+The bucket property `enableCrossClusterVersioning` can only be set to `true` after a bucket has been created.
+When enabled, XDCR stores additional metadata, called the Hybrid Logical Vector (HLV), in the extended attributes (xattrs) of each document that it processes.
+For more information, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning].
+
+See the example provided in xref:rest-api:rest-bucket-create.adoc#example-enablecrossclusterversioning-edit[Example: Turning on enableCrossClusterVersioning, when Editing].
+
+CAUTION: The default value is `false`.
+Do not change the value of this property unless the configuration of a feature requires it.
+Once enabled, you cannot turn off the `enableCrossClusterVersioning` property.
+The only way to undo setting this value to `true` is to back up your data, create a new bucket, and restore the data, using the option `cbbackupmgr restore --disable-hlv` to remove the HLV information from the xattrs.
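+
+The following sketch outlines that recovery path (the archive path, repository name, bucket name, and credentials are placeholders for your own values):
+
+----
+# 1. Back up the bucket that has enableCrossClusterVersioning set to true.
+cbbackupmgr backup -a /backups -r hlv-repo \
+-c http://localhost:8091 -u Administrator -p password
+
+# 2. Delete the old bucket, and create the new bucket to restore into.
+
+# 3. Restore the data, removing the HLV information from the xattrs.
+cbbackupmgr restore -a /backups -r hlv-repo \
+-c http://localhost:8091 -u Administrator -p password \
+--disable-hlv
+----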
+
+[#example-enablecrossclusterversioning-edit]
+==== Example: Turning on enableCrossClusterVersioning, when Editing
+
+The following example modifies the value of the bucket property `enableCrossClusterVersioning` to `true`.
+
+----
+curl -v -X POST http://localhost:8091/pools/default/buckets/testBucket \
+-u Administrator:password \
+-d enableCrossClusterVersioning=true
+----
+
+[#version-pruning-window-hrs-property]
+=== versionPruningWindowHrs
+
+Controls the pruning window for the Hybrid Logical Vector (HLV) metadata.
+The default value of `versionPruningWindowHrs` is 720 hours (30 days), which means that any HLV data older than 720 hours is pruned, removing the outdated entries.
+For more information, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc#version-pruning-window-hrs[versionPruningWindowHrs] in XDCR enableCrossClusterVersioning.
+
+See the example provided in xref:rest-api:rest-bucket-create.adoc#example-versionpruningwindowhrs-edit[Example: Specifying a time value for versionPruningWindowHrs, when Editing].
+
+NOTE: `versionPruningWindowHrs` must be set to the same value for all buckets in an XDCR replication topology.
+
+[#example-versionpruningwindowhrs-edit]
+==== Example: Specifying a time value for versionPruningWindowHrs, when Editing
+
+The following example sets the value, in hours, of the bucket property `versionPruningWindowHrs`.
+
+----
+curl -v -X POST http://localhost:8091/pools/default/buckets/testBucket \
+-u Administrator:password \
+-d versionPruningWindowHrs=120
+----
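+
+To verify the values currently in effect, you can read the bucket's properties back.
+This is a sketch that assumes the same bucket and credentials as the examples above, and that the returned bucket definition includes both properties:
+
+----
+curl -s -X GET http://localhost:8091/pools/default/buckets/testBucket \
+-u Administrator:password | jq '{enableCrossClusterVersioning, versionPruningWindowHrs}'
+----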
+
== Responses

If bucket-creation is successful, HTTP response `202 Accepted` is returned, with empty content.
@@ -1099,15 +1145,6 @@ If the bucket cannot created due to a missing or incorrect parameter, a `400` re
If the URL is incorrectly specified a `404 (Object Not Found)` error is returned.
Failure to authenticate gives `401 Unauthorized`.

-[#notes]
-== Notes
-
-Couchbase Server 7.6.2 added a property named `enableCrossClusterVersioning` to support a future feature.
-Do not change the value of this property.
-
-CAUTION: Once enabled, you cannot turn off the `enableCrossClusterVersioning` property.
-The only way for you to undo setting this value to `true` is to backup your data, create a new bucket, and reload the data into it.
-
== See Also

A conceptual description of buckets is provided in xref:learn:buckets-memory-and-storage/buckets.adoc[Buckets].
diff --git a/modules/rest-api/pages/rest-bucket-stats.adoc b/modules/rest-api/pages/rest-bucket-stats.adoc
index b2b8afe8e8..aaeadff1f2 100644
--- a/modules/rest-api/pages/rest-bucket-stats.adoc
+++ b/modules/rest-api/pages/rest-bucket-stats.adoc
@@ -1,11 +1,16 @@
= Getting Bucket Statistics
-:description: pass:q[To retrieve bucket statistics, use the `GET` operation with the `/pools/default/buckets/bucket_name/stats` URI.]
+:description: pass:q[As of Couchbase Server 8.0, the `GET /pools/default/buckets/<bucket_name>/stats` endpoint is deprecated.]
:page-topic-type: reference
:page-aliases: rest-api:rest-node-retrieve-stats

+[.deprecated]#Deprecated#
+
[abstract]
{description}

+IMPORTANT: As of Couchbase Server 8.0, the REST API endpoint `GET /pools/default/buckets/<bucket_name>/stats` is deprecated.
+For retrieving bucket statistics, use xref:rest-api:rest-statistics-single.adoc[Getting a Single Statistic] or xref:rest-api:rest-statistics-multiple.adoc[Getting Multiple Statistics] instead.
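+
+As an illustration of the replacement API (the metric name, bucket, and credentials here are examples only), a single bucket statistic can be retrieved through the statistics range endpoint:
+
+[source,shell]
+----
+# Retrieve the current item count for travel-sample by using the
+# statistics range endpoint instead of the deprecated URI.
+curl -X GET -u Administrator:password \
+http://localhost:8091/pools/default/stats/range/kv_curr_items?bucket=travel-sample
+----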
+
== HTTP method and URI

Statistics can be retrieved at the bucket level.
diff --git a/modules/rest-api/pages/rest-cluster-email-notifications.adoc b/modules/rest-api/pages/rest-cluster-email-notifications.adoc
index 369d3f1e3a..93401c00a5 100644
--- a/modules/rest-api/pages/rest-cluster-email-notifications.adoc
+++ b/modules/rest-api/pages/rest-cluster-email-notifications.adoc
@@ -164,7 +164,7 @@ When a threshold is fully enabled, its value is an integer between `1` and `100`
Thresholds are enabled, disabled, and configured by means of `POST /settings/alerts/limits`; and their values are retrieved by means of `GET /settings/alerts/limits`.
+
The thresholds are intended to be assigned values in ascending order; with `memoryNoticeThreshold` the lowest, and `memoryCriticalThreshold` the highest.
-The default values are, for `memoryNoticeThreshold` `-1` (meaning disabled), for `memoryWarningThreshold` `90`, and for `memoryCriticalThreshold` `95`.
+The default values are, for `memoryNoticeThreshold` `-1` (meaning disabled), for `memoryWarningThreshold` `85`, and for `memoryCriticalThreshold` `90`.

[#setting-memcache-alert-threshold]
* `memcachedSystemConnectionWarningThreshold`. Trigger the `xref:manage:manage-settings/configure-alerts.adoc#memcached-alert[memcached_connections]` alert if the number of `system` connections in use exceeds the given percentage of connections available. (E.g., set this value to `90` to trigger an alert if the system connections used by the data service exceed 90% of the connections available.)
diff --git a/modules/rest-api/pages/rest-cluster-query-settings.adoc b/modules/rest-api/pages/rest-cluster-query-settings.adoc
deleted file mode 100644
index 418290d279..0000000000
--- a/modules/rest-api/pages/rest-cluster-query-settings.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-= Query Settings REST API
-
-////
-These partials are created automatically by Swagger2Markup.
-Refer to https://github.com/couchbaselabs/cb-swagger
-////
-
-include::partial$query-settings/overview.adoc[tag=body]
-include::partial$query-settings/paths.adoc[]
-include::partial$query-settings/definitions.adoc[]
-include::partial$query-settings/security.adoc[]
-
-== See Also
-
-* [[cleanupclientattempts]][[cleanuplostattempts]][[cleanupwindow]][[completed-limit]][[completed-max-plan-size]][[completed-threshold]][[loglevel]][[max-parallelism-srv]][[memory-quota-srv]][[node-quota]][[node-quota-val-percent]][[num-cpus]][[numatrs-srv]][[n1ql-feat-ctrl]][[pipeline-batch-srv]][[pipeline-cap-srv]][[prepared-limit]][[scan-cap-srv]][[timeout-srv]][[txtimeout-srv]][[use-cbo-srv]][[use-replica-srv]]For node-level settings, see the xref:n1ql:n1ql-rest-api/admin.adoc#_settings[Admin REST API].
-
-* [[max_parallelism_req]][[memory_quota_req]][[numatrs_req]][[pipeline_batch_req]][[pipeline_cap_req]][[scan_cap_req]][[timeout_req]][[tximplicit]][[txtimeout_req]][[use_cbo_req]][[use_replica_req]]For request-level parameters, see the xref:n1ql:n1ql-rest-api/index.adoc#_request_parameters[Query Service REST API].
\ No newline at end of file
diff --git a/modules/rest-api/pages/rest-configure-rebalance-retry.adoc b/modules/rest-api/pages/rest-configure-rebalance-retry.adoc
index 8cd6128f1e..f864380524 100644
--- a/modules/rest-api/pages/rest-configure-rebalance-retry.adoc
+++ b/modules/rest-api/pages/rest-configure-rebalance-retry.adoc
@@ -4,9 +4,9 @@
== HTTP methods and URI

----
-GET /pools/default/retryRebalance
+GET /settings/retryRebalance

-POST /pools/default/retryRebalance
+POST /settings/retryRebalance
----

[#description]
@@ -20,7 +20,7 @@ For an overview of rebalance and rebalance retries, see xref:learn:clusters-and-

----
curl -X GET -u <username>:<password> \
-http://<host>:<port>/pools/default/retryRebalance
+http://<host>:<port>/settings/retryRebalance

curl -X POST -u Administrator:password \
http://<host>:<port>/settings/retryRebalance
diff --git a/modules/rest-api/pages/rest-discovery-api.adoc b/modules/rest-api/pages/rest-discovery-api.adoc
index 8859f24424..2923babdf9 100644
--- a/modules/rest-api/pages/rest-discovery-api.adoc
+++ b/modules/rest-api/pages/rest-discovery-api.adoc
@@ -36,6 +36,7 @@ By default, the discovery endpoint returns the list of nodes in your database th

----
curl --get -u <username>:<password> \
http://<host>:<port>/prometheus_sd_config
+  -d clusterLabels=[none|uuidAndName|uuidOnly]
  -d disposition=[attachment|inline]
  -d network=[default|external]
  -d port=[insecure|secure]
@@ -48,6 +49,11 @@ This is the same role Couchbase Server requires to retrieve metrics.

=== Parameters

+clusterLabels=[none|uuidAndName|uuidOnly]::
+Controls the inclusion of information labels for the cluster.
+When set to `none`, no labels are included in the response.
+When set to `uuidAndName`, both the UUID and the name of the cluster are added to the response.
+When set to `uuidOnly`, only the UUID of the cluster is returned in the response.
disposition=[attachment|inline]::
Controls how Couchbase Server returns the list of nodes in the response.
When set to the default `inline`, it returns the list inline within the response.
@@ -94,6 +100,34 @@ The next example shows the response that Couchbase Server sends in response to t
]
----

+Adding the `clusterLabels` parameter to the request will add additional cluster information to the response.
For example, this command: + +[source, shell] +---- +curl -s --get -u prometheus:password http://node1:8091/prometheus_sd_config \ +-d clusterLabels=uuidAndName | jq +---- + +will send back the following response: + +[source, json] +---- +[ + { + "targets": [ + "node1:18091", + "node2:18091", + "node3:18091" + ], + "labels": { + "cluster_uuid": "4798c8f9-89bd-d7bf-4bcf-d93fb3e03e46", + "cluster_name": "DB1" + } + } +] + +---- + [[old-api]] == Replicate the Earlier Discovery API diff --git a/modules/rest-api/pages/rest-fts-advanced.adoc b/modules/rest-api/pages/rest-fts-advanced.adoc deleted file mode 100644 index cd4bf9cda1..0000000000 --- a/modules/rest-api/pages/rest-fts-advanced.adoc +++ /dev/null @@ -1,58 +0,0 @@ -= Advanced - -== Index Partition Definition - -[[g-api-index]]GET /api/pindex:: -Get information about an index partition. -+ -*Permission Required*: cluster.bucket[].fts!read -+ -*Role Required*: FTS-Searcher, FTS-Admin -+ -.Sample response ----- -{ - "pindexes": { - "myFirstIndex_6cc599ab7a85bf3b_0": { - "indexName": "myFirstIndex", - "indexParams": "", - "indexType": "blackhole", - "indexUUID": "6cc599ab7a85bf3b", - "name": "myFirstIndex_6cc599ab7a85bf3b_0", - "sourceName": "", - "sourceParams": "", - "sourcePartitions": "", - "sourceType": "nil", - "sourceUUID": "", - "uuid": "2d9ecb8b574a9f6a" - } - }, - "status": "ok" -} ----- - -[[g-api-index-name]]GET /api/pindex/\{pindexName}:: -*Permission Required*: cluster.bucket[[.var]`bucket_name`].fts!read -+ -*Role Required*: FTS-Searcher, FTS-Admin - -== Index Partition Querying - -[[g-api-index-name-count]]GET /api/pindex/\{pindexName}/count:: -*Permission Required*: cluster.bucket[[.var]`bucket_name`].fts!read -+ -*Role Required*: FTS-Searcher, FTS-Admin - -[[p-api-index-name-query]]POST /api/pindex/\{pindexName}/query:: -*Permission Required*: cluster.bucket[[.var]`bucket_name`].fts!write -+ -*Role Required*: FTS-Admin - -== FTS Memory Quota - -[[p-api-fts-memory-quota]]POST /pools/default:: -*Permission Required*: cluster.bucket[[.var]`bucket_name`].fts!manage -+ -*Role Required*: FTS-Admin -+ -Specify the [.param]`ftsMemoryQuota` parameter with an integer value (example: ftsMemoryQuota=512) to set the memory quota for the full text search (FTS) service. diff --git a/modules/rest-api/pages/rest-fts-indexing.adoc b/modules/rest-api/pages/rest-fts-indexing.adoc deleted file mode 100644 index c4ead40832..0000000000 --- a/modules/rest-api/pages/rest-fts-indexing.adoc +++ /dev/null @@ -1,2014 +0,0 @@ -= Search Index Management and Monitoring -:page-toclevels: 2 -:description: Use the following endpoints in the Search Service API to manage and monitor your Search indexes. - -[abstract] -{description} - -== Search Index Definitions - -Use the following APIs to retrieve Search index definitions, create new Search indexes, or delete an existing Search index. - -[#g-api-index] -=== Get All Search Index Definitions - -Returns all Search index definitions from the bucket where you have read permissions, as a JSON object. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. 
- -==== Endpoint - -GET /api/index - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -.Sample Response ----- -{ - "indexDefs": { - "implVersion": "4.0.0", - "indexDefs": { - "myFirstIndex": { - "name": "myFirstIndex", - "params": "", - "planParams": { - "hierarchyRules": null, - "maxPartitionsPerPIndex": 0, - "nodePlanParams": null, - "numReplicas": 0, - "planFrozen": false - }, - "sourceName": "", - "sourceParams": "", - "sourceType": "nil", - "sourceUUID": "", - "type": "fulltext-index", - "uuid": "6cc599ab7a85bf3b" - } - }, - "uuid": "6cc599ab7a85bf3b" - }, - "status": "ok" -} ----- - -[#g-api-scoped-indexes] -=== GET All Search Index Definitions (Scoped) - -Returns all Search index definitions inside the bucket and scope specified in the endpoint URL as a JSON object. - -==== Endpoint - -GET /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket where you want to return Search index definitions. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope where you want to return Search index definitions. - -|==== - -.Sample response ----- -{ - "status": "ok", - "indexDefs": { - "uuid": "23cf9530131858b8", - "indexDefs": { - "travel-sample.inventory.travel-hotel": { - "type": "fulltext-index", - "name": "travel-hotel", - "uuid": "a04a16f178846bc4", - "sourceType": "gocbcore", - "sourceName": "travel-sample", - "sourceUUID": "8f866261438f8b0d415a437552f3ae99", - "planParams": { - "maxPartitionsPerPIndex": 1024, - "indexPartitions": 1 - }, - "params": { - "doc_config": { - "docid_prefix_delim": "", - "docid_regexp": "", - "mode": "scope.collection.type_field", - "type_field": "type" - }, - "mapping": { - "analysis": {}, - "default_analyzer": "standard", - "default_datetime_parser": "dateTimeOptional", - "default_field": "_all", - "default_mapping": { - "dynamic": true, - "enabled": false - }, - "default_type": "_default", - "docvalues_dynamic": false, - "index_dynamic": true, - "store_dynamic": false, - "type_field": "_type", - "types": { - "inventory.hotel": { - "dynamic": false, - "enabled": true, - "properties": { - "reviews": { - "dynamic": false, - "enabled": true, - "properties": { - "content": { - "dynamic": false, - "enabled": true, - "fields": [ - { - "docvalues": true, - "include_in_all": true, - "include_term_vectors": true, - "index": true, - "name": "content", - "store": true, - "type": "text" - } - ] - } - } - } - } - } - } - }, - "store": { - "indexType": "scorch", - "segmentVersion": 15 - } - }, - "sourceParams": {} - }, - "travel-sample.inventory.travel-test": { - "type": "fulltext-index", - "name": "travel-test", - "uuid": "766ddce5d41a3b41", - "sourceType": "gocbcore", - "sourceName": "travel-sample", - "sourceUUID": "8f866261438f8b0d415a437552f3ae99", - "planParams": { - "maxPartitionsPerPIndex": 1024, - "indexPartitions": 1 - }, - "params": { - "doc_config": { - "docid_prefix_delim": "", - "docid_regexp": "", - "mode": "scope.collection.type_field", - "type_field": "type" - }, - "mapping": { - "analysis": {}, - "default_analyzer": "standard", - "default_datetime_parser": "dateTimeOptional", - "default_field": "_all", - "default_mapping": { 
- "dynamic": true, - "enabled": true - }, - "default_type": "_default", - "docvalues_dynamic": false, - "index_dynamic": true, - "store_dynamic": false, - "type_field": "_type" - }, - "store": { - "indexType": "scorch", - "segmentVersion": 15 - } - }, - "sourceParams": {} - } - }, - "implVersion": "5.7.0" - } -} ----- - - -[#g-api-index-name] -=== GET Index Definition - -Returns the definition of the Search index specified in the endpoint URL as a JSON object. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. - -==== Endpoint - -GET /api/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -| [.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition you want to return. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|==== - -.Sample response ----- -{ - "indexDef": { - "name": "myFirstIndex", - "params": "", - "planParams": { - "hierarchyRules": null, - "maxPartitionsPerPIndex": 0, - "nodePlanParams": null, - "numReplicas": 0, - "planFrozen": false - }, - "sourceName": "", - "sourceParams": "", - "source - ": "nil", - "sourceUUID": "", - "type": "fulltext-index", - "uuid": "6cc599ab7a85bf3b" - }, - "planPIndexes": [ - { - "indexName": "myFirstIndex", - "indexParams": "", - "indexType": "bleve", - "indexUUID": "6cc599ab7a85bf3b", - "name": "myFirstIndex_6cc599ab7a85bf3b_0", - "nodes": { - "78fc2ffac2fd9401": { - "canRead": true, - "canWrite": true, - "priority": 0 - } - }, - "sourceName": "", - "sourceParams": "", - "sourcePartitions": "", - "sourceType": "nil", - "sourceUUID": "", - "uuid": "64bed6e2edf354c3" - } - ], - "status": "ok", - "warnings": [] - } ----- - -[#g-api-scoped-index-name] -=== GET Index Definition (Scoped) - -Returns the Search index definition for the Search index specified in the endpoint URL as a JSON object. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -GET /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition to return. - -|==== - -.Sample Response for a Vector Search index -[source,json] ----- -include::example$fts-sample-index-def-response.jsonc[] ----- - -[#p-api-index-name] -=== PUT Create or Update an Index Definition - -If the Search index in the endpoint URL does not exist, this endpoint uses a JSON object in the request body to create a new index. -If the Search index already exists, this endpoint updates the Search index definition. 
- -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. - -==== Endpoint - -PUT /api/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!write - -==== Role Required - -*Search Admin* - -==== Parameters - -For a detailed list of all parameters for the Request Body for the PUT /api/index/`${INDEX_NAME}` endpoint, see xref:search:search-index-params.adoc[]. - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition to create or update. -The name must satisfy the regular expression `^[A-Za-z][0-9A-Za-z_\-]*$`. - -If you want to update an index definition, you must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|[.param]`params` -| Yes -| String (JSON), form parameter -| Sets the Search index's type identifier, type mappings, and analyzers. -For more information, see xref:search:search-index-params.adoc#params[Params Object]. - -|[.param]`type` -| Yes -| String, form parameter -a|The type of Search index you want to create. -For more information, see xref:search:search-index-params.adoc#initial[Initial Settings]. - -|[.param]`planParams` -| Yes -| String (JSON), form parameter -| Set a Search index's partition and replication settings. -For more information, see xref:search:search-index-params.adoc#planparams[planParams Object]. - -|[.param]`prevIndexUUID` -| No -| String, form parameter -| Intended for clients that want to check that they are not overwriting the Search index definition updates of concurrent clients. - -|[.param]`sourceName` -| No -| String, form parameter -| The name of the bucket where you want to create the Search index. -For more information, see xref:search:search-index-params.adoc#initial[Initial Settings]. - -|[.param]`sourceUUID` -| No -| String, form parameter -| The UUID of the bucket where you want to create the Search index. -For more information, see xref:search:search-index-params.adoc#initial[Initial Settings]. - -|==== - -.Sample failure response ----- -{ - "error": "rest_create_index: index type is required, indexName: travel-test", - "request": "", - "status": "fail" -} ----- -The Search Service returns a non-200 HTTP error code when a request fails. - -.Sample success response ----- -{ - "status": "ok", - "name": "travel-test", - "uuid": "565ca041af3baf9d" -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#p-api-scoped-index-name] -=== PUT Create or Update an Index Definition (Scoped) - -If the Search index in the endpoint URL does not exist, this endpoint uses a JSON object in the request body to create a new index. -If the Search index already exists, this endpoint updates the Search index definition. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -PUT /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!write - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? 
|Type |Description - -| `${BUCKET_NAME}` -|Yes -|String, URL path parameter -|The name of the bucket where the Search Service should create or update the Search index definition. - -|`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope where the Search Service should create or update the Search index definition. - -|`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition to create or update. -The name must satisfy the regular expression `^[A-Za-z][0-9A-Za-z_\-]*$`. -|==== - -==== Request Body - -Add a JSON object with the full Search index definition you want to create or update. -For more information about how to create an index definition JSON, see xref:search:search-index-params.adoc[]. - -.Sample failure response ----- -{ - "error": "rest_create_index: index type is required, indexName: travel-test", - "request": {}, - "status": "fail" -} ----- -The Search Service returns a non-200 HTTP error code when a request fails. - -.Sample success response ----- -{ - "status": "ok", - "name": "travel-sample.inventory.travel-test",[.var]`${BUCKET_NAME}` - "uuid": "654cb62baebf2d26" -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#d-api-index-name] -=== DELETE Index Definition - -Deletes the Search index definition specified in the endpoint URL. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. - -==== Endpoint - -DELETE /api/index/${INDEX_NAME} - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!write - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition you want to delete. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|==== - -.Sample failure response ----- -{ - "error": "rest_auth: preparePerms, err: index not found", - "request": "", - "status": "fail" -} ----- -The Search Service returns a non-200 HTTP error code when a request fails. - -.Sample success response ----- -{ - "status": "ok", - "uuid": "123294e5a4efbe39" -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#d-api-scoped-index-name] -=== DELETE Index Definition (Scoped) - -Delete the Search index definition from the bucket and scope specified in the endpoint URL. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -DELETE /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!write - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition you want to delete. 
- -|==== - -.Sample failure response ----- -{ - "error": "rest_auth: preparePerms, err: index not found", - "request": "", - "status": "fail" -} ----- -The Search Service returns a non-200 HTTP error code when a request fails. - -.Sample success response ----- -{ - "status": "ok", - "uuid": "687be6a2ad647c34" -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -== Index Management - -Use the following endpoints to manage index controls, such as document ingestion, partition assignment, and queries. - -[#p-api-idx-name-ingestcontrol] -=== POST Set Index IngestControl - -For the Search index specified in the endpoint URL, pause or resume index updates and maintenance. -While paused, the Search index does not load any new document mutations. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. - -==== Endpoint -POST /api/index/`${INDEX_NAME}`/ingestControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition where you want to pause or resume document loading. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -|To pause ingestion and maintenance, set `${OP}` to `pause`. -To resume ingestion and maintenance on a paused index, set `${OP}` to `resume`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#p-api-scoped-ingestcontrol] -=== POST Set Index IngestControl (Scoped) - -For the Search index specified in the endpoint URL, pause or resume index updates and maintenance. -While paused, the Search index does not load any new document mutations. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -POST /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/ingestControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition where you want to pause or resume document loading. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -| To pause ingestion and maintenance, set `${OP}` to `pause`. -To resume ingestion and maintenance on a paused index, set `${OP}` to `resume`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. 
- -[#p-api-idx-name-planfreezecontrol] -=== POST Freeze Index Partition Assignment - -For the Search index specified in the endpoint URL, freeze or unfreeze the assignment of index partitions to nodes. -While frozen, the Search index stops assigning partitions during index rebalancing and index definition updates. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. - -==== Endpoint - -POST /api/index/`${INDEX_NAME}`/planFreezeControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition to freeze or unfreeze for partition assignment. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -| To freeze partition assignment, set `${OP}` to `freeze`. -To unfreeze partition assignment on a frozen index, set `${OP}` to `unfreeze`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#p-api-scoped-planfreezecontrol] -=== POST Freeze Index Partition Assignment (Scoped) - -For the Search index specified in the endpoint URL, freeze or unfreeze the assignment of index partitions to nodes. -While frozen, the Search index stops assigning partitions during index rebalancing and index definition updates. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -POST /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/planFreezeControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition to freeze or unfreeze for partition assignment. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -| To freeze partition assignment, set `${OP}` to `freeze`. -To unfreeze partition assignment on a frozen index, set `${OP}` to `unfreeze`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#p-api-idx-name-querycontrol] -=== POST Stop Queries on an Index - -For the Search index specified in the endpoint URL, disallow or allow queries. -While queries are disallowed, users see an error that the Search index's partitions could not be reached. - -NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release. -Use <>, instead. 
- -==== Endpoint - -POST /api/index/`${INDEX_NAME}`/queryControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index definition where you want to allow or disallow Search queries. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -| To allow queries against a Search index, set `${OP}` to `allow`. -To block queries against a Search index, set `${OP}` to `disallow`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -[#p-api-scoped-querycontrol] -=== POST Stop Queries on an Index (Scoped) - -For the Search index specified in the endpoint URL, disallow or allow queries. -While queries are disallowed, users see an error that the Search index's partitions could not be reached. -Unlike <>, this endpoint is scoped and does not require a fully qualified `${INDEX_NAME}` value. - -==== Endpoint - -POST /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/queryControl/`${OP}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!manage - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition where you want to allow or disallow Search queries. - -|[.param]`${OP}` -| Yes -| String, URL path parameter -| To allow queries against a Search index, set `${OP}` to `allow`. -To block queries against a Search index, set `${OP}` to `disallow`. - -|==== - -.Sample success response ----- -{ - "status": "ok", -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -== Index Monitoring And Debugging - -Use the following endpoints to get statistics about Search indexes for monitoring and debugging. - -[#g-api-stats] -=== GET Indexing and Data Metrics for All Indexes - -Use this endpoint to get indexing and data related metrics, timings, and counters from the node running the Search Service, for all Search indexes. -The endpoint returns the data as a JSON object. 
- -==== Endpoint - -GET /api/stats - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].stats!read - -==== Role Required - -*Search Admin* - -.Sample response ----- -{ - "feeds": { - "myFirstIndex_6cc599ab7a85bf3b": {} - }, - "manager": { - "TotCreateIndex": 1, - "TotCreateIndexOk": 1, - "TotDeleteIndex": 0, - "TotDeleteIndexOk": 0, - "TotIndexControl": 0, - "TotIndexControlOk": 0, - "TotJanitorClosePIndex": 0, - "TotJanitorKick": 2, - "TotJanitorKickErr": 0, - "TotJanitorKickOk": 2, - "TotJanitorKickStart": 2, - "TotJanitorNOOP": 0, - "TotJanitorNOOPOk": 0, - "TotJanitorRemovePIndex": 0, - "TotJanitorSubscriptionEvent": 0, - "TotJanitorUnknownErr": 0, - "TotKick": 0, - "TotPlannerKick": 2, - "TotPlannerKickChanged": 1, - "TotPlannerKickErr": 0, - "TotPlannerKickOk": 2, - "TotPlannerKickStart": 2, - "TotPlannerNOOP": 0, - "TotPlannerNOOPOk": 0, - "TotPlannerSubscriptionEvent": 0, - "TotPlannerUnknownErr": 0, - "TotSaveNodeDef": 2, - "TotSaveNodeDefGetErr": 0, - "TotSaveNodeDefOk": 2, - "TotSaveNodeDefSame": 0, - "TotSaveNodeDefSetErr": 0 - }, - "pindexes": { - "myFirstIndex_6cc599ab7a85bf3b_0": null - } -} ----- - -[#g-api-nsstats-index-name] -=== GET Search Index Statistics - -Return indexing and data related metrics, timings, and counters for the Search index specified in the endpoint URL. -The endpoint returns the data as a JSON object. - -Use this endpoint for monitoring the number of requests, documents, and more for a specific index. -For more detailed partition information and more overall statistics for an index, see <>. - -==== Endpoint - -GET /api/nsstats/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].stats!read - -==== Role Required - -FTS-Searcher, FTS-Admin - -.Sample response ----- -{ - "avg_grpc_internal_queries_latency": 0, - "avg_grpc_queries_latency": 0, - "avg_internal_queries_latency": 0, - "avg_queries_latency": 1.523718, - "batch_merge_count": 0, - "doc_count": 15000, - "iterator_next_count": 0, - "iterator_seek_count": 0, - "last_access_time": "2023-09-20T03:05:55.440-07:00", - "num_bytes_live_data": 0, - "num_bytes_read_at_query_time": 4638582, - "num_bytes_used_disk": 28062406, - "num_bytes_used_disk_by_root": 23651161, - "num_bytes_used_disk_by_root_reclaimable": 0, - "num_bytes_written_at_index_time": 21208735, - "num_files_on_disk": 6, - "num_mutations_to_index": 0, - "num_persister_nap_merger_break": 40, - "num_persister_nap_pause_completed": 7, - "num_pindexes_actual": 1, - "num_pindexes_target": 1, - "num_recs_to_persist": 0, - "num_root_filesegments": 4, - "num_root_memorysegments": 0, - "reader_get_count": 0, - "reader_multi_get_count": 0, - "reader_prefix_iterator_count": 0, - "reader_range_iterator_count": 0, - "timer_batch_store_count": 0, - "timer_data_delete_count": 0, - "timer_data_update_count": 15000, - "timer_opaque_get_count": 2048, - "timer_opaque_set_count": 2048, - "timer_rollback_count": 0, - "timer_snapshot_start_count": 1024, - "tot_seq_received": 19096, - "total_bytes_indexed": 4866486, - "total_bytes_query_results": 1798, - "total_compaction_written_bytes": 87917849, - "total_compactions": 0, - "total_grpc_internal_queries": 0, - "total_grpc_queries": 0, - "total_grpc_queries_error": 0, - "total_grpc_queries_slow": 0, - "total_grpc_queries_timeout": 0, - "total_grpc_request_time": 0, - "total_internal_queries": 0, - "total_queries": 4, - "total_queries_error": 1, - "total_queries_slow": 0, - "total_queries_timeout": 0, - "total_request_time": 6276461, - "total_term_searchers": 7, - 
"total_term_searchers_finished": 7, - "writer_execute_batch_count": 0 -} ----- - -[#g-api-stats-index-name] -=== GET Indexing and Data Metrics, Timings, and Other Statistics - -Return indexing and data related metrics, timings, and counters for the Search index specified in the endpoint URL. -The endpoint returns the data as a JSON object. - -Use this endpoint for more detailed partition information and more available statistics than <>. - -==== Endpoint - -GET /api/stats/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].stats!read - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index where you want to retrieve statistics. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|==== - -.Sample response ----- -{ - "feeds": {}, - "pindexes": { - "travel-sample.inventory.travel-test_3858c70e4d4d8df9_4c1c5584": { - "pindexStoreStats": { - "TimerBatchStore": { - "count": 0, - "min": 0, - "max": 0, - "mean": 0.00, - "stddev": 0.00, - "percentiles": { - "99%": 0.00, - "99.9%": 0.00, - "median": 0.00, - "75%": 0.00, - "95%": 0.00 - }, - "rates": { - "mean": 0.00, - "1-min": 0.00, - "5-min": 0.00, - "15-min": 0.00 - } - }, - "Errors": [] - }, - "bleveIndexStats": { - "index": { - "CurFilesIneligibleForRemoval": 0, - "CurOnDiskBytes": 34007668, - "CurOnDiskFiles": 4, - "CurRootEpoch": 0, - "LastMergedEpoch": 44, - "LastPersistedEpoch": 44, - "MaxBatchIntroTime": 0, - "MaxFileMergeZapIntroductionTime": 0, - "MaxFileMergeZapTime": 0, - "MaxMemMergeZapTime": 0, - "TotAnalysisTime": 0, - "TotBatchIntroTime": 0, - "TotBatches": 0, - "TotBatchesEmpty": 0, - "TotBytesReadAtQueryTime": 294892, - "TotBytesWrittenAtIndexTime": 0, - "TotDeletes": 0, - "TotEventTriggerCompleted": 2, - "TotEventTriggerStarted": 2, - "TotFileMergeForceOpsCompleted": 0, - "TotFileMergeForceOpsStarted": 0, - "TotFileMergeIntroductions": 0, - "TotFileMergeIntroductionsDone": 0, - "TotFileMergeIntroductionsObsoleted": 0, - "TotFileMergeIntroductionsSkipped": 0, - "TotFileMergeLoopBeg": 2, - "TotFileMergeLoopEnd": 1, - "TotFileMergeLoopErr": 0, - "TotFileMergePlan": 1, - "TotFileMergePlanErr": 0, - "TotFileMergePlanNone": 1, - "TotFileMergePlanOk": 0, - "TotFileMergePlanTasks": 0, - "TotFileMergePlanTasksDone": 0, - "TotFileMergePlanTasksErr": 0, - "TotFileMergePlanTasksSegments": 0, - "TotFileMergePlanTasksSegmentsEmpty": 0, - "TotFileMergeSegments": 0, - "TotFileMergeSegmentsEmpty": 0, - "TotFileMergeWrittenBytes": 0, - "TotFileMergeZapBeg": 0, - "TotFileMergeZapEnd": 0, - "TotFileMergeZapIntroductionTime": 0, - "TotFileMergeZapTime": 0, - "TotFileSegmentsAtRoot": 1, - "TotIndexTime": 0, - "TotIndexedPlainTextBytes": 0, - "TotIntroduceLoop": 3, - "TotIntroduceMergeBeg": 0, - "TotIntroduceMergeEnd": 0, - "TotIntroducePersistBeg": 0, - "TotIntroducePersistEnd": 0, - "TotIntroduceRevertBeg": 0, - "TotIntroduceRevertEnd": 0, - "TotIntroduceSegmentBeg": 0, - "TotIntroduceSegmentEnd": 0, - "TotIntroducedItems": 0, - "TotIntroducedSegmentsBatch": 0, - "TotIntroducedSegmentsMerge": 0, - "TotItemsToPersist": 0, - "TotMemMergeBeg": 0, - "TotMemMergeDone": 0, - "TotMemMergeErr": 0, - "TotMemMergeSegments": 0, - 
"TotMemMergeZapBeg": 0, - "TotMemMergeZapEnd": 0, - "TotMemMergeZapTime": 0, - "TotMemorySegmentsAtRoot": 0, - "TotOnErrors": 0, - "TotPersistLoopBeg": 2, - "TotPersistLoopEnd": 1, - "TotPersistLoopErr": 0, - "TotPersistLoopProgress": 0, - "TotPersistLoopWait": 2, - "TotPersistLoopWaitNotified": 0, - "TotPersistedItems": 0, - "TotPersistedSegments": 0, - "TotPersisterMergerNapBreak": 1, - "TotPersisterNapPauseCompleted": 1, - "TotPersisterSlowMergerPause": 0, - "TotPersisterSlowMergerResume": 0, - "TotSnapshotsRemovedFromMetaStore": 0, - "TotTermSearchersFinished": 13, - "TotTermSearchersStarted": 13, - "TotUpdates": 0, - "analysis_time": 0, - "batches": 0, - "deletes": 0, - "errors": 0, - "index_time": 0, - "num_bytes_read_at_query_time": 294892, - "num_bytes_used_disk": 34007668, - "num_bytes_used_disk_by_root": 15644303, - "num_bytes_used_disk_by_root_reclaimable": 0, - "num_bytes_written_at_index_time": 0, - "num_files_on_disk": 4, - "num_items_introduced": 0, - "num_items_persisted": 0, - "num_persister_nap_merger_break": 1, - "num_persister_nap_pause_completed": 1, - "num_plain_text_bytes_indexed": 0, - "num_recs_to_persist": 0, - "num_root_filesegments": 1, - "num_root_memorysegments": 0, - "term_searchers_finished": 13, - "term_searchers_started": 13, - "total_compaction_written_bytes": 0, - "updates": 0 - }, - "search_time": 40353204, - "searches": 1 - }, - "basic": { - "DocCount": 917 - }, - "partitions": {}, - "copyPartitionStats": { - "TotCopyPartitionStart": 0, - "TotCopyPartitionFinished": 0, - "TotCopyPartitionTimeInMs": 0, - "TotCopyPartitionFailed": 0, - "TotCopyPartitionRetries": 0, - "TotCopyPartitionErrors": 0, - "TotCopyPartitionSkipped": 0, - "TotCopyPartitionCancelled": 0, - "TotCopyPartitionOnHttp2": 0 - } - } - } -} ----- - -[#g-api-nsstats-index-name] -=== GET Query, Mutation, Partition, and Other Index Statistics - -Return query, mutation, document, partition, and compaction statistics for the Search index specified in the endpoint URL. -The endpoint returns the data as a JSON object. - -==== Endpoint - -GET /api/nsstats/index/`${INDEX_NAME}` - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].stats!read - -==== Role Required - -*Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index where you want to retrieve statistics. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. 
- -|==== - -.Sample response ----- -{ - "avg_grpc_internal_queries_latency": 0, - "avg_grpc_queries_latency": 0, - "avg_internal_queries_latency": 0, - "avg_queries_latency": 12.971812, - "batch_merge_count": 0, - "doc_count": 917, - "iterator_next_count": 0, - "iterator_seek_count": 0, - "last_access_time": "2023-12-05T17:09:09.326+00:00", - "num_bytes_live_data": 0, - "num_bytes_read_at_query_time": 269399, - "num_bytes_used_disk": 16692286, - "num_bytes_used_disk_by_root": 15643710, - "num_bytes_used_disk_by_root_reclaimable": 0, - "num_bytes_written_at_index_time": 12524609, - "num_files_on_disk": 2, - "num_mutations_to_index": 0, - "num_persister_nap_merger_break": 2, - "num_persister_nap_pause_completed": 2, - "num_pindexes_actual": 1, - "num_pindexes_target": 1, - "num_recs_to_persist": 0, - "num_root_filesegments": 1, - "num_root_memorysegments": 0, - "reader_get_count": 0, - "reader_multi_get_count": 0, - "reader_prefix_iterator_count": 0, - "reader_range_iterator_count": 0, - "timer_batch_store_count": 0, - "timer_data_delete_count": 0, - "timer_data_update_count": 917, - "timer_opaque_get_count": 2048, - "timer_opaque_set_count": 2048, - "timer_rollback_count": 0, - "timer_snapshot_start_count": 96, - "tot_seq_received": 87974, - "total_bytes_indexed": 8523422, - "total_bytes_query_results": 35846, - "total_compaction_written_bytes": 15643710, - "total_compactions": 0, - "total_grpc_internal_queries": 0, - "total_grpc_queries": 0, - "total_grpc_queries_error": 0, - "total_grpc_queries_slow": 0, - "total_grpc_queries_timeout": 0, - "total_grpc_request_time": 0, - "total_internal_queries": 0, - "total_queries": 2, - "total_queries_error": 0, - "total_queries_slow": 0, - "total_queries_timeout": 0, - "total_request_time": 25947312, - "total_term_searchers": 15, - "total_term_searchers_finished": 15, - "writer_execute_batch_count": 0 -} ----- - -[#g-api-stats-index-name-analyzeDoc] -=== POST Analyze Document - -Use the Search index specified in the endpoint URL to analyze a document from the request body. - -==== Endpoint - -POST /api/index/`${INDEX_NAME}`/analyzeDoc - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the Search index to use to analyze the document in the request body. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. - -|==== - -==== Request Body - -Add any valid JSON document to the request body. -For example: - ----- -{ - "name": "hello world", - "title": "couchbase blr" -} ----- - -==== Response Object - -In the following example, a Search index with the following settings analyzed the document from the sample Request Body: - -* A `keyword` analyzer for the `title` field. -* An `ngram` token filter with a `min` of 2 and a `max` of 5 for the `name` field. 
- ----- -{ - "status": "ok", - "analyzed": [ - { - "couchbase blr": { - "Term": "Y291Y2hiYXNlIGJscg==", - "Locations": [ - { - "Field": "title", - "ArrayPositions": [], - "Start": 0, - "End": 13, - "Position": 1 - } - ] - } - }, - { - "he": { - "Term": "aGU=", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 0, - "End": 5, - "Position": 1 - } - ] - }, - "hel": { - "Term": "aGVs", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 0, - "End": 5, - "Position": 1 - } - ] - }, - "hell": { - "Term": "aGVsbA==", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 0, - "End": 5, - "Position": 1 - } - ] - }, - "hello": { - "Term": "aGVsbG8=", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 0, - "End": 5, - "Position": 1 - } - ] - }, - "wo": { - "Term": "d28=", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 6, - "End": 11, - "Position": 2 - } - ] - }, - "wor": { - "Term": "d29y", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 6, - "End": 11, - "Position": 2 - } - ] - }, - "worl": { - "Term": "d29ybA==", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 6, - "End": 11, - "Position": 2 - } - ] - }, - "world": { - "Term": "d29ybGQ=", - "Locations": [ - { - "Field": "name", - "ArrayPositions": [], - "Start": 6, - "End": 11, - "Position": 2 - } - ] - } - }, - null - ] -} ----- - -[#g-api-scoped-status] -=== GET Index Status (Scoped) - -Returns the status of the Search index specified in the endpoint URL, including whether all index partitions are created and ready to use. - -==== Endpoint - -GET /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/status - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${BUCKET_NAME}` -| Yes -| String, URL path parameter -| The name of the bucket used to create the Search index definition. - -|[.param]`${SCOPE_NAME}` -| Yes -| String, URL path parameter -| The name of the scope used to create the Search index definition. - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -| The name of the Search index definition to status check. - -|==== - -.Sample success response ----- -{ - "status": "ok", - "indexStatus": "Ready" -} ----- -The Search Service returns a HTTP 200 status code with a response that includes `"status": "ok"`. - -== Index Querying - -Use the following endpoints to query the contents of a Search index. - -[#g-api-index-name-count] -=== GET Indexed Documents Count - -Returns the number of indexed documents inside the Search index specified in the endpoint URL. - -==== Endpoint - -GET /api/index/`${INDEX_NAME}`/count - -==== Permission Required - -cluster.bucket[[.var]`$BUCKET_NAME`].fts!read - -==== Role Required - -*Search Reader* or *Search Admin* - -==== Parameters - -[cols="1,1,1,2"] -|==== -|Parameter |Required? |Type |Description - -|[.param]`${INDEX_NAME}` -| Yes -| String, URL path parameter -a| The name of the index where you want to count the number of available documents. -You must use the fully qualified name for the index, which includes the bucket and scope. - -To view the full, scoped name for an index for use with this endpoint: - -. Go to the *Search* tab in the Couchbase Server Web Console. -. Point to the *Index Name* for an index. 
-
-|====
-
-.Sample success response
-----
-{
-  "status": "ok",
-  "count": 285
-}
-----
-The Search Service returns an HTTP 200 status code with a response that includes `"status": "ok"`.
-
-[#p-api-pindex-lookup]
-=== POST Return the Index Partition and ID for a Document (Scoped)
-
-Send a document ID in the request body to return the ID of the Search index partition where the document is stored.
-The endpoint returns a JSON object as a response.
-
-==== Endpoint
-
-POST /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/pindexLookup
-
-==== Permission Required
-
-cluster.bucket[[.var]`$BUCKET_NAME`].fts!read
-
-==== Role Required
-
-*Search Reader* or *Search Admin*
-
-==== Parameters
-
-[cols="1,1,1,2"]
-|====
-|Parameter |Required? |Type |Description
-
-|[.param]`${BUCKET_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the bucket used to create the Search index definition.
-
-|[.param]`${SCOPE_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the scope used to create the Search index definition.
-
-|[.param]`${INDEX_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the Search index definition that contains the document.
-
-|====
-
-==== Request Body
-
-Add a valid JSON object that contains the `docID` property, with a value that matches the document ID for a document in the Search index.
-
-For example:
-----
-{
-  "docID": "hotel_5848"
-}
-----
-
-.Sample success response
-----
-{
-  "status": "ok",
-  "pindexes": {
-    "travel-sample.inventory.travel-test": {
-      "id": "travel-sample.inventory.travel-test_123294e5a4efbe39_4c1c5584"
-    }
-  }
-}
-----
-The Search Service returns an HTTP 200 status code with a response that includes `"status": "ok"`.
-
-[#p-api-index-name-query]
-=== POST Query a Search Index
-
-Run a query formatted as a JSON object against the Search index definition specified in the endpoint URL.
-The endpoint returns a JSON object as a response.
-
-NOTE: This endpoint is for legacy Search indexes and may be deprecated in a future release.
-Use <<p-api-scoped-query,POST Query a Search Index (Scoped)>> instead.
-
-==== Endpoint
-
-POST /api/index/`${INDEX_NAME}`/query
-
-==== Permission Required
-
-cluster.bucket[[.var]`$BUCKET_NAME`].fts!read
-
-==== Role Required
-
-*Search Reader* or *Search Admin*
-
-==== Parameters
-
-[cols="1,1,1,2"]
-|====
-|Parameter |Required? |Type |Description
-
-|[.param]`${INDEX_NAME}`
-| Yes
-| String, URL path parameter
-a| The name of the Search index definition to query with the request body.
-You must use the fully qualified name for the index, which includes the bucket and scope.
-
-To view the full, scoped name for an index for use with this endpoint:
-
-. Go to the *Search* tab in the Couchbase Server Web Console.
-. Point to the *Index Name* for an index.
-
-|====
-
-==== Request Body
-
-For more detailed information about how to format the request body for the POST /api/index/`${INDEX_NAME}`/query endpoint, see xref:search:search-request-params.adoc[].
-
-For example, the following request body searches for the text `a sample query` in the documents included in the Search index:
-
-----
-{
-  "query": {
-    "query": "a sample query",
-    "boost": 1
-  },
-  "size": 10,
-  "from": 0,
-  "highlight": null,
-  "fields": null,
-  "facets": null,
-  "explain": false
-}
-----
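
For illustration, a request body like the preceding example could be submitted with a command such as the following.
This is a sketch only: the hostname, credentials, and the fully qualified index name are placeholders.

----
curl -XPOST -H "Content-Type: application/json" \
-u <username>:<password> \
http://localhost:8094/api/index/travel-sample.inventory.travel-test/query \
-d '{"query": {"query": "a sample query", "boost": 1}, "size": 10, "from": 0}'
----
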
-The following request body uses `from` and `size` for results paging, and uses `ctl` to set a timeout and the `at_plus` consistency level.
-On consistency, the index must have incorporated at least mutation sequence-number 123 for partition (vbucket) 0, and mutation sequence-number 234 for partition (vbucket) 1 (where vbucket 1 should have a vbucketUUID of a0b1c2):
-
-----
-{
-  "ctl": {
-    "timeout": 10000,
-    "consistency": {
-      "level": "at_plus",
-      "vectors": {
-        "customerIndex": {
-          "0": 123,
-          "1/a0b1c2": 234
-        }
-      }
-    }
-  },
-  "query": {
-    "query": "alice smith",
-    "boost": 1
-  },
-  "size": 10,
-  "from": 20,
-  "highlight": {
-    "style": null,
-    "fields": null
-  },
-  "fields": [
-    "*"
-  ],
-  "facets": null,
-  "explain": true
-}
-----
-
-The following hybrid search request body searches for a specified normalized color vector in `colorvect_dot`, but uses regular query parameters to limit the `brightness` value of the returned color to the range of `70-80`:
-
-----
-{
-  "fields": ["*"],
-  "query": {
-    "min": 70,
-    "max": 80,
-    "inclusive_min": false,
-    "inclusive_max": true,
-    "field": "brightness"
-  },
-  "knn": [
-    {
-      "k": 10,
-      "field": "colorvect_dot",
-      "vector": [ 0.707106781186548, 0, 0.707106781186548 ]
-    }
-  ],
-  "size": 10
-}
-----
-
-For more information about vector searches, see xref:vector-search:vector-search.adoc[].
-
-==== Response Object
-
-The response object has a status section that must be checked for every request.
-Under nearly all circumstances, the query response will be HTTP 200, even though individual index shards (pindexes) may encounter a timeout or return an error.
-
-==== Consistency and Timeouts
-
-A query can specify a timeout value, a consistency requirement, or both.
-This section explains how these affect query behavior, and how to handle the resulting query return values.
-
-* The query first passes through a logical first-phase consistency wait.
-If the timeout expires during this phase, a 416 error is returned, with a message saying that the request could not be satisfied.
-* If the consistency wait times out with a 416 error, the return value indicates the sequence-number range that was processed: the client therefore knows how far processing got, and has the option of retrying more intelligently.
-* In the second phase, the normal pindex timeout applies.
-This timeout starts whenever the first phase completes.
-From this point, the request returns a 200 HTTP response code, unless there is an internal server error.
-* The client must check the response status, which reports any errors or timeouts for each pindex.
-The response includes the number of errors, so the client can determine whether it needs the complete results, or can continue as long as enough pindexes return to give a reasonable user experience.
-Note that the query return status is 200 even if all pindexes return errors: it is therefore critical to check the response status, and code accordingly.
-* If the client sets the timeout very low (for example, 1ms), the client may receive a 200 response in which all pindexes have timed out, rather than a consistency-wait timeout.
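
As an illustration of that pattern, the per-pindex totals and errors can be inspected before the results are trusted.
This is a sketch only: the `query.json` filename is illustrative, and the field paths assume the status shape shown in the sample responses in this section.

----
curl -s -XPOST -H "Content-Type: application/json" \
-u <username>:<password> \
http://localhost:8094/api/index/travel-sample.inventory.travel-test/query \
-d @query.json | jq '{total: .status.total, failed: .status.failed, errors: .status.errors}'
----
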
-
-[#p-api-scoped-query]
-=== POST Query a Search Index (Scoped)
-
-Run a query formatted as a JSON object against the Search index definition specified in the endpoint URL.
-The endpoint returns a JSON object as a response.
-Unlike <<p-api-index-name-query,POST Query a Search Index>>, this endpoint is scoped, and does not require a fully qualified `${INDEX_NAME}` value.
-
-==== Endpoint
-
-POST /api/bucket/`${BUCKET_NAME}`/scope/`${SCOPE_NAME}`/index/`${INDEX_NAME}`/query
-
-==== Permission Required
-
-cluster.bucket[[.var]`$BUCKET_NAME`].fts!read
-
-==== Role Required
-
-*Search Reader* or *Search Admin*
-
-==== Parameters
-
-[cols="1,1,1,2"]
-|====
-|Parameter |Required? |Type |Description
-
-|[.param]`${BUCKET_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the bucket used to create the Search index definition.
-
-|[.param]`${SCOPE_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the scope used to create the Search index definition.
-
-|[.param]`${INDEX_NAME}`
-| Yes
-| String, URL path parameter
-| The name of the Search index definition to query with the request body.
-
-|====
-
-==== Request Body
-
-Add a JSON object to define the settings for your Search query.
-For more information about how to create a Search query JSON object, see xref:search:search-request-params.adoc[].
-
-.Sample failure response
-----
-{
-  "error": "rest_index: Query, indexName: travel-sample.inventory.travel-test, err: bleve: QueryBleve parsing searchRequest, err: unknown query type",
-  "request": {
-    "collections": [
-      "hotel"
-    ],
-    "ctl": {
-      "consistency": {
-        "level": "at_plus",
-        "results": "complete",
-        "vectors": {
-          "searchIndexName": {
-            "607/205096593892159": 2,
-            "640/298739127912798": 4
-          }
-        }
-      },
-      "timeout": 10000
-    },
-    "explain": true,
-    "from": 0,
-    "highlight": {
-      "fields": [
-        "reviews.content"
-      ],
-      "style": "html"
-    },
-    "includeLocations": false,
-    "limit": 10,
-    "offset": 0,
-    "query": {},
-    "score": "none",
-    "size": 10,
-    "sort": [
-      "reviews.Ratings.Cleanliness",
-      {
-        "by": "field",
-        "desc": false,
-        "field": "reviews.Ratings.Cleanliness",
-        "missing": "last",
-        "mode": "default",
-        "type": "number"
-      },
-      "-_score",
-      "-_id"
-    ]
-  },
-  "status": "fail"
-}
-----
-The Search Service returns a non-200 HTTP error code when a request fails.
-
-.Sample success response - regular query
-[source,json]
-----
-include::example$fts-sample-success-response.jsonc[]
-----
-The Search Service returns an HTTP 200 status code with a response that includes `"status": "ok"`.
-
-.Sample success response - hybrid vector search
-[source,json]
-----
-include::example$fts-sample-success-response-vector-search.jsonc[]
-----
\ No newline at end of file
diff --git a/modules/rest-api/pages/rest-fts-node.adoc b/modules/rest-api/pages/rest-fts-node.adoc
deleted file mode 100644
index b03a0c7b1e..0000000000
--- a/modules/rest-api/pages/rest-fts-node.adoc
+++ /dev/null
@@ -1,231 +0,0 @@
-= Node Configuration
-
-== Node Configuration
-
-[[g-api-cfg]]GET /api/cfg::
-Returns the node's current view of the cluster's configuration as JSON.
-+ -*Permission Required*: cluster.settings!read -+ -*Role Required*: Full-Admin, Cluster-Admin -+ -.Sample response ----- -{ - "indexDefs": { - "implVersion": "4.0.0", - "indexDefs": { - "myFirstIndex": { - "name": "myFirstIndex", - "params": "", - "planParams": { - "hierarchyRules": null, - "maxPartitionsPerPIndex": 0, - "nodePlanParams": null, - "numReplicas": 0, - "planFrozen": false - }, - "sourceName": "", - "sourceParams": "", - "sourceType": "nil", - "sourceUUID": "", - "type": "blackhole", - "uuid": "6cc599ab7a85bf3b" - } - }, - "uuid": "6cc599ab7a85bf3b" - }, - "indexDefsCAS": 3, - "indexDefsErr": null, - "nodeDefsKnown": { - "implVersion": "4.0.0", - "nodeDefs": { - "78fc2ffac2fd9401": { - "container": "", - "extras": "", - "hostPort": "0.0.0.0:8094", - "implVersion": "4.0.0", - "tags": null, - "uuid": "78fc2ffac2fd9401", - "weight": 1 - } - }, - "uuid": "2f0d18fb750b2d4a" - }, - "nodeDefsKnownCAS": 1, - "nodeDefsKnownErr": null, - "nodeDefsWanted": { - "implVersion": "4.0.0", - "nodeDefs": { - "78fc2ffac2fd9401": { - "container": "", - "extras": "", - "hostPort": "0.0.0.0:8094", - "implVersion": "4.0.0", - "tags": null, - "uuid": "78fc2ffac2fd9401", - "weight": 1 - } - }, - "uuid": "72d6750878551451" - }, - "nodeDefsWantedCAS": 2, - "nodeDefsWantedErr": null, - "planPIndexes": { - "implVersion": "4.0.0", - "planPIndexes": { - "myFirstIndex_6cc599ab7a85bf3b_0": { - "indexName": "myFirstIndex", - "indexParams": "", - "indexType": "blackhole", - "indexUUID": "6cc599ab7a85bf3b", - "name": "myFirstIndex_6cc599ab7a85bf3b_0", - "nodes": { - "78fc2ffac2fd9401": { - "canRead": true, - "canWrite": true, - "priority": 0 - } - }, - "sourceName": "", - "sourceParams": "", - "sourcePartitions": "", - "sourceType": "nil", - "sourceUUID": "", - "uuid": "64bed6e2edf354c3" - } - }, - "uuid": "6327debf817a5ec7", - "warnings": { - "myFirstIndex": [] - } - }, - "planPIndexesCAS": 5, - "planPIndexesErr": null, - "status": "ok" -} ----- - -[[p-api-cfgrefresh]]POST /api/cfgRefresh:: -Requests the node to refresh its configuration from the configuration provider. -+ -*Permission Required*: cluster.settings!write -+ -*Role Required*: Full-Admin, Cluster-Admin - -[[p-api-mgrkick]]POST /api/managerKick:: -Forces the node to replan resource assignments (by running the planner, if enabled) and to update its runtime state to reflect the latest plan (by running the janitor, if enabled). -+ -*Permission Required*: cluster.settings!write -+ -*Role Required*: Full-Admin, Cluster-Admin - -[[g-api-mgrmeta]]GET /api/managerMeta:: -Returns information on the node's capabilities, including available indexing and storage options as JSON, and is intended to help management tools and web UI's to be more dynamically metadata driven. -+ -*Permission Required*: cluster.settings!read -+ -*Role Required*: Full-Admin, Cluster-Admin - -== Node Diagnostics - -[[g-api-diag]]GET /api/diag:: -Returns the full set of diagnostic information from the node in one shot as JSON. -That is, the /api/diag response will be the union of the responses from the other REST API diagnostic and monitoring endpoints from the node, and is intended to make production support easier. 
-+
-For example, for a three-node cluster, you could capture the [.path]_/api/diag_ output of each node with something like:
-+
-----
-curl http://cbft-01:8094/api/diag > cbft-01.json
-curl http://cbft-02:8094/api/diag > cbft-02.json
-curl http://cbft-03:8094/api/diag > cbft-03.json
-----
-+
-The [.path]_/api/diag_ response JSON object can be quite large, from hundreds of kilobytes to much more.
-+
-The motivation for [.path]_/api/diag_ is to simplify working with the Couchbase community, forums, technical support, and other engineers, by making data capture from each FTS node a single step.
-+
-*Permission Required*: cluster.logs!read
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-[[g-api-log]]GET /api/log::
-Returns recent log messages and key events for the node as JSON.
-+
-*Permission Required*: cluster.logs!read
-+
-*Role Required*: Full-Admin, Cluster-Admin
-+
-.Sample response
-----
-{
-  "events": [],
-  "messages": []
-}
-----
-
-[[g-api-runtime]]GET /api/runtime::
-Returns information on the node's software, such as version strings and slow-changing runtime settings, as JSON.
-+
-*Permission Required*: cluster.settings!read
-+
-*Role Required*: Full-Admin, Cluster-Admin
-+
-.Sample response
-----
-{
-  "arch": "amd64",
-  "go": {
-    "GOMAXPROCS": 1,
-    "GOROOT": "/usr/local/go",
-    "compiler": "gc",
-    "version": "go1.4"
-  },
-  "numCPU": 8,
-  "os": "darwin",
-  "versionData": "4.0.0",
-  "versionMain": "v0.3.1"
-}
-----
-
-[[g-api-runtime-args]]GET /api/runtime/args::
-Returns information on the node's command-line parameters, environment variables, and O/S process values as JSON.
-+
-*Permission Required*: cluster.settings!read
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-[[p-api-runtime-cpu]]POST /api/runtime/profile/cpu::
-Requests the node to capture local CPU usage profiling information.
-+
-*Permission Required*: cluster.admin
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-[[p-api-runtime-memory]]POST /api/runtime/profile/memory::
-Requests the node to capture local memory usage profiling information.
-+
-*Permission Required*: cluster.admin
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-== Node Management
-
-[[p-api-runtime-gc]]POST /api/runtime/gc::
-Requests the node to perform a GC.
-+
-*Permission Required*: cluster.admin
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-== Node Monitoring
-
-[[g-api-runtime-stats]]GET /api/runtime/stats::
-Returns information on the node's low-level runtime stats as JSON.
-+
-*Role Required*: Full-Admin, Cluster-Admin
-
-[[g-api-runtime-statsmem]]GET /api/runtime/statsMem::
-Returns information on the node's low-level GC and memory-related runtime stats as JSON.
-+
-*Role Required*: Full-Admin, Cluster-Admin
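
For instance, the node-monitoring endpoints can be polled with a command like the following.
This is a sketch only: the hostname and credentials are placeholders, and jq is used here purely to make the output readable.

----
curl -s -u <username>:<password> http://localhost:8094/api/runtime/stats | jq '.'
----
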
diff --git a/modules/rest-api/pages/rest-fts-partition-file-transfer.adoc b/modules/rest-api/pages/rest-fts-partition-file-transfer.adoc
deleted file mode 100644
index eb7ff9ae44..0000000000
--- a/modules/rest-api/pages/rest-fts-partition-file-transfer.adoc
+++ /dev/null
@@ -1,80 +0,0 @@
-= Rebalance Based on File Transfer
-:description: The Search-Service REST API configures rebalance based on file transfer.
-
-[abstract]
-{description}
-
-[#http-methods-and-uris]
-== HTTP Method and URI
-
-----
-PUT /api/managerOptions
-----
-
-[#description]
-== Description
-
-The Search Service automatically partitions its indexes across all Search nodes in the cluster, ensuring optimal distribution following rebalance.
-
-To achieve this, in versions of Couchbase Server prior to 7.1, partitions that needed to be newly created were, by default, entirely _built_ on their newly assigned nodes.
-In 7.1+, by default, new partitions are created by the _transfer_ of partition files from old nodes to new nodes: this significantly enhances performance.
-This is an Enterprise-only feature, which requires all Search Service nodes _either_ to be running 7.1 or later; _or_ to be running 7.0.2, with the feature explicitly switched on by means of this endpoint.
-Note that users of 7.1+ can explicitly switch the feature _off_ by means of this endpoint; in which case _partition build_ is used to establish new partitions, rather than file transfer.
-
-During file transfer, should an unresolvable error occur, file transfer is automatically abandoned, and partition build is used instead.
-
-== Curl Syntax
-
-----
-curl -X PUT http://<host>:8094/api/managerOptions
-  -u <username>:<password>
-  -H "Content-type:application/json"
-  -d '{"disableFileTransferRebalance": [ "true" | "false" ]}'
-----
-
-If the value specified for the key `disableFileTransferRebalance` is `false` (which is the default in 7.1+), new Search-Service partitions are created during rebalance by means of partition-file transfer.
-If the value is `true`, partitions are created by means of partition build, from scratch, over DCP connections from the Data Service.
-
-== Responses
-
-Success returns `200 OK`, and the message `{"status":"ok"}`.
-If the URI is incorrectly specified, `404 Object Not Found` is returned.
-If the method is incorrectly specified, `405 Method Not Allowed` is returned, with the message `{"error":"Method not allowed for endpoint","status":"fail"}`.
-
-Failure to authenticate returns `401 Unauthorized`.
-
-== Examples
-
-The following example _disables_ the creation of new partitions by means of file transfer:
-
-----
-curl -X PUT http://localhost:8094/api/managerOptions \
--u Administrator:password \
--H "Content-type:application/json" \
--d '{"disableFileTransferRebalance": "true" }'
-----
-
-From this point, Search-Service index-partitions are _built_ on the new nodes assigned to them during rebalance.
-
-The following example _re-enables_ the creation of new partitions by means of file transfer:
-
-----
-curl -X PUT http://localhost:8094/api/managerOptions \
--u Administrator:password \
--H "Content-type:application/json" \
--d '{"disableFileTransferRebalance": "false" }'
-----
-
-From this point, Search-Service index-partitions are again created by _file transfer_, on the new nodes assigned to them during rebalance.
-
-In each case, successful execution returns the following:
-
-----
-{"status":"ok"}
-----
-
-== See Also
-
-An overview of rebalance for all services is provided at xref:learn:clusters-and-availability/rebalance.adoc[Rebalance].
-An overview of the REST API for the Search Service is provided at xref:rest-api:rest-fts.adoc[Search API].
-An architectural summary of the Search Service is provided at xref:learn:services-and-indexes/services/search-service.adoc#search-service-architecture[Search Service Architecture].
diff --git a/modules/rest-api/pages/rest-fts-query.adoc b/modules/rest-api/pages/rest-fts-query.adoc
deleted file mode 100644
index 9a3a0c9561..0000000000
--- a/modules/rest-api/pages/rest-fts-query.adoc
+++ /dev/null
@@ -1,347 +0,0 @@
-= Active Queries REST API
-
-== Overview
-
-The Active Queries REST API is provided by the Search service.
-This API enables you to get information about active FTS queries.
-
-The API schemes and host URLs are as follows:{blank}
-
-* `+http://node:8094/+`
-* `+https://node:18094/+` (for secure access)
-
-where [.var]`node` is the host name or IP address of a computer running the Search service.
-
-
-=== Version information
-[%hardbreaks]
-__Version__ : 7.0
-
-=== Produces
-
-* `application/json`
-
-== Paths
-
-*{toc-title}*
-
-* <<api-query-index,View Active Index Queries>>
-* <<api-query,View Active Node Queries>>
-* <<api-query-cancel,Cancel Active Queries>>
-
-[#api-query-index]
-=== View Active Index Queries
-
-....
-GET /api/query/index/{indexName}
-....
-
-==== Description
-
-Gets the details of all the active queries for any given FTS index in the system.
-
-==== Parameters
-
-[options="header", cols=".<2a,.<3a,.<9a,.<4a"]
-|===
-|Type|Name|Description|Schema
-|**Path**
-|**indexName** +
-__required__
-a|The name of a full-text index.
-|string
-
-|**Query**
-|**longerThan** +
-__optional__
-a|Filters the queries running beyond the given span of time.
-
-The duration string is a signed sequence of decimal numbers, each with an optional fraction and a unit suffix, such as `20s`, `-1.5h` or `2h45m`.
-Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
-|string (duration)
-|===
-
-==== Example HTTP request
-
-[#request-1]
-====
-Request 1: Find queries on the index `DemoIndex1` that have been running for longer than 1 ms.
-
-.Curl request
-[source,shell]
-----
-curl -XGET -H "Content-Type: application/json" \
--u <username>:<password> \
-'http://localhost:8094/api/query/index/DemoIndex1?longerThan=1ms'
-----
-====
-
-==== Example HTTP response
-
-====
-Result of <<request-1,Request 1>>.
-
-.Response 200
-[source,json]
-----
-{
-  "status": "ok",
-  "stats": {
-    "total": 3,
-    "successful": 3
-  },
-  "totalActiveQueryCount": 4,
-  "filteredActiveQueries": {
-    "indexName": "DemoIndex1",
-    "longerThan": "1ms",
-    "queryCount": 2,
-    "queryMap": {
-      "b91d75480470f979f65f04e8f20a1f7b-16": {
-        "QueryContext": {
-          "query": {
-            "query": "good restraunts in france"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex1"
-        },
-        "executionTime": "1.059754811s"
-      },
-      "f76b2d51397feee28c1e757ed426ef93-2": {
-        "QueryContext": {
-          "query": {
-            "query": "mexican food in england"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex1"
-        },
-        "executionTime": "1.058247896s"
-      }
-    }
-  }
-}
-----
-====
-
-[#api-query]
-=== View Active Node Queries
-
-....
-GET /api/query
-....
-
-==== Description
-
-Gets the details of all the active queries in any FTS node in a cluster.
-The response contains entries in `queryMap` whose keys have the format `nodeUUID-queryID`.
-Each key therefore indicates that the active query (the value) is running on the node whose UUID is `nodeUUID`, and that the query has the ID `queryID` on that node.
-
-==== Parameters
-
-[options="header", cols=".<2a,.<3a,.<9a,.<4a"]
-|===
-|Type|Name|Description|Schema
-|**Query**
-|**longerThan** +
-__optional__
-a|Filters the queries running beyond the given span of time.
-
-The duration string is a signed sequence of decimal numbers, each with an optional fraction and a unit suffix, such as `20s`, `-1.5h` or `2h45m`.
-Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
-|string (duration)
-|===
-
-==== Example HTTP request
-
-[#request-2]
-====
-Request 2: Find all active queries across the FTS cluster.
-
-.Curl request
-[source,shell]
-----
-curl -XGET -H "Content-Type: application/json" \
--u <username>:<password> \
-http://localhost:8094/api/query
-----
-====
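
Because each `queryMap` key embeds the coordinator node's UUID and the query's ID on that node, the keys for long-running queries can be listed with a filter such as the following.
This is a sketch only: it assumes the response shape shown in the surrounding examples, and the credentials are placeholders.

.Curl request
[source,shell]
----
curl -s -XGET -u <username>:<password> \
'http://localhost:8094/api/query?longerThan=5s' | jq '.filteredActiveQueries.queryMap | keys'
----
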
-
-[#request-3]
-====
-Request 3: Find all queries across the cluster that have been running for longer than 7s.
-
-.Curl request
-[source,shell]
-----
-curl -XGET -H "Content-Type: application/json" \
--u <username>:<password> \
-'http://localhost:8094/api/query?longerThan=7s'
-----
-====
-
-==== Example HTTP response
-
-====
-Result of <<request-2,Request 2>>.
-
-.Response 200
-[source,json]
-----
-{
-  "status": "ok",
-  "stats": {
-    "total": 3,
-    "successful": 3
-  },
-  "totalActiveQueryCount": 4,
-  "filteredActiveQueries": {
-    "queryCount": 4,
-    "queryMap": {
-      "b91d75480470f979f65f04e8f20a1f7b-17": {
-        "QueryContext": {
-          "query": {
-            "query": "good restraunts in france"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex1"
-        },
-        "executionTime": "2.144802122s"
-      },
-      "b91d75480470f979f65f04e8f20a1f7b-18": {
-        "QueryContext": {
-          "query": {
-            "query": "decent hotel with a pool in italy"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex2"
-        },
-        "executionTime": "2.144712787s"
-      },
-      "b91d75480470f979f65f04e8f20a1f7b-19": {
-        "QueryContext": {
-          "query": {
-            "query": "germany"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex2"
-        },
-        "executionTime": "2.143957727s"
-      },
-      "f76b2d51397feee28c1e757ed426ef93-3": {
-        "QueryContext": {
-          "query": {
-            "query": "mexican food in england"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex1"
-        },
-        "executionTime": "2.14286421s"
-      }
-    }
-  }
-}
-----
-====
-
-====
-Result of <<request-3,Request 3>>.
-
-.Response 200
-[source,json]
-----
-{
-  "status": "ok",
-  "stats": {
-    "total": 3,
-    "successful": 3
-  },
-  "totalActiveQueryCount": 3,
-  "filteredActiveQueries": {
-    "longerThan": "7s",
-    "queryCount": 1,
-    "queryMap": {
-      "b91d75480470f979f65f04e8f20a1f7b-21": {
-        "QueryContext": {
-          "query": {
-            "query": "decent hotel with a pool in italy"
-          },
-          "size": 10,
-          "from": 0,
-          "timeout": 120000,
-          "index": "DemoIndex1"
-        },
-        "executionTime": "10.541956741s"
-      }
-    }
-  }
-}
-----
-====
-
-[#api-query-cancel]
-=== Cancel Active Queries
-
-....
-POST /api/query/{queryID}/cancel
-....
-
-==== Description
-
-Allows the user to cancel an active query that's running longer than expected.
-This API is used alongside the view active queries APIs, which provide the `queryID` and `uuid` parameters that are used to cancel the query.
-
-==== Parameters
-
-[options="header", cols=".<2a,.<3a,.<9a,.<4a"]
-|===
-|Type|Name|Description|Schema
-|**Query ID**
-|**queryID** +
-__required__
-a|The active query's ID.
-|integer
-
-|**Node UUID**
-|**uuid** +
-__optional__
-a|Passed as a body parameter.
-`uuid` represents the UUID of the active query's coordinator node, where the query will be canceled.
-This parameter allows the user to cancel a query anywhere in the system by specifying the UUID of its coordinator node.
-|string
-|===
-
-==== Example HTTP request
-
-[#request-4]
-====
-Request 4: Cancel a long-running query with query ID 24, whose coordinator node has the UUID b91d75480470f979f65f04e8f20a1f7b.
-
-.Curl request
-[source,shell]
-----
-curl -X POST -H "Content-Type: application/json" -u <username>:<password> \
-http://localhost:8094/api/query/24/cancel -d \
-'{ "uuid": "b91d75480470f979f65f04e8f20a1f7b" }'
-----
-====
-
-==== Example HTTP response
-
-====
-Result of <<request-4,Request 4>>.
-
-.Response 200
-[source,json]
-----
-{
-  "status": "ok",
-  "msg": "query with ID '24' on node 'b91d75480470f979f65f04e8f20a1f7b' was aborted!"
-}
-----
-====
\ No newline at end of file
diff --git a/modules/rest-api/pages/rest-fts-statistics.adoc b/modules/rest-api/pages/rest-fts-statistics.adoc
deleted file mode 100644
index d2b2178ed6..0000000000
--- a/modules/rest-api/pages/rest-fts-statistics.adoc
+++ /dev/null
@@ -1,194 +0,0 @@
-= Getting Search Statistics
-:description: pass:q[Statistics for the Search Service can be retrieved with the `GET /api/nsstats` HTTP method and URI.]
-:page-topic-type: reference
-
-[abstract]
-{description}
-
-== HTTP method and URI
-
-----
-GET /api/nsstats
-----
-
-[#description]
-== Description
-
-This retrieves statistics on the Search Service.
-Information is provided on documents, partition indexes, mutations, compactions, queries, and more.
-
-[#curl-syntax]
-== Curl Syntax
-
-----
-curl -X GET -u <username>:<password>
-  http://<host>:<port>/api/nsstats
-----
-
-The user authenticated by the `username` and `password` must have been assigned the xref:learn:security/roles.adoc#full-admin[Full Admin] or xref:learn:security/roles.adoc#cluster-admin[Cluster Admin] role for the cluster; or the xref:learn:security/roles.adoc#bucket-admin[Bucket Admin] role for the bucket or buckets on which search indexes have been created.
-The `host` must be a node that hosts the Search Service.
-The `port` must be `8094` or (for secure access) `18094`.
-
-[#responses]
-== Responses
-
-If the call is successful, `200 OK` is given, and an object containing search-related statistics is returned.
-
-A malformed URI gives `404 Object Not Found`.
-Failure to authenticate gives `401 Unauthorized`.
-
-Successful authentication by a user who has not been assigned an appropriate role gives `403 Forbidden`, and returns an object similar to the following:
-
-----
-{
-  "message": "Forbidden. User needs one of the following permissions",
-  "permissions": [
-    "cluster.fts!read"
-  ]
-}
-----
-
-[#example]
-== Example
-
-In the following example, the command-line output is piped to the http://stedolan.github.io/jq[jq] command, to facilitate readability.
-
-----
-curl -v -u Administrator:password -XGET \
-http://10.143.194.102:8094/api/nsstats | jq '.'
-----
-
-If successful, the call returns an object similar to the following.
-Note that extensive statistics are returned on the two indexes that have been defined for the `travel-sample` bucket: `travel-sample-index` and `geoIndex`.
- ----- -{ - "batch_bytes_added": 0, - "batch_bytes_removed": 0, - "curr_batches_blocked_by_herder": 0, - "num_bytes_used_ram": 213924088, - "pct_cpu_gc": 0.001428879394431173, - "tot_batches_flushed_on_maxops": 0, - "tot_batches_flushed_on_timer": 0, - "tot_bleve_dest_closed": 0, - "tot_bleve_dest_opened": 12, - "tot_grpc_listeners_closed": 0, - "tot_grpc_listeners_opened": 1, - "tot_grpc_queryreject_on_memquota": 0, - "tot_grpcs_listeners_closed": 0, - "tot_grpcs_listeners_opened": 1, - "tot_http_limitlisteners_closed": 0, - "tot_http_limitlisteners_opened": 1, - "tot_https_limitlisteners_closed": 0, - "tot_https_limitlisteners_opened": 1, - "tot_queryreject_on_memquota": 0, - "tot_remote_grpc": 0, - "tot_remote_grpc_tls": 0, - "tot_remote_http": 0, - "tot_remote_http2": 0, - "total_gc": 1861, - "total_queries_rejected_by_herder": 0, - "travel-sample:geoIndex:avg_grpc_internal_queries_latency": 0, - "travel-sample:geoIndex:avg_grpc_queries_latency": 0, - "travel-sample:geoIndex:avg_internal_queries_latency": 0, - "travel-sample:geoIndex:avg_queries_latency": 41.771365, - "travel-sample:geoIndex:batch_merge_count": 0, - "travel-sample:geoIndex:doc_count": 16694, - "travel-sample:geoIndex:iterator_next_count": 0, - "travel-sample:geoIndex:iterator_seek_count": 0, - "travel-sample:geoIndex:last_access_time": "2020-03-30T04:42:53.701-07:00", - "travel-sample:geoIndex:num_bytes_live_data": 0, - "travel-sample:geoIndex:num_bytes_used_disk": 295152367, - "travel-sample:geoIndex:num_bytes_used_disk_by_root": 99845092, - "travel-sample:geoIndex:num_files_on_disk": 52, - "travel-sample:geoIndex:num_mutations_to_index": 0, - "travel-sample:geoIndex:num_persister_nap_merger_break": 6, - "travel-sample:geoIndex:num_persister_nap_pause_completed": 6, - "travel-sample:geoIndex:num_pindexes_actual": 6, - "travel-sample:geoIndex:num_pindexes_target": 6, - "travel-sample:geoIndex:num_recs_to_persist": 0, - "travel-sample:geoIndex:num_root_filesegments": 6, - "travel-sample:geoIndex:num_root_memorysegments": 0, - "travel-sample:geoIndex:reader_get_count": 0, - "travel-sample:geoIndex:reader_multi_get_count": 0, - "travel-sample:geoIndex:reader_prefix_iterator_count": 0, - "travel-sample:geoIndex:reader_range_iterator_count": 0, - "travel-sample:geoIndex:timer_batch_store_count": 0, - "travel-sample:geoIndex:timer_data_delete_count": 0, - "travel-sample:geoIndex:timer_data_update_count": 0, - "travel-sample:geoIndex:timer_opaque_get_count": 2731, - "travel-sample:geoIndex:timer_opaque_set_count": 1024, - "travel-sample:geoIndex:timer_rollback_count": 0, - "travel-sample:geoIndex:timer_snapshot_start_count": 0, - "travel-sample:geoIndex:total_bytes_indexed": 0, - "travel-sample:geoIndex:total_bytes_query_results": 13730, - "travel-sample:geoIndex:total_compaction_written_bytes": 0, - "travel-sample:geoIndex:total_compactions": 0, - "travel-sample:geoIndex:total_grpc_internal_queries": 0, - "travel-sample:geoIndex:total_grpc_queries": 0, - "travel-sample:geoIndex:total_grpc_queries_error": 0, - "travel-sample:geoIndex:total_grpc_queries_slow": 0, - "travel-sample:geoIndex:total_grpc_queries_timeout": 0, - "travel-sample:geoIndex:total_grpc_request_time": 0, - "travel-sample:geoIndex:total_internal_queries": 0, - "travel-sample:geoIndex:total_queries": 9, - "travel-sample:geoIndex:total_queries_error": 1, - "travel-sample:geoIndex:total_queries_slow": 0, - "travel-sample:geoIndex:total_queries_timeout": 0, - "travel-sample:geoIndex:total_request_time": 376022106, - 
"travel-sample:geoIndex:total_term_searchers": 2226, - "travel-sample:geoIndex:total_term_searchers_finished": 2226, - "travel-sample:geoIndex:writer_execute_batch_count": 0, - "travel-sample:travel-sample-index:avg_grpc_internal_queries_latency": 0, - "travel-sample:travel-sample-index:avg_grpc_queries_latency": 0, - "travel-sample:travel-sample-index:avg_internal_queries_latency": 0, - "travel-sample:travel-sample-index:avg_queries_latency": 18.477376, - "travel-sample:travel-sample-index:batch_merge_count": 0, - "travel-sample:travel-sample-index:doc_count": 16694, - "travel-sample:travel-sample-index:iterator_next_count": 0, - "travel-sample:travel-sample-index:iterator_seek_count": 0, - "travel-sample:travel-sample-index:last_access_time": "2020-03-31T05:56:15.264-07:00", - "travel-sample:travel-sample-index:num_bytes_live_data": 0, - "travel-sample:travel-sample-index:num_bytes_used_disk": 247966801, - "travel-sample:travel-sample-index:num_bytes_used_disk_by_root": 104572509, - "travel-sample:travel-sample-index:num_files_on_disk": 54, - "travel-sample:travel-sample-index:num_mutations_to_index": 0, - "travel-sample:travel-sample-index:num_persister_nap_merger_break": 6, - "travel-sample:travel-sample-index:num_persister_nap_pause_completed": 6, - "travel-sample:travel-sample-index:num_pindexes_actual": 6, - "travel-sample:travel-sample-index:num_pindexes_target": 6, - "travel-sample:travel-sample-index:num_recs_to_persist": 0, - "travel-sample:travel-sample-index:num_root_filesegments": 10, - "travel-sample:travel-sample-index:num_root_memorysegments": 0, - "travel-sample:travel-sample-index:reader_get_count": 0, - "travel-sample:travel-sample-index:reader_multi_get_count": 0, - "travel-sample:travel-sample-index:reader_prefix_iterator_count": 0, - "travel-sample:travel-sample-index:reader_range_iterator_count": 0, - "travel-sample:travel-sample-index:timer_batch_store_count": 0, - "travel-sample:travel-sample-index:timer_data_delete_count": 0, - "travel-sample:travel-sample-index:timer_data_update_count": 0, - "travel-sample:travel-sample-index:timer_opaque_get_count": 2715, - "travel-sample:travel-sample-index:timer_opaque_set_count": 1024, - "travel-sample:travel-sample-index:timer_rollback_count": 0, - "travel-sample:travel-sample-index:timer_snapshot_start_count": 0, - "travel-sample:travel-sample-index:total_bytes_indexed": 0, - "travel-sample:travel-sample-index:total_bytes_query_results": 308694, - "travel-sample:travel-sample-index:total_compaction_written_bytes": 0, - "travel-sample:travel-sample-index:total_compactions": 0, - "travel-sample:travel-sample-index:total_grpc_internal_queries": 0, - "travel-sample:travel-sample-index:total_grpc_queries": 0, - "travel-sample:travel-sample-index:total_grpc_queries_error": 0, - "travel-sample:travel-sample-index:total_grpc_queries_slow": 0, - "travel-sample:travel-sample-index:total_grpc_queries_timeout": 0, - "travel-sample:travel-sample-index:total_grpc_request_time": 0, - "travel-sample:travel-sample-index:total_internal_queries": 0, - "travel-sample:travel-sample-index:total_queries": 34, - "travel-sample:travel-sample-index:total_queries_error": 5, - "travel-sample:travel-sample-index:total_queries_slow": 0, - "travel-sample:travel-sample-index:total_queries_timeout": 0, - "travel-sample:travel-sample-index:total_request_time": 628280816, - "travel-sample:travel-sample-index:total_term_searchers": 1669, - "travel-sample:travel-sample-index:total_term_searchers_finished": 1669, - 
"travel-sample:travel-sample-index:writer_execute_batch_count": 0 -} ----- diff --git a/modules/rest-api/pages/rest-fts.adoc b/modules/rest-api/pages/rest-fts.adoc index 24b5867272..740ada3f78 100644 --- a/modules/rest-api/pages/rest-fts.adoc +++ b/modules/rest-api/pages/rest-fts.adoc @@ -1,6 +1,7 @@ = Search API :description: pass:q[The Search API supports the creation and management of indexes for _Full Text Search_.] :page-topic-type: reference +:page-toclevels: 2 == APIs in this Section @@ -11,3 +12,66 @@ For information on required roles and permissions, see xref:learn:security/roles For a list of the methods and URIs covered by these pages in this section, see the tables below. include::partial$rest-search-service-table.adoc[] + +== Legacy APIs + +These endpoints are for legacy Search indexes and may be deprecated in a future release. + +[#index-definition-legacy] +=== Index Definition + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `GET` +| `/api/index` +| xref:fts-rest-indexing:index.adoc#g-api-index[Get All Search Index Definitions] + +| `GET` +| `/api/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#g-api-index-name[Get Index Definition] + +| `PUT` +| `/api/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#p-api-index-name[Create or Update an Index Definition] + +| `DELETE` +| `/api/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#d-api-index-name[Delete Index Definition] + +|=== + +[#index-management-legacy] +=== Index Management + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `POST` +| `/api/index/{INDEX_NAME}/ingestControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-idx-name-ingestcontrol[Set Index Ingestion Control] + +| `POST` +| `/api/index/{INDEX_NAME}/planFreezeControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-idx-name-planfreezecontrol[Freeze Index Partition Assignment] + +| `POST` +| `/api/index/{INDEX_NAME}/queryControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-idx-name-querycontrol[Stop Queries on an Index] + +|=== + +[#index-querying-legacy] +=== Index Querying + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `POST` +| `/api/index/{INDEX_NAME}/query` +| xref:fts-rest-indexing:index.adoc#p-api-index-name-query[Query a Search Index] + +|=== diff --git a/modules/rest-api/pages/rest-index-stats.adoc b/modules/rest-api/pages/rest-index-stats.adoc deleted file mode 100644 index 963999de6c..0000000000 --- a/modules/rest-api/pages/rest-index-stats.adoc +++ /dev/null @@ -1,6 +0,0 @@ -= Index Statistics API - -include::partial$index-stats/overview.adoc[tag=body] -include::partial$index-stats/paths.adoc[] -include::partial$index-stats/definitions.adoc[] -include::partial$index-stats/security.adoc[] \ No newline at end of file diff --git a/modules/rest-api/pages/rest-initialize-cluster.adoc b/modules/rest-api/pages/rest-initialize-cluster.adoc index f9efa9e8b3..0362c5e058 100644 --- a/modules/rest-api/pages/rest-initialize-cluster.adoc +++ b/modules/rest-api/pages/rest-initialize-cluster.adoc @@ -37,11 +37,11 @@ curl -X POST http://:8091/clusterInit -d hostname= -d username= -d password= - -d data_path= - -d index_path= - -d cbas_path= - -d eventing_path= - -d java_home= + -d dataPath= + -d indexPath= + -d analyticsPath= + -d eventingPath= + -d javaHome= -d sendStats=true -d clusterName= -d services= @@ -73,13 +73,13 @@ This parameter must be specified. A string that will be the password for the new cluster. This parameter must be specified. 
-* `data_path`, `index_path`, `cbas_path`, `eventing_path`. +* `dataPath`, `indexPath`, `analyticsPath`, `eventingPath`. Paths for the storage of data to be used by the Data, Index, Analytics, and Eventing Services. All paths must be writable by user `couchbase`. These parameters are optional. For the default values, see xref:rest-api:rest-initialize-node.adoc[Initializing a Node]. -* `java_home`. +* `javaHome`. Location of the JRE to be used by the Analytics Service. The specified path must be writable by user `couchbase`. This parameter is optional. @@ -91,7 +91,7 @@ See the xref:cli:cbcli/couchbase-cli-cluster-init.adoc#:~:text=software%20update It's always set to `true` for Couchbase Server Community Edition. In Couchbase Server Enterprise Edition, you can set the value to the default `true` or `false`. -* `cluster_name`. +* `clusterName`. A name for the cluster. This name is for convenience of identification, and will not be used for network access. This parameter is optional. diff --git a/modules/rest-api/pages/rest-initialize-node.adoc b/modules/rest-api/pages/rest-initialize-node.adoc index 6903ad734b..cc4a68d581 100644 --- a/modules/rest-api/pages/rest-initialize-node.adoc +++ b/modules/rest-api/pages/rest-initialize-node.adoc @@ -40,7 +40,7 @@ Per platform, the default data-folder locations for all services are: ---- curl -X POST http://:8091/nodes/self/controller/settings -u : - -d data_path= + -d path= -d index_path= -d cbas_path= -d eventing_path= @@ -64,7 +64,7 @@ The following example establishes the paths for the Data, Index, Analytics, and ---- curl -X POST \ http://10.142.181.103:8091/nodes/self/controller/settings \ - -d 'data_path=%2Fopt%2Fcouchbase%2Fvar%2Flib%2Fcouchbase%2Fdata&' \ + -d 'path=%2Fopt%2Fcouchbase%2Fvar%2Flib%2Fcouchbase%2Fdata&' \ -d 'index_path=%2Fopt%2Fcouchbase%2Fvar%2Flib%2Fcouchbase%2Fidata&' \ -d 'cbas_path=%2Fopt%2Fcouchbase%2Fvar%2Flib%2Fcouchbase%2Fadata&' \ -d 'eventing_path=%2Fopt%2Fcouchbase%2Fvar%2Flib%2Fcouchbase%2Fedata&' diff --git a/modules/rest-api/pages/rest-intro.adoc b/modules/rest-api/pages/rest-intro.adoc index fb9cefc8ac..5cc0d7874a 100644 --- a/modules/rest-api/pages/rest-intro.adoc +++ b/modules/rest-api/pages/rest-intro.adoc @@ -153,7 +153,7 @@ include::partial$rest-search-service-table.adoc[] == Eventing Service API The _Eventing Service_ REST API provides methods for working with _Eventing Functions_. -The complete API is listed at xref:eventing:eventing-api.adoc[Eventing REST API]. +The complete API is listed at xref:eventing-rest-api:index.adoc[]. == Analytics Service API diff --git a/modules/rest-api/pages/rest-sample-buckets.adoc b/modules/rest-api/pages/rest-sample-buckets.adoc index 7bf16099a2..a5ee97df9d 100644 --- a/modules/rest-api/pages/rest-sample-buckets.adoc +++ b/modules/rest-api/pages/rest-sample-buckets.adoc @@ -107,5 +107,4 @@ See xref:rest-get-cluster-tasks.adoc[] for more information. * For an overview of sample buckets, see xref:manage:manage-settings/install-sample-buckets.adoc[]. * xref:rest-api:rest-bucket-delete.adoc[] explains deleting buckets using the REST-API. -* xref:cli:cbdocloader-tool.adoc[cbdocloader] explains how to install sample buckets using the command line interface. * xref:manage:manage-settings/install-sample-buckets.adoc[] explains how to load sample buckets using the Couchbase Server Web Console. 
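
For context, the sample buckets discussed on that page can also be loaded directly through the REST API, as in the following sketch.
The credentials are placeholders; the `/sampleBuckets/install` endpoint is the standard sample-bucket installer.

[source, console]
----
curl -X POST -u Administrator:password \
  http://localhost:8091/sampleBuckets/install \
  -d '["travel-sample"]'
----
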
diff --git a/modules/rest-api/pages/rest-statistics-multiple.adoc b/modules/rest-api/pages/rest-statistics-multiple.adoc index 3b6495f132..d561b115b5 100644 --- a/modules/rest-api/pages/rest-statistics-multiple.adoc +++ b/modules/rest-api/pages/rest-statistics-multiple.adoc @@ -56,13 +56,13 @@ Each object takes the following form: { "label": , "value": , - "operator": "=" | "!=" | "=~" | "~=" + "operator": "=" | "!=" | "=~" | "!~" | "any" | "not_any" } ---- The value of the key `label`, `label_name`, must be a string that specifies how the metric is identified: for example, `name`, or `proc`. The value of the key `value`, `label_val`, must be a string that is the actual name used to identify the metric: for example, `sys_cpu_utilization_rate`. -The value of the key `"operator"` must be `=`, `!=`, `=~`, or `~=`. +The value of the key `"operator"` must be `=` | `!=` | `=~` | `!~` | `any` | `not_any`. * `applyFunctions.` Can be any of the functions described in the section xref:rest-api:rest-statistics-single.adoc#function[function], on the page xref:rest-api:rest-statistics-single.adoc[Getting a Single Statistic]. diff --git a/modules/rest-api/pages/rest-xdcr-adv-settings.adoc b/modules/rest-api/pages/rest-xdcr-adv-settings.adoc index 45e1c8fa3b..e6e5b7103e 100644 --- a/modules/rest-api/pages/rest-xdcr-adv-settings.adoc +++ b/modules/rest-api/pages/rest-xdcr-adv-settings.adoc @@ -94,32 +94,58 @@ If successful, the call returns an object similar to the following: ---- { + "cLogConnPoolGCIntervalMs": 60000, + "cLogConnPoolLimit": 30, + "cLogConnPoolReapIntervalMs": 120000, + "cLogErrorTimeWindowMs": 120000, + "cLogMaxErrorCount": 10, + "cLogNetworkRetryCount": 5, + "cLogNetworkRetryIntervalMs": 2000, + "cLogPoolGetTimeoutMs": 5000, + "cLogQueueCapacity": 6000, + "cLogReattemptDurationMs": 600000, + "cLogSetTimeoutMs": 5000, + "cLogWorkerCount": 20, + "casDriftThresholdSecs": 3900, "checkpointInterval": 600, "ckptSvcCacheEnabled": true, + "collectionsOSOMode": true, "compressionType": "Auto", + "conflictLogging": {}, + "dcpEnablePurgeRollback": false, "desiredLatency": 50, + "disableHlvBasedShortCircuit": false, "docBatchSizeKb": 2048, "failureRestartInterval": 10, + "filterBinary": false, "filterBypassExpiry": false, "filterBypassUncommittedTxn": false, "filterDeletion": false, "filterExpiration": false, + "genericServicesLogLevel": { + < ... diagnostic items cut out due to length ... 
> + }, "goGC": 100, "goMaxProcs": 4, - "hlvPruningWindowSec": 259200, "jsFunctionTimeoutMs": 20000, "logLevel": "Info", "mergeFunctionMapping": {}, + "mobile": "Off", "networkUsageLimit": 0, "optimisticReplicationThreshold": 256, + "preCheckCasDriftThresholdHours": 8760, "preReplicateVBMasterCheck": true, "priority": "High", "replicateCkptIntervalMin": 20, + "retryOnErrExceptAuthErrMaxWaitSec": 360, "retryOnRemoteAuthErr": true, - "retryOnRemoteAuthErrMaxWaitSec": 3600, + "retryOnRemoteAuthErrMaxWaitSec": 360, + "skipReplSpecAutoGc": false, "sourceNozzlePerNode": 2, + "targetTopologyLogFrequency": 1800, "statsInterval": 1000, "targetNozzlePerNode": 2, + "targetTopologyLogFrequency": 1800, "workerBatchSize": 500 } ---- @@ -209,6 +235,7 @@ If the call is successful, an object containing the settings for the specified r ---- { + "casDriftThresholdSecs": 100, "checkpointInterval": 600, "ckptSvcCacheEnabled": true, "colMappingRules": {}, @@ -217,29 +244,35 @@ If the call is successful, an object containing the settings for the specified r "collectionsMirroringMode": false, "collectionsOSOMode": true, "compressionType": "Auto", + "dcpEnablePurgeRollback": false, "desiredLatency": 50, "docBatchSizeKb": 2048, "failureRestartInterval": 10, + "filterBinary": false, "filterBypassExpiry": false, "filterBypassUncommittedTxn": false, "filterDeletion": false, "filterExpiration": false, "filterExpression": "", - "hlvPruningWindowSec": 259200, "jsFunctionTimeoutMs": 20000, "logLevel": "Info", "mergeFunctionMapping": {}, + "mobile": "Off", "networkUsageLimit": 0, "optimisticReplicationThreshold": 256, "pauseRequested": false, + "preCheckCasDriftThresholdHours": 8760, "preReplicateVBMasterCheck": true, "priority": "High", "replicateCkptIntervalMin": 20, + "retryOnErrExceptAuthErrMaxWaitSec": 360, "retryOnRemoteAuthErr": true, - "retryOnRemoteAuthErrMaxWaitSec": 3600, + "retryOnRemoteAuthErrMaxWaitSec": 360, + "skipReplSpecAutoGc": false, "sourceNozzlePerNode": 2, "statsInterval": 1000, "targetNozzlePerNode": 2, + "targetTopologyLogFrequency": 1800, "type": "xmem", "workerBatchSize": 500 } @@ -259,6 +292,7 @@ If successful, the call returns an object containing all current settings for th ---- { + "casDriftThresholdSecs": 100, "checkpointInterval": 700, "ckptSvcCacheEnabled": true, "colMappingRules": {}, @@ -267,34 +301,105 @@ If successful, the call returns an object containing all current settings for th "collectionsMirroringMode": false, "collectionsOSOMode": true, "compressionType": "Auto", + "dcpEnablePurgeRollback": false, "desiredLatency": 50, "docBatchSizeKb": 2048, "failureRestartInterval": 10, + "filterBinary": false, "filterBypassExpiry": false, "filterBypassUncommittedTxn": false, "filterDeletion": false, "filterExpiration": false, "filterExpression": "", - "hlvPruningWindowSec": 259200, "jsFunctionTimeoutMs": 20000, "logLevel": "Info", "mergeFunctionMapping": {}, + "mobile": "Off", "networkUsageLimit": 0, "optimisticReplicationThreshold": 256, "pauseRequested": false, + "preCheckCasDriftThresholdHours": 8760, "preReplicateVBMasterCheck": true, "priority": "High", "replicateCkptIntervalMin": 20, + "retryOnErrExceptAuthErrMaxWaitSec": 360, "retryOnRemoteAuthErr": true, - "retryOnRemoteAuthErrMaxWaitSec": 3600, + "retryOnRemoteAuthErrMaxWaitSec": 360, + "skipReplSpecAutoGc": false, "sourceNozzlePerNode": 2, "statsInterval": 1000, "targetNozzlePerNode": 2, + "targetTopologyLogFrequency": 1800, "type": "xmem", "workerBatchSize": 500 } ---- +[#change-existing-replication-with-mobile-active] +=== Change 
Settings for an Existing Replication to Set mobile=Active
+
+The following example modifies the value of `mobile` to `Active` for an existing replication:
+
+----
+curl -X POST -u Administrator:password http://localhost:8091/settings/replications/<replication_id> -d mobile=Active | jq '.'
+----
+
+For information about _XDCR with Sync Gateway mobile clusters in a bi-directional, active-active replication_, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc[XDCR Active-Active with Sync Gateway].
+
+=== Change Settings for XDCR Generic Services Log Levels
+
+The following example modifies the log levels for XDCR Generic Services, for a specific replication.
+Usually, you modify the log levels only when requested by Couchbase Support.
+
+----
+curl -X POST -u Administrator:password http://localhost:8091/settings/replications -d 'genericServicesLogLevel={"RemoteClusterService":"Debug","ReplicationSpecService":"Error","BucketTopologyService":"Debug","CheckpointService":"Error"}'
+----
+
+If successful, the call returns an object containing all the current log-level settings for the replication's Generic Services, including what's changed:
+
+----
+{
+...
+"genericServicesLogLevel": {
+    "AdminPort": "Info",
+    "AuditService": "Info",
+    "BackfillManager": "Info",
+    "BackfillReplicationService": "Info",
+    "BucketTopologyService": "Debug",
+    "CapiService": "Info",
+    "CheckpointService": "Error",
+    "CollectionsManifestService": "Info",
+    "Default": "Info",
+    "GenericSupervisor": "Info",
+    "GlobalSettingsService": "Info",
+    "HttpServer": "Info",
+    "InternalSettingsService": "Info",
+    "ManifestService": "Info",
+    "MessageUtils": "Info",
+    "MetaKVMetadataService": "Info",
+    "MigrationService": "Info",
+    "P2PManagerService": "Info",
+    "PipelineManager": "Info",
+    "RemoteClusterService": "Debug",
+    "ReplicationSettingService": "Info",
+    "ReplicationSpecService": "Error",
+    "ResourceManager": "Info",
+    "SecurityService": "Info",
+    "ThroughputThrottlerService": "Info",
+    "TopologyService": "Info",
+    "UtilsService": "Info",
+    "XDCRFactory": "Info"
+  },
+...
+}
+----
+
+To view the current log levels for XDCR Generic Services at any time, use the following command:
+
+----
+curl -s -X GET -u Administrator:password http://localhost:8091/settings/replications | jq
+----
+
 [#xdcr-advanced-settings-rest]
 == List of Advanced Settings
@@ -409,6 +514,20 @@ For supported expressions, see xref:xdcr-reference:xdcr-filtering-reference-intr
 This setting can only be established for and retrieved from an individual replication: it cannot be established or retrieved as part of global settings.
 
+| `genericServicesLogLevel`
+| JSON Object
+| The log-level settings for the replication's Generic Services.
+
+The keys represent service names, and the associated values represent log levels.
+
+In the example, the log level for `RemoteClusterService` is set to `Debug`, and for `ReplicationSpecService`, it's set to `Error`.
+
+By default, non-pipeline-specific services use the `Info` log level.
+You can also set the log level to `Error`, `Debug`, or `Trace`.
+
+The `Default` key sets the log level for shared utilities like connection pools and data pools, which are used by multiple services.
+All these utilities use the same default logger, so changing the log level in `Default` affects all logger-dependent utilities.
+
 | `goGC`
 | Integer (0 to 100) or String ("off")
 | Default: 100.
@@ -455,6 +574,11 @@ This setting can be established and retrieved either for an individual replicati | JSON Object | Each of the key-value pairs in the JSON object should map a collection-specifier (in the form `scope.collection`) to the name of a defined merge-function. +| `mobile` +| Active or Off +| Default: `Off`. +When set to `Active`, enables the setting _XDCR Active-Active with Sync Gateway 4.0+_ on the clusters of both sides of the replication. The default value `Off` indicates that the replication setup supports either _XDCR Active-Passive with Sync Gateway_ or _XDCR Active-Active without Sync Gateway_. For more information, see xref:learn:clusters-and-availability/xdcr-active-active-sgw.adoc[XDCR Active-Active with Sync Gateway]. + | `networkUsageLimit` | Integer | Default: 0. @@ -471,6 +595,8 @@ Documents with sizes less than this threshold (in bytes) will be replicated opti This setting can be established and retrieved either for an individual replication or globally. +XDCR optimistic replication is applicable only when the bucket property `enableCrossClusterVersioning` is disabled. For information about the property `enableCrossClusterVersioning`, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc#version-pruning-window-hrs[XDCR enableCrossClusterVersioning]. + | `pauseRequested` | Boolean (true or false) | Default: false. diff --git a/modules/rest-api/pages/rest-xdcr-create-ref.adoc b/modules/rest-api/pages/rest-xdcr-create-ref.adoc index 8eae507104..85f6cb11dc 100644 --- a/modules/rest-api/pages/rest-xdcr-create-ref.adoc +++ b/modules/rest-api/pages/rest-xdcr-create-ref.adoc @@ -1,128 +1,192 @@ -= Creating a Reference += Creating or Editing a Reference -:description: pass:q[The REST API can be used to create an XDCR reference to a destination cluster.] +:description: pass:q[You can use the REST API to create or edit an XDCR reference to a target cluster.] :page-topic-type: reference +:page-toclevels: 4 [abstract] {description} == Description -On the cluster that is intended to be a _source_ for XDCR, a _reference_ to an intended _target_ cluster must first be defined. -The target cluster is typically a different cluster from the source cluster; but may be the same cluster. +Use the `/pools/default/remoteClusters` REST API endpoint to create an XDCR reference to a target cluster. +You must create a reference to a target cluster on the source cluster before you can create a replication. +The target cluster is usually a different cluster from the source cluster. +However, you can create a replication where the source and target are the same cluster. -A source cluster's references can be defined by means of the REST API. +You can edit an existing reference using the REST API by adding its name to the URI. -The Full Admin, Cluster Admin, or XDCR Admin role is required. +== HTTP Method and URI -== HTTP method and URI +.Create a new reference +[source, uri] ---- POST /pools/default/remoteClusters +---- -POST /pools/default/remoteClusters/ +.Edit an existing reference +[source, uri] +---- +POST /pools/default/remoteClusters/{REFERENCE_NAME} ---- -== Curl Syntax +.Path Parameters +[cols="2,3,2", caption=] +|=== +|Name | Description | Schema + +| `REFERENCE_NAME` +| The name of an existing reference. 
+| String
+|===
+
+== curl Syntax
+
+[source, console]
 ----
 curl -v -u <username>:<password>
-  http://<ip-address-or-domain-name>:8091/pools/default/remoteClusters/[<target-cluster-local-name>]
+  http://<host>:8091/pools/default/remoteClusters/[{REFERENCE_NAME}]
   -d name=<reference-name>
-  -d hostname=<target-cluster-ip-address-or-domain-name>
+  -d hostname=<hostname>[:<port>]
   [-d network_type='external']
-  [-d username=<target-cluster-admin-name>]
-  [-d password=<target-cluster-admin-password>]
   [-d demandEncryption=[ 0 | 1 ] ]
   [-d secureType=[ 'none' | 'half' | 'full'] ]
-  [--data-urlencode "certificate=$(cat <path-to-root-certificate>)"]
-  [--data-urlencode "clientCertificate=$(cat /Users/username/clientcert/travel-sample.pem)"]
-  [--data-urlencode "clientKey=$(cat /Users/username/clientcert/travel-sample.key)"]
+  [-d username=<username>]
+  [-d password=<password>]
+  [--data-urlencode "certificate=$(cat <path-to-root-certificate>)"]
+  [--data-urlencode "clientCertificate=$(cat <path-to-client-certificate>)"]
+  [--data-urlencode "clientKey=$(cat <path-to-client-key>)"]
 ----
 
-The value of the `name` parameter (`target-cluster-local-name`) is for local reference only, and so need not correspond to any network-visible name established for the target cluster.
-If an existing reference is being edited, the existing value of `name` should be specified as a path-parameter that terminates the endpoint: in such cases, the `name` flag itself must still be included in the payload, specifying either the existing or a new value.
-
-The value of the `hostname` parameter (`target-cluster-ip-address-or-domain-name`) determines the target cluster to which the connection will be made.
-This value can specify either the _internal_ or (if one has been configured) the _external_ address of the target cluster.
-For information on using DNS SRV in this context, see xref:xdcr-reference:xdcr-security-and-networking.adoc[XDCR Security and Networking].
-
-The value of the optional `network_type` parameter must be `external`.
-If this is specified, and an external network has been configured for the target cluster, use of the target cluster's external address is attempted.
-If no external network has been configured, and the hostname refers to a valid internal address for the cluster, the internal address is used.
-
-The values specified for the optional `username` and `password` parameters must be the username and password for the _target_ cluster, respectively.
-These values must be established if `none` or `half` is the value of the `secureType` parameter.
-These values must also be established if `full` is the value of the `secureType` parameter, and authentication with client certificates is not being attempted.
-However, if `full` is the value of `secureType`, and authentication with client certificates _is_ being attempted, these values must _not_ be established.
-
-The optional `secureType` parameter can be `none` (which is the default), `half`, or `full`; and thus specifies the type of security to be used for the connection.
-If the optional `demandEncryption` parameter is specified with a value of `1`, a secure connection is enforced: in such a case, if `secureType` is specified as `half` or `full`, the security of the connection is established according to the value of `secureType`; whereas if `secureType` is _not_ specified, the security of the connection is established as `full`.
-The default value of `demandEncryption` is `0`.
-
-If `secureType` is `full`, the local pathname of the target cluster's root certificate must be specified, as the value of the `--data-urlencode` flag.
-Note that this additionally requires _either_ that values be established for the `username` and `password` parameters; _or_ that no values be established for the `username` and `password` parameters, and instead, local pathnames to a client certificate and corresponding client private key be established.
-Each certificate or key must be specified as the value of a separate `data-urlencode` flag.
-
-If `secureType` is `half`, and the target cluster is running a pre-5.5 version of Couchbase Server, the local pathname of the target cluster's root certificate must be specified, as the value of the `--data-urlencode` flag.
-However, if the target cluster is running 5.5 or later, the pathname need not be specified.
-
-Note that Capella CAs are automatically trusted by XDCR when the REST API is used to enable fully secure replications from Couchbase Enterprise Server to Capella: in such cases, the option `--data-urlencode "certificate=$(cat <path-to-certificate>)"`, provided for specifying the CA, does not need to be used.
-See xref:manage:manage-xdcr/secure-xdcr-replication.adoc#capella-trusted-cas[Capella Trusted CAs].
+.POST Parameters
+[cols="2,3,3", caption=]
+|===
+|Name | Description | Schema

+| `name`
+| A local name for the reference.
+The name does not need to correspond to any network-visible name for the target cluster.
+When editing an existing reference by supplying its name in the REST API URI, you must still supply the `name` parameter.
+Set it to the reference's existing name to keep the name unchanged.
+If you set the `name` parameter to a different value, Couchbase Server renames the reference.
+| String
+
+| `hostname`
+| The hostname or IP address of the reference's target cluster.
+This value can specify either the internal address or external address (if one has been configured) of the target cluster.
+For information about using DNS SRV in this context, see xref:xdcr-reference:xdcr-security-and-networking.adoc[XDCR Security and Networking].
+| String
+
+| `network_type`
+| Whether the network address specified in `hostname` is internal or external.
+| String.
+Must be `external` if the network is external.
+Do not supply this parameter if the network is internal.
+
+| `demandEncryption`
+| Whether to use encryption for the reference's connection to the target cluster.
+a| Integer.
+Valid values:
+
+* `0`: The default value.
+Couchbase Server does not use encryption for the connection to the target cluster.
+* `1`: Encrypt the connection to the target cluster.
+Use the `secureType` parameter to set whether the connection is partially or fully encrypted.
+
+| `secureType`
+| Optional parameter that sets whether the connection to the target cluster is encrypted, and if so, whether it partially or fully encrypts communication.
+If you do not supply this parameter and you set the `demandEncryption` parameter to `1`, `secureType` defaults to `full`.
+a| String. Valid settings are:
+
+* `none`: (default value) Couchbase Server does not use encryption for the connection to the target cluster.
+Couchbase Server sends all traffic unencrypted, including the password.
+* `half`: Couchbase Server uses encryption only when sending the password to the target cluster during authentication.
+The data it sends to the target is not encrypted.
+When using this value, you must provide a username and password.
+* `full`: Couchbase Server uses encryption to secure all communication with the target cluster.
+When you choose this value, you must supply a path to a local copy of the target cluster's root certificate in the `certificate` parameter (except for Capella; see <<capella_cert_note,the note after this table>>). You must also supply either a username and password or paths to a client certificate and key.
+
+| `username` and `password`
+| The username and password to use when authenticating with the target cluster.
+You must supply these parameters if you did not supply the `secureType` parameter, or if you set it to `none` or `half`.
+However, if `secureType` is `full`, you can supply either a username and password or a client certificate and key.
+| String
+
+| `certificate`
+| The local path to a copy of the root CA of the target cluster.
+You must supply this parameter if you set `secureType` to `full`.
+| URL-encoded string
+
+| `clientCertificate` and `clientKey`
+| The client certificate and key that Couchbase Server uses to authenticate with the target cluster.
+Set these parameters only if you set `secureType` to `full` and you have chosen to use a certificate instead of a username and password for authentication.
+| URL-encoded string
+
+|===
+
+[#capella_cert_note]
+NOTE: XDCR automatically trusts Capella root certificates when you use the REST API to enable fully secure replications from Couchbase Enterprise Server to Capella.
+In this case, you do not need to supply the `certificate` parameter to the command.
+See xref:manage:manage-xdcr/secure-xdcr-replication.adoc#capella-trusted-cas[Capella Trusted CAs] for more information.

== Responses

-Successful execution returns `200 OK`, establishes the reference, and returns an object whose key-value pairs provide details of the reference.
-The keys are as follows:
-
-* `certificate`.
-The root certificate for the target cluster, if one was used, in the creation of a `half` secure or `full` secure connection.
-
-* `clientCertificate`.
-The client certificate for the source cluster, if one was used, in the creation of a `full` secure connection.
-
-* `deleted`.
-Whether the reference has been deleted.
-The value can be one of the booleans `true` and `false`.
-
-* `hostname`.
-A string that contains the IP address (or domain name) and port number of the target cluster.
-
-* `name`.
-A string that is the locally defined reference to the target cluster.
-
-* `secureType`.
-A string that specifies the level of security required for connection.
-This can be `none`, `half`, or `full`.
-
-* `uri`.
-A string that is the URI of the locally named target cluster.
+200 OK::
+Successful execution.
+Couchbase Server creates the reference and returns its details in a JSON message.
+The keys in the JSON message are:
++
+* `certificate`: the root certificate for the target cluster, if one was used, in the creation of a `half` secure or `full` secure connection.
+* `clientCertificate`: the client certificate for the source cluster, if one was used, in the creation of a `full` secure connection.
+* `deleted`: whether the reference has been deleted.
The value can be `true` or `false`.
+* `hostname`: the IP address or domain name and port number of the target cluster.
+* `name`: the locally defined reference to the target cluster.
+* `secureType`: the level of security required for connection.
This value can be `none`, `half`, or `full`.
+* `uri`: the URI of the locally named target cluster.
For example, `"/pools/default/remoteClusters/FirstTarget"`.
+* `username`: the username used for authentication with the target cluster.
+This value is an empty string when not using a username for authentication.
+* `uuid`: the universally unique identifier for the reference.
+For example, `"5ccf771844cd32375df8c4de70e9d44e"`.
+* `validateURI`: the URI for internal validation of the reference.
+For example, `"/pools/default/remoteClusters/SecondTarget?just_validate=1"`.

-* `username`.
-A string that is the name of the current user.
+400 Bad Request::
+Occurs when `secureType` is `full` and you supply both client certificates and a username and password.
+In this case, Couchbase Server also returns the following message:
++
+[source, json]
+----
+{"_":"username and client certificate cannot both be given when secure type is full"}
+----
++
+Supply either client certificates or a username and password for authentication with the target cluster, not both.

-* `uuid`.
-A string that is the universally unique identifier for the reference.
-For example, `"5ccf771844cd32375df8c4de70e9d44e"`.
+401 Unauthorized::
+Authentication failure, such as an incorrect username or password.

-* `validateURI.`
-A string that is the URI for internal validation of the reference.
-For example, `"/pools/default/remoteClusters/SecondTarget?just_validate=1"`.
+404 Object Not Found::
+The URI used in the REST API call was not correct.
+Couchbase Server responds with this error code if you attempt to edit a non-existent reference by adding its name to the REST API URI.
+
+== Required Permissions

-Failure to authenticate returns `401 Unauthorized`.
-An incorrectly specified URI returns `404 Object Not Found`.
-If `secureType` is `full`, and credentials _and_ client certificates are specified, connection fails with `400 Bad Request`, and an error message such as `{"_":"username and client certificate cannot both be given when secure type is full"}`.
+You must have the Full Admin, Cluster Admin, or XDCR Admin role to call this API.

== Examples

-The following examples demonstrate how a reference can be established.
-All examples are piped to https://stedolan.github.io/jq/[jq^], and certificate output is truncated, in order to enhance the readability of output.
+The following examples demonstrate how to create and edit a reference.
+All examples are piped through https://stedolan.github.io/jq/[jq^], and certificate output is truncated, to make the output more readable.

-== Create a Fully Secure Reference, Using Credentials
+=== Create a Fully Secure Reference, Using Credentials

-To create a fully secure reference from `localhost` to `10.144.220.102` by means of the target cluster's administrative credentials and root certificate, enter the following.
+This example creates a fully secure reference from `localhost` to `10.144.220.102`.
+It uses a username and password plus the target cluster's root certificate to authenticate.

+[source, console]
----
curl -X POST -u Administrator:password \
http://localhost:8091/pools/default/remoteClusters \
@@ -134,12 +198,14 @@ http://localhost:8091/pools/default/remoteClusters \
--data-urlencode "certificate=$(cat ./ca.pem)" | jq '.'
----

-The `username` and `password` specified are those of the target cluster.
-Note that the `demandEncryption` flag is not specified; however, the `encryptionType` flag is set to `full`.
-The `--data-urlencode` flag specifies the local path to the root certificate for the target cluster.
+This example sets a `username` and `password` for an account on the target cluster.
+It does not set the `demandEncryption` parameter.
+However, because it sets the `encryptionType` parameter to `full`, the reference uses full encryption.
+The `--data-urlencode` flag URL-encodes the target cluster's root certificate, which the `cat` command reads from a local file.

-Formatted, the output from a successful execution is as follows:
+Formatted, the output from a successful execution is:

+[source, json]
----
{
  "certificate": "-----BEGIN CERTIFICATE-----\nMIIDJzCC
@@ -160,11 +226,13 @@ Formatted, the output from a successful execution is as follows:
}
----

-== Create a Half-Secure Reference, Using Credentials
+=== Create a Half-Secure Reference, Using Credentials

-To create a half-secure reference from `localhost` to `10.142.180.102` by means of the remote cluster's administrative credentials and its root certificate, enter the following.
-(Note that `10.144.220.102` is assumed to be running a pre-5.5 version of Couchbase Server.)
+The following example creates a half-secure reference from `localhost` to `10.142.180.102`.
+It uses a username and password to authenticate with the target cluster.
+
+[source, console]
----
curl -X POST -u Administrator:password \
http://localhost:8091/pools/default/remoteClusters \
@@ -176,12 +244,13 @@ http://localhost:8091/pools/default/remoteClusters \
--data-urlencode "certificate=$(cat ./ca.pem)" | jq '.'
----

-The `username` and `password` specified are those of the remote cluster.
-Note that the `demandEncryption` flag is set to `1`, while, the `encryptionType` flag specifies `half`.
-The `--data-urlencode` flag specifies the local path to the root certificate for the (pre-5.5) target cluster.
+The `username` and `password` in the example are for an account on the remote cluster.
+The connection is half-secured (only the password is encrypted) because the `demandEncryption` flag is `1` and the `encryptionType` flag is `half`.
+The `--data-urlencode` flag URL-encodes the target cluster's root certificate, which the `cat` command reads from a local file.

-If connection is successful, the following is returned:
+If the source cluster connects to the target cluster successfully, Couchbase Server returns the following message:

+[source, json]
----
{
  "certificate": "-----BEGIN CERTIFICATE-----\nMIIDJzCCAg+gAwIBAgIUSaVkKhAwNl8aTxDkfyoeUiStp1cw/
@@ -203,14 +272,19 @@ If connection is successful, the following is returned:
}
----

-== Create a Fully Secure Reference, Using Certificates
+=== Create a Fully Secure Reference, Using Certificates
+
+This example creates a fully secure reference from `localhost` to `target.example.com` by doing the following:

-To create a fully secure reference from `localhost` to `target.en.cl`, specifying that connection should occur with an external network, demanding full encryption, and authenticating by means of the remote cluster's root certificate, a client certificate, and a client private key, enter the following:
+* Specifies that the connection is over an external network
+* Enables full encryption
+* Authenticates using the remote cluster's root certificate, a client certificate, and a client private key
+[source, console]
----
curl -X POST -u Administrator:password http://localhost:8091/pools/default/remoteClusters \
-d name=TargetCluster \
--d hostname=target.en.cl \
+-d hostname=target.example.com \
-d network_type=external \
-d demandEncryption=1 \
--data-urlencode "certificate=$(cat ./ca.pem)" \
@@ -218,11 +292,14 @@ curl -X POST -u Administrator:password http://localhost:8091/pools/default/remot
--data-urlencode "clientKey=$(cat ./travel-sample.key)"
----

-Note that the `demandEncryption` flag is set to `1`, and a fully encrypted connection is thus enforced.
-The `network_type=external` parameter is specified, indicating that the target's external network should be connected to, if it has been configured; otherwise, connection to an internal network is attempted.
+Because the example sets the `demandEncryption` flag to `1` and does not supply a `secureType` parameter, the connection to the target cluster is fully encrypted.
+The `network_type=external` parameter indicates that Couchbase Server should connect to the target's external network if it has been configured.
+If the target cluster does not have an external network defined, the source cluster attempts to connect to the target cluster's internal network.
+The example supplies three URL-encoded values: the target cluster's root certificate, a client certificate, and a client key.

If successful, the command returns the following:

+[source, json]
----
{
  "certificate": "-----BEGIN CERTIFICATE-----\nMIIDJzCCAg+gAwIBAgIUSaVkKh
@@ -238,7 +315,7 @@ If successful, the command returns the following:
  "deleted": false,
  "demandEncryption": true,
  "encryptionType": "full",
-  "hostname": "target.en.cl",
+  "hostname": "target.example.com",
  "name": "TargetCluster",
  "secureType": "full",
  "uri": "/pools/default/remoteClusters/TargetCluster",
@@ -248,10 +325,13 @@ If successful, the command returns the following:
}
----

-The `secureType` field specifies `full`: therefore, the reference and its associated replications have now been fully secured.
-Both the target cluster's root certificate and the source cluster's client certificate are included in the output.
+The `secureType` field specifies `full`, which is the default value if you set `demandEncryption` to `1` and do not supply a `secureType` parameter in the REST API call.
+The output includes both the target cluster's root certificate and the source cluster's client certificate.
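+
+=== Edit an Existing Reference
+
+This example is a minimal sketch of editing a reference.
+It assumes that a reference named `TargetCluster` already exists, and renames it to `NewTarget` while keeping the same target address and credentials; the address and credentials are placeholders, so substitute values for your own clusters.
+
+[source, console]
+----
+curl -X POST -u Administrator:password \
+http://localhost:8091/pools/default/remoteClusters/TargetCluster \
+-d name=NewTarget \
+-d hostname=10.144.220.102:8091 \
+-d username=Administrator \
+-d password=password | jq '.'
+----
+
+If successful, the response has the same form as in the earlier examples, with `name` set to `NewTarget` and `uri` set to `/pools/default/remoteClusters/NewTarget`.

== See Also

-For information on using the REST API to create secure connections, see xref:manage:manage-xdcr/secure-xdcr-replication.adoc[Secure a Replication].
-Additional information is provided in xref:learn:security/certificates.adoc[Certificates] and xref:xdcr-reference:xdcr-security-and-networking.adoc[XDCR Security and Networking].
+* See xref:manage:manage-xdcr/enable-full-secure-replication.adoc[] for an overview of securing replications.
+* See xref:manage:manage-xdcr/secure-xdcr-replication.adoc[] for information about using the REST API to create secure connections.
+* See xref:learn:security/certificates.adoc[] for an overview of using certificates with Couchbase Server.
+* See xref:xdcr-reference:xdcr-security-and-networking.adoc[] for XDCR networking and security requirements.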
diff --git a/modules/rest-api/pages/rest-xdcr-create-replication.adoc b/modules/rest-api/pages/rest-xdcr-create-replication.adoc
index b386c64ab2..a115edf42c 100644
--- a/modules/rest-api/pages/rest-xdcr-create-replication.adoc
+++ b/modules/rest-api/pages/rest-xdcr-create-replication.adoc
@@ -52,6 +52,7 @@ curl -v -X POST -u [admin]:[password]
    -d statsInterval=[Integer]
    -d logLevel=[String]
    -d networkUsageLimit=[Integer]
+    -d mobile=[Off | Active]
----

The `type` value must be `xmem`, which is sometimes referred to as *Version 2*, and corresponds to the _Memcached Binary_ protocol, used in XDCR communications.
@@ -122,6 +123,11 @@ The value can be `High`, `Medium`, or `Low`.
The default value is `High`.
For information, see xref:learn:clusters-and-availability/xdcr-overview.adoc#xdcr-priority[XDCR Priority].

+Use the `mobile` flag to enable _XDCR Active-Active with Sync Gateway 4.0+_ by setting the value to `Active` on the clusters on both sides of the replication. The default value is `Off`, which indicates that the setup supports either _XDCR Active-Passive with Sync Gateway_ or _XDCR Active-Active without Sync Gateway_.
+
+[NOTE]
+Before you set `mobile` to `Active`, you must enable the `enableCrossClusterVersioning` property on all participating buckets. For information about the bucket property `enableCrossClusterVersioning`, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc[XDCR enableCrossClusterVersioning].
+
For information on all other flags, see xref:rest-api:rest-xdcr-adv-settings.adoc[Managing Advanced XDCR Settings].

[#responses]
diff --git a/modules/rest-api/pages/rest-xdcr-intro.adoc b/modules/rest-api/pages/rest-xdcr-intro.adoc
index aa2582ab5d..79f83caaa1 100644
--- a/modules/rest-api/pages/rest-xdcr-intro.adoc
+++ b/modules/rest-api/pages/rest-xdcr-intro.adoc
@@ -8,7 +8,8 @@
== APIs in this Section

Cross Datacenter Replication (XDCR) configuration replicates data between a source bucket and a target bucket.
-For a detailed introduction and overview, see xref:learn:clusters-and-availability/xdcr-overview.adoc[Cross Data Center Replication (XDCR)].
+For a detailed introduction and overview, see xref:learn:clusters-and-availability/xdcr-overview.adoc[].
+To learn how to secure an XDCR connection, see xref:manage:manage-xdcr/enable-full-secure-replication.adoc[].

For a list of the methods and URIs covered by the pages in this section, see the table below.
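+
+As a quick illustration of how two of these methods work together, the following sketch first creates a reference to a target cluster and then starts a continuous replication that uses it.
+The credentials, addresses, and bucket names are placeholders for this example; substitute values for your own clusters.
+
+[source, console]
+----
+# Create a reference named TargetCluster on the source cluster.
+curl -X POST -u Administrator:password \
+  http://localhost:8091/pools/default/remoteClusters \
+  -d name=TargetCluster \
+  -d hostname=10.144.220.102:8091 \
+  -d username=Administrator \
+  -d password=password
+
+# Start a continuous replication of the travel-sample bucket to the
+# bucket of the same name on the target cluster.
+curl -X POST -u Administrator:password \
+  http://localhost:8091/controller/createReplication \
+  -d fromBucket=travel-sample \
+  -d toCluster=TargetCluster \
+  -d toBucket=travel-sample \
+  -d replicationType=continuous
+----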
diff --git a/modules/rest-api/partials/rest-analytics-service-table.adoc b/modules/rest-api/partials/rest-analytics-service-table.adoc index 606c5d1d51..962780669f 100644 --- a/modules/rest-api/partials/rest-analytics-service-table.adoc +++ b/modules/rest-api/partials/rest-analytics-service-table.adoc @@ -6,19 +6,19 @@ | `POST` | `/analytics/service` -| xref:analytics:rest-service.adoc#_post_service[Query Service] +| xref:analytics-rest-service:index.adoc#post_service[Query Service] | `GET` | `/analytics/service` -| xref:analytics:rest-service.adoc#_get_service[Read-Only Query Service] +| xref:analytics-rest-service:index.adoc#get_service[Read-Only Query Service] | `POST` | `/query/service` -| xref:analytics:rest-service.adoc#_post_query[Query Service (Alternative)] +| xref:analytics-rest-service:index.adoc#post_query[Query Service (Alternative)] | `GET` | `/query/service` -| xref:analytics:rest-service.adoc#_get_query[Read-Only Query Service (Alternative)] +| xref:analytics-rest-service:index.adoc#get_query[Read-Only Query Service (Alternative)] |=== @@ -30,31 +30,31 @@ | `GET` | `/analytics/admin/active_requests` -| xref:analytics:rest-admin.adoc#_return_active_requests[Active Requests] +| xref:analytics-rest-admin:index.adoc#return_active_requests[Active Requests] | `DELETE` | `/analytics/admin/active_requests` -| xref:analytics:rest-admin.adoc#_cancel_request[Request Cancellation] +| xref:analytics-rest-admin:index.adoc#cancel_request[Request Cancellation] | `GET` | `/analytics/admin/completed_requests` -| xref:analytics:rest-admin.adoc#_completed_requests[Completed Requests] +| xref:analytics-rest-admin:index.adoc#completed_requests[Completed Requests] | `GET` | `/analytics/cluster` -| xref:analytics:rest-admin.adoc#_cluster_status[Cluster Status] +| xref:analytics-rest-admin:index.adoc#cluster_status[Cluster Status] | `POST` | `/analytics/cluster/restart` -| xref:analytics:rest-admin.adoc#_restart_cluster[Cluster Restart] +| xref:analytics-rest-admin:index.adoc#restart_cluster[Cluster Restart] | `POST` | `/analytics/node/restart` -| xref:analytics:rest-admin.adoc#_restart_node[Node Restart] +| xref:analytics-rest-admin:index.adoc#restart_node[Node Restart] | `GET` | `/analytics/status/ingestion` -| xref:analytics:rest-admin.adoc#_ingestion_status[Ingestion Status] +| xref:analytics-rest-admin:index.adoc#ingestion_status[Ingestion Status] |=== @@ -66,19 +66,19 @@ | `GET` | `/analytics/config/service` -| xref:analytics:rest-config.adoc#_get_service[View Service-Level Parameters] +| xref:analytics-rest-config:index.adoc#get_service[View Service-Level Parameters] | `PUT` | `/analytics/config/service` -| xref:analytics:rest-config.adoc#_put_service[Modify Service-Level Parameters] +| xref:analytics-rest-config:index.adoc#put_service[Modify Service-Level Parameters] | `GET` | `/analytics/config/node` -| xref:analytics:rest-config.adoc#_get_node[View Node-Specific Parameters] +| xref:analytics-rest-config:index.adoc#get_node[View Node-Specific Parameters] | `PUT` | `/analytics/config/node` -| xref:analytics:rest-config.adoc#_put_node[Modify Node-Specific Parameters] +| xref:analytics-rest-config:index.adoc#put_node[Modify Node-Specific Parameters] |=== @@ -90,11 +90,11 @@ | `GET` | `/settings/analytics` -| xref:analytics:rest-settings.adoc#_get_settings[View Analytics Settings] +| xref:analytics-rest-settings:index.adoc#get_settings[View Analytics Settings] | `POST` | `/settings/analytics` -| xref:analytics:rest-settings.adoc#_post_settings[Modify Analytics Settings] +| 
xref:analytics-rest-settings:index.adoc#post_settings[Modify Analytics Settings] |=== @@ -106,27 +106,27 @@ | `POST` | `/analytics/link/{scope}/{name}` -| xref:analytics:rest-links.adoc#_post_link[Create Link] +| xref:analytics-rest-links:index.adoc#post_link[Create Link] | `GET` | `/analytics/link/{scope}/{name}` -| xref:analytics:rest-links.adoc#_get_link[Query Link] +| xref:analytics-rest-links:index.adoc#get_link[Query Link] | `PUT` | `/analytics/link/{scope}/{name}` -| xref:analytics:rest-links.adoc#_put_link[Edit Link] +| xref:analytics-rest-links:index.adoc#put_link[Edit Link] | `DELETE` | `/analytics/link/{scope}/{name}` -| xref:analytics:rest-links.adoc#_delete_link[Delete Link] +| xref:analytics-rest-links:index.adoc#delete_link[Delete Link] | `GET` | `/analytics/link` -| xref:analytics:rest-links.adoc#_get_all[Query All Links] +| xref:analytics-rest-links:index.adoc#get_all[Query All Links] | `GET` | `/analytics/link/{scope}` -| xref:analytics:rest-links.adoc#_get_scope[Query Scope Links] +| xref:analytics-rest-links:index.adoc#get_scope[Query Scope Links] |=== @@ -139,14 +139,14 @@ | `GET` | `/analytics/library` -| xref:analytics:rest-library.adoc#_get_collection[Read All Libraries] +| xref:analytics-rest-library:index.adoc#get_collection[Read All Libraries] | `POST` | `/analytics/library/{scope}/{library}` -| xref:analytics:rest-library.adoc#_post_library[Create or Update a Library] +| xref:analytics-rest-library:index.adoc#post_library[Create or Update a Library] | `DELETE` | `/analytics/library/{scope}/{library}` -| xref:analytics:rest-links.adoc#_delete_library[Delete a Library] +| xref:analytics-rest-links:index.adoc#delete_library[Delete a Library] |=== diff --git a/modules/rest-api/partials/rest-buckets-table.adoc b/modules/rest-api/partials/rest-buckets-table.adoc index 87b5554d42..94e8987762 100644 --- a/modules/rest-api/partials/rest-buckets-table.adoc +++ b/modules/rest-api/partials/rest-buckets-table.adoc @@ -23,8 +23,12 @@ | xref:rest-api:rest-retrieve-bucket-nodes.adoc[Listing Nodes by Bucket] | `GET` -| `/pools/default/buckets/[bucket-name]/stats` -| xref:rest-api:rest-bucket-stats.adoc[Getting Bucket Statistics] +| `/pools/default/stats/range/[metric_name]/[function-expression]` +| xref:rest-api:rest-statistics-single.adoc[Getting a Single Statistic] + +| `POST` +| `/pools/default/stats/range` +| xref:rest-api:rest-statistics-multiple.adoc[Getting Multiple Statistics] | `GET` | `/pools/default/buckets/default` diff --git a/modules/rest-api/partials/rest-index-service-table.adoc b/modules/rest-api/partials/rest-index-service-table.adoc index 90876bea91..6108cee288 100644 --- a/modules/rest-api/partials/rest-index-service-table.adoc +++ b/modules/rest-api/partials/rest-index-service-table.adoc @@ -12,14 +12,14 @@ | `GET` | `/api/v1/stats` -| xref:rest-api:rest-index-stats.adoc#_get_node_stats[Get Node Statistics] +| xref:index-rest-stats:index.adoc#get_node_stats[Get Node Statistics] | `GET` | `/api/v1/stats/{keyspace}` -| xref:rest-api:rest-index-stats.adoc#_get_keyspace_stats[Get Keyspace Statistics] +| xref:index-rest-stats:index.adoc#get_keyspace_stats[Get Keyspace Statistics] | `GET` | `/api/v1/stats/{keyspace}/{index}` -| xref:rest-api:rest-index-stats.adoc#_get_index_stats[Get Index Statistics] +| xref:index-rest-stats:index.adoc#get_index_stats[Get Index Statistics] |=== diff --git a/modules/rest-api/partials/rest-query-service-table.adoc b/modules/rest-api/partials/rest-query-service-table.adoc index 6a9f46c0a7..86eb3eb999 100644 --- 
a/modules/rest-api/partials/rest-query-service-table.adoc +++ b/modules/rest-api/partials/rest-query-service-table.adoc @@ -7,11 +7,11 @@ | `POST` | `/query/service` -| xref:n1ql:n1ql-rest-api/index.adoc#_post_service[Query Service] +| xref:n1ql-rest-query:index.adoc#post_service[Query Service] | `GET` | `/query/service` -| xref:n1ql:n1ql-rest-api/index.adoc#_get_service[Read-Only Query Service] +| xref:n1ql-rest-query:index.adoc#get_service[Read-Only Query Service] |=== // end::query-service[] @@ -25,108 +25,108 @@ | `GET` | `/admin/clusters` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_clusters[Read All Clusters] +| xref:n1ql-rest-admin:index.adoc#get_clusters[Read All Clusters] | `GET` | `/admin/clusters/{cluster}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_cluster[Read a Cluster] +| xref:n1ql-rest-admin:index.adoc#get_cluster[Read a Cluster] | `GET` | `/admin/clusters/{cluster}/nodes` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_nodes[Read All Nodes] +| xref:n1ql-rest-admin:index.adoc#get_nodes[Read All Nodes] | `GET` | `/admin/clusters/{cluster}/nodes/{node}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_node[Read a Node] +| xref:n1ql-rest-admin:index.adoc#get_node[Read a Node] | `GET` | `/admin/config` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_config[Read Configuration] +| xref:n1ql-rest-admin:index.adoc#get_config[Read Configuration] | `GET` | `/admin/prepareds` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_prepareds[Retrieve All Prepared Statements] +| xref:n1ql-rest-admin:index.adoc#get_prepareds[Retrieve All Prepared Statements] | `GET` | `/admin/prepareds/{name}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_prepared[Retrieve a Prepared Statement] +| xref:n1ql-rest-admin:index.adoc#get_prepared[Retrieve a Prepared Statement] | `DELETE` | `/admin/prepareds/{name}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_delete_prepared[Delete a Prepared Statement] +| xref:n1ql-rest-admin:index.adoc#delete_prepared[Delete a Prepared Statement] | `GET` | `/admin/indexes/prepareds` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_prepared_indexes[Retrieve Prepared Index Statements] +| xref:n1ql-rest-admin:index.adoc#get_prepared_indexes[Retrieve Prepared Index Statements] | `GET` | `/admin/active_requests` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_active_requests[Retrieve All Active Requests] +| xref:n1ql-rest-admin:index.adoc#get_active_requests[Retrieve All Active Requests] | `GET` | `/admin/active_requests/{request}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_active_request[Retrieve an Active Request] +| xref:n1ql-rest-admin:index.adoc#get_active_request[Retrieve an Active Request] | `DELETE` | `/admin/active_requests/{request}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_delete_active_request[Delete an Active Request] +| xref:n1ql-rest-admin:index.adoc#delete_active_request[Delete an Active Request] | `GET` | `/admin/indexes/active_requests` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_active_indexes[Retrieve Active Index Requests] +| xref:n1ql-rest-admin:index.adoc#get_active_indexes[Retrieve Active Index Requests] | `GET` | `/admin/completed_requests` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_completed_requests[Retrieve All Completed Requests] +| xref:n1ql-rest-admin:index.adoc#get_completed_requests[Retrieve All Completed Requests] | `GET` | `/admin/completed_requests/{request}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_completed_request[Retrieve a Completed Request] +| xref:n1ql-rest-admin:index.adoc#get_completed_request[Retrieve a Completed Request] | `DELETE` | `/admin/completed_requests/{request}` -| 
xref:n1ql:n1ql-rest-api/admin.adoc#_delete_completed_request[Delete a Completed Request] +| xref:n1ql-rest-admin:index.adoc#delete_completed_request[Delete a Completed Request] | `GET` | `/admin/indexes/completed_requests` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_completed_indexes[Retrieve Completed Index Requests] +| xref:n1ql-rest-admin:index.adoc#get_completed_indexes[Retrieve Completed Index Requests] | `GET` | `/admin/vitals` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_vitals[Retrieve Vitals] +| xref:n1ql-rest-admin:index.adoc#get_vitals[Retrieve Vitals] | `GET` | `/admin/stats` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_stats[Retrieve All Statistics] +| xref:n1ql-rest-admin:index.adoc#get_stats[Retrieve All Statistics] | `GET` | `/admin/stats/{stats}` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_stat[Retrieve a Statistic] +| xref:n1ql-rest-admin:index.adoc#get_stat[Retrieve a Statistic] // deprecated method // | `GET` // | `/debug/vars` -// | xref:n1ql:n1ql-rest-api/admin.adoc#_get_debug_vars[Get Debug Variables] +// | xref:n1ql-rest-admin:index.adoc#get_debug_vars[Get Debug Variables] | `GET` | `/admin/settings` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_settings[Retrieve Node-Level Query Settings] +| xref:n1ql-rest-admin:index.adoc#get_settings[Retrieve Node-Level Query Settings] | `POST` | `/admin/settings` -| xref:n1ql:n1ql-rest-api/admin.adoc#_post_settings[Update Node-Level Query Settings] +| xref:n1ql-rest-admin:index.adoc#post_settings[Update Node-Level Query Settings] | `GET` | `/admin/ping` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_ping[Ping] +| xref:n1ql-rest-admin:index.adoc#get_ping[Ping] | `GET` | `/admin/gc` -| xref:n1ql:n1ql-rest-api/admin.adoc#_get_gc[Run Garbage Collector] +| xref:n1ql-rest-admin:index.adoc#get_gc[Run Garbage Collector] | `POST` | `/admin/gc` -| xref:n1ql:n1ql-rest-api/admin.adoc#_post_gc[Run Garbage Collector and Release Memory] +| xref:n1ql-rest-admin:index.adoc#post_gc[Run Garbage Collector and Release Memory] |=== // end::query-admin[] @@ -140,19 +140,19 @@ | `GET` | `/settings/querySettings` -| xref:rest-api:rest-cluster-query-settings.adoc#_get_settings[Retrieve Cluster-Level Query Settings] +| xref:n1ql-rest-settings:index.adoc#get_settings[Retrieve Cluster-Level Query Settings] | `POST` | `/settings/querySettings` -| xref:rest-api:rest-cluster-query-settings.adoc#_post_settings[Update Cluster-Level Query Settings] +| xref:n1ql-rest-settings:index.adoc#post_settings[Update Cluster-Level Query Settings] | `GET` | `/settings/querySettings/curlWhitelist` -| xref:rest-api:rest-cluster-query-settings.adoc#_get_access[Retrieve CURL Access List] +| xref:n1ql-rest-settings:index.adoc#get_access[Retrieve CURL Access List] | `POST` | `/settings/querySettings/curlWhitelist` -| xref:rest-api:rest-cluster-query-settings.adoc#_post_access[Update CURL Access List] +| xref:n1ql-rest-settings:index.adoc#post_access[Update CURL Access List] |=== // end::query-settings[] @@ -166,19 +166,19 @@ | `GET` | `/evaluator/v1/libraries` -| xref:n1ql:n1ql-rest-api/functions.adoc#_get_collection[Read All Libraries] +| xref:n1ql-rest-functions:index.adoc#get_collection[Read All Libraries] | `GET` | `/evaluator/v1/libraries/{library}` -| xref:n1ql:n1ql-rest-api/functions.adoc#_get_library[Read a Library] +| xref:n1ql-rest-functions:index.adoc#get_library[Read a Library] | `POST` | `/evaluator/v1/libraries/{library}` -| xref:n1ql:n1ql-rest-api/functions.adoc#_post_library[Create or Update a Library] +| xref:n1ql-rest-functions:index.adoc#post_library[Create or Update a 
Library] | `DELETE` | `/evaluator/v1/libraries/{library}` -| xref:n1ql:n1ql-rest-api/functions.adoc#_delete_library[Delete a Library] +| xref:n1ql-rest-functions:index.adoc#delete_library[Delete a Library] |=== // end::query-functions[] diff --git a/modules/rest-api/partials/rest-search-service-table.adoc b/modules/rest-api/partials/rest-search-service-table.adoc index 2a1db4b4bf..a481eaf097 100644 --- a/modules/rest-api/partials/rest-search-service-table.adoc +++ b/modules/rest-api/partials/rest-search-service-table.adoc @@ -1,168 +1,172 @@ -=== Index Definition +=== Node Configuration [cols="76,215,249"] |=== | HTTP Method | URI | Documented at | `GET` -| `/api/index` -| xref:rest-api:rest-fts-indexing.adoc#index-definition[Index Definition] +| `/api/cfg` +| xref:fts-rest-nodes:index.adoc#getClusterConfig[Get Cluster Configuration] -| `GET` -| `/api/index/{indexName}` -| xref:rest-api:rest-fts-indexing.adoc#index-definition[Index Definition] +| `POST` +| `/api/cfgRefresh` +| xref:fts-rest-nodes:index.adoc#refreshClusterConfig[Refresh Node Configuration] -| `PUT` -| `/api/index/{indexName}` -| xref:rest-api:rest-fts-indexing.adoc#index-definition[Index Definition] +| `POST` +| `/api/managerKick` +| xref:fts-rest-nodes:index.adoc#managerKick[Replan Resource Assignments] -| `DELETE` -| `/api/index/{indexName}` -| xref:rest-api:rest-fts-indexing.adoc#index-definition[Index Definition] +| `GET` +| `/api/managerMeta` +| xref:fts-rest-nodes:index.adoc#managerMeta[Get Node Capabilities] |=== -=== Index Management +=== Node Diagnostics [cols="76,215,249"] |=== | HTTP Method | URI | Documented at -| `POST` -| `/api/index/{indexName}/ingestControl/{op}` -| xref:rest-api:rest-fts-indexing.adoc#index-management[Index Management] +| `GET` +| `/api/diag` +| xref:fts-rest-nodes:index.adoc#getDiagnostics[Get Diagnostics] + +| `GET` +| `/api/log` +| xref:fts-rest-nodes:index.adoc#getLogs[Get Node Logs] + +| `GET` +| `/api/runtime` +| xref:fts-rest-nodes:index.adoc#getRuntimeInfo[Get Node Runtime Information] + +| `GET` +| `/api/runtime/args` +| xref:fts-rest-nodes:index.adoc#getRuntimeArgs[Get Node Runtime Arguments] | `POST` -| `/api/index/{indexName}/planFreezeControl/{op}` -| xref:rest-api:rest-fts-indexing.adoc#index-management[Index Management] +| `/api/runtime/profile/cpu` +| xref:fts-rest-nodes:index.adoc#captureCpuProfile[Capture CPU Profiling Information] | `POST` -| `/api/index/{indexName}/planQueryControl/{op}` -| xref:rest-api:rest-fts-indexing.adoc#index-management[Index Management] +| `/api/runtime/profile/memory` +| xref:fts-rest-nodes:index.adoc#captureMemoryProfile[Capture Memory Profiling Information] |=== -=== Index Monitoring and Debugging +=== Node Management [cols="76,215,249"] |=== | HTTP Method | URI | Documented at -| `GET` -| `/api/stats` -| xref:rest-api:rest-fts-indexing.adoc#index-monitoring-and-debugging[Index Monitoring And Debugging] - -| `GET` -| `/api/stats/{indexName}` -| xref:rest-api:rest-fts-indexing.adoc#index-monitoring-and-debugging[Index Monitoring And Debugging] - | `POST` -| `/api/stats/{indexName}/analyzeDoc` -| xref:rest-api:rest-fts-indexing.adoc#index-monitoring-and-debugging[Index Monitoring And Debugging] - -| `GET` -| `/api/query/index/{indexName}` -| xref:rest-api:rest-fts-indexing.adoc#index-monitoring-and-debugging[Index Monitoring And Debugging] +| `/api/runtime/gc` +| xref:fts-rest-nodes:index.adoc#performGC[Perform Garbage Collection] |=== -=== Index Querying +=== Node Monitoring [cols="76,215,249"] |=== | HTTP Method | URI | Documented at | `GET` 
-| `/api/index/{indexName}/count` -| xref:rest-api:rest-fts-indexing.adoc#index-querying[Index Querying] +| `/api/runtime/stats` +| xref:fts-rest-nodes:index.adoc#getRuntimeStats[Get Runtime Statistics] -| `POST` -| `/api/index/{indexName}/query` -| xref:rest-api:rest-fts-indexing.adoc#index-querying[Index Querying] +| `GET` +| `/api/runtime/stats/statsMem` +| xref:fts-rest-nodes:index.adoc#getMemoryStats[Get Memory Statistics] |=== -=== Node Configuration +=== Index Definition [cols="76,215,249"] |=== | HTTP Method | URI | Documented at | `GET` -| `/api/cfg` -| xref:rest-api:rest-fts-node.adoc#node-configuration[Node Configuration] +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index` +| xref:fts-rest-indexing:index.adoc#g-api-scoped-index[Get All Search Index Definitions (Scoped)] -| `POST` -| `/api/cfgRefresh` -| xref:rest-api:rest-fts-node.adoc#node-configuration[Node Configuration] +| `GET` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#g-api-scoped-index-name[Get Index Definition (Scoped)] -| `POST` -| `/api/managerKick` -| xref:rest-api:rest-fts-node.adoc#node-configuration[Node Configuration] +| `PUT` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#p-api-scoped-index-name[Create or Update an Index Definition (Scoped)] -| `GET` -| `/api/managerMeta` -| xref:rest-api:rest-fts-node.adoc#node-configuration[Node Configuration] +| `DELETE` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#d-api-scoped-index-name[Delete Index Definition (Scoped)] |=== -=== Node Diagnostics +=== Index Management [cols="76,215,249"] |=== | HTTP Method | URI | Documented at -| `GET` -| `/api/diag` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] - -| `GET` -| `/api/log` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] - -| `GET` -| `/api/runtime` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] - -| `GET` -| `/api/runtime/args` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] +| `POST` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/ingestControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-scoped-ingestcontrol[Set Index Ingestion Control (Scoped)] | `POST` -| `/api/runtime/profile/cpu` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/planFreezeControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-scoped-planfreezecontrol[Freeze Index Partition Assignment (Scoped)] | `POST` -| `/api/runtime/profile/memory` -| xref:rest-api:rest-fts-node.adoc#node-diagnostics[Node Diagnostics] +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/queryControl/{OP}` +| xref:fts-rest-indexing:index.adoc#p-api-scoped-querycontrol[Stop Queries on an Index (Scoped)] |=== -=== Node Management +=== Index Monitoring and Debugging [cols="76,215,249"] |=== | HTTP Method | URI | Documented at +| `GET` +| `/api/stats` +| xref:fts-rest-indexing:index.adoc#g-api-stats[Get Indexing and Data Metrics for All Indexes] + +| `GET` +| `/api/stats/{INDEX_NAME}` +| xref:fts-rest-indexing:index.adoc#g-api-stats-index-name[Get Indexing and Data Metrics for an Index] + | `POST` -| `/api/runtime/gc` -| xref:rest-api:rest-fts-node.adoc#node-management[Node Management] +| `/api/stats/{INDEX_NAME}/analyzeDoc` +| 
xref:fts-rest-indexing:index.adoc#g-api-stats-index-name-analyzeDoc[Analyze Document] + +| `GET` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/status` +| xref:fts-rest-indexing:index.adoc#g-api-scoped-status[Get Index Status (Scoped)] |=== -=== Node Monitoring +=== Index Querying [cols="76,215,249"] |=== | HTTP Method | URI | Documented at | `GET` -| `/api/runtime/stats` -| xref:rest-api:rest-fts-node.adoc#node-monitoring[Node Monitoring] +| `/api/index/{INDEX_NAME}/count` +| xref:fts-rest-indexing:index.adoc#g-api-index-name-count[Get Document Count for an Index] -| `GET` -| `/api/runtime/stats/statsMem` -| xref:rest-api:rest-fts-node.adoc#node-monitoring[Node Monitoring] +| `POST` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/pindexLookup` +| xref:fts-rest-indexing:index.adoc#p-api-pindex-lookup[Look up the Index Partition for a Document (Scoped)] + +| `POST` +| `/api/bucket/{BUCKET_NAME}/scope/{SCOPE_NAME}/index/{INDEX_NAME}/query` +| xref:fts-rest-indexing:index.adoc#p-api-scoped-query[Query a Search Index (Scoped)] |=== @@ -174,11 +178,11 @@ | `GET` | `/api/pindex` -| xref:rest-api:rest-fts-advanced.adoc#index-partition-definition[Advanced] +| xref:fts-rest-advanced:index.adoc#getPartition[Get Index Partition Information] | `GET` | `/api/pindex/{pindexName}` -| xref:rest-api:rest-fts-advanced.adoc#index-partition-definition[Advanced] +| xref:fts-rest-advanced:index.adoc#getPartitionName[Get Index Partition by Name] |=== @@ -190,11 +194,11 @@ | `GET` | `/api/pindex/{pindexName}/count` -| xref:rest-api:rest-fts-advanced.adoc#index-partition-querying[Advanced] +| xref:fts-rest-advanced:index.adoc#getPartitionCount[Get Index Partition Document Count] | `POST` | `/api/pindex/{pindexName}/query` -| xref:rest-api:rest-fts-advanced.adoc#index-partition-querying[Advanced] +| xref:fts-rest-advanced:index.adoc#queryPartition[Query Index Partition] |=== @@ -206,6 +210,54 @@ | `POST` | `/pools/default` -| xref:rest-api:rest-fts-advanced.adoc#fts-memory-quota[Advanced] +| xref:fts-rest-advanced:index.adoc#setFtsMemoryQuota[Set FTS Memory Quota] + +|=== + +=== Search Statistics + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `GET` +| `/api/nsstats` +| xref:fts-rest-stats:index.adoc#g-api-nsstats[Get Query, Mutation, and Partition Statistics for the Search Service] + +| `GET` +| `/api/nsstats/index/{INDEX_NAME}` +| xref:fts-rest-stats:index.adoc#g-api-nsstats-index-name[Get Query, Mutation, and Partition Statistics for an Index] + +|=== + +=== Active Queries + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `GET` +| `/api/query` +| xref:fts-rest-query:index.adoc#api-query[View Active Node Queries] + +| `GET` +| `/api/query/index/{indexName}` +| xref:fts-rest-query:index.adoc#api-query-index[View Active Index Queries] + +| `POST` +| `/api/query/{queryID}/cancel` +| xref:fts-rest-query:index.adoc#api-query-cancel[Cancel Active Queries] + +|=== + +=== Search Manager Options + +[cols="76,215,249"] +|=== +| HTTP Method | URI | Documented at + +| `GET` +| `/api/managerOptions` +| xref:fts-rest-manage:index.adoc#put_options[Rebalance Based on File Transfer] |=== diff --git a/modules/rest-api/partials/rest-xdcr-table.adoc b/modules/rest-api/partials/rest-xdcr-table.adoc index de6a8c9753..8b5361709e 100644 --- a/modules/rest-api/partials/rest-xdcr-table.adoc +++ b/modules/rest-api/partials/rest-xdcr-table.adoc @@ -44,7 +44,7 @@ | `GET` -| `/pools/default/buckets/[source_bucket]/stats/[destination_endpoint]` -| 
xref:rest-api:rest-xdcr-statistics.adoc[Getting Statistics]
+| `/pools/default/stats/range/[statistics_name]`
+| xref:rest-api:rest-statistics-single.adoc[Getting a Single Statistic]

|===
diff --git a/modules/system-event-reference/pages/system-event-reference.adoc b/modules/system-event-reference/pages/system-event-reference.adoc
index aa74b50711..23716aff7b 100644
--- a/modules/system-event-reference/pages/system-event-reference.adoc
+++ b/modules/system-event-reference/pages/system-event-reference.adoc
@@ -346,7 +346,7 @@ The following system events are returned for the Backup Service.

| 6149 | A scheduled or one-off task has completed | Info | Cluster, repository, run type, and name.

-| 6150 | Cluster, repository, run type, and name. failed | Error |luster, repository, run type, name, and error message.
+| 6150 | A scheduled or one-off task has failed | Error | Cluster, repository, run type, name, and error message.

| 6151 | Restore started | Info | NA

diff --git a/modules/xdcr-reference/pages/xdcr-advanced-settings.adoc b/modules/xdcr-reference/pages/xdcr-advanced-settings.adoc
index a641283ca3..02a5d39829 100644
--- a/modules/xdcr-reference/pages/xdcr-advanced-settings.adoc
+++ b/modules/xdcr-reference/pages/xdcr-advanced-settings.adoc
@@ -95,6 +95,8 @@ For documents that are smaller, XDCR replicates the document to the target witho
Note that a low setting risks increased latency, due to a higher number of metadata fetches; but may also reduce the number of required replications (due to source and target having identical copies of the document).
A high setting lowers latency during replication, since the number of metadata fetches is reduced; but may also raise the replication-rate excessively, overwhelming either network or target cluster.

+XDCR optimistic replication is applicable only when the `enableCrossClusterVersioning` property is disabled. For more information, see xref:learn:clusters-and-availability/xdcr-enable-crossclusterversioning.adoc#version-pruning-window-hrs[XDCR enableCrossClusterVersioning].
+
| XDCR Statistics Collection Interval
| Specifies, in seconds, how frequently XDCR Statistics are updated.

diff --git a/preview/HEAD.yml b/preview/HEAD.yml
new file mode 100644
index 0000000000..896c1ba582
--- /dev/null
+++ b/preview/HEAD.yml
@@ -0,0 +1,6 @@
+sources:
+  docs-devex:
+    branches: release/7.6
+  cb-swagger:
+    branches: release/7.6
+