diff --git a/.changeset/thin-snails-compete.md b/.changeset/thin-snails-compete.md new file mode 100644 index 000000000..9b37e6961 --- /dev/null +++ b/.changeset/thin-snails-compete.md @@ -0,0 +1,15 @@ +--- +'@powersync/service-core': minor +'@powersync/service-module-mssql': minor +'@powersync/service-module-postgres-storage': patch +'@powersync/service-module-mongodb-storage': patch +'@powersync/service-module-postgres': patch +'@powersync/service-errors': patch +'@powersync/service-module-mysql': patch +'@powersync/service-image': patch +--- + +- First iteration of MSSQL replication using Change Data Capture (CDC). +- Supports resumable snapshot replication +- Uses CDC polling for replication + diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c2a8e3604..f861e9595 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -292,3 +292,95 @@ jobs: - name: Test Storage run: pnpm --filter='./modules/module-mongodb-storage' test + + run-mssql-tests: + name: MSSQL Test + runs-on: ubuntu-latest + needs: run-core-tests + + env: + MSSQL_SA_PASSWORD: 321strong_ROOT_password + + strategy: + fail-fast: false + matrix: + mssql-version: [2022, 2025] + + steps: + - uses: actions/checkout@v5 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Start MSSQL + run: | + docker run \ + --name MSSQLTestDatabase \ + --health-cmd="/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"${{ env.MSSQL_SA_PASSWORD }}\" -Q \"SELECT 1;\" || exit 1" \ + --health-interval 5s \ + --health-timeout 3s \ + --health-retries 30 \ + -e ACCEPT_EULA=Y \ + -e MSSQL_SA_PASSWORD=${{ env.MSSQL_SA_PASSWORD }} \ + -e MSSQL_PID=Developer \ + -e MSSQL_AGENT_ENABLED=true \ + -p 1433:1433 \ + -d mcr.microsoft.com/mssql/server:${{ matrix.mssql-version }}-latest + + - name: Wait for MSSQL to be healthy + run: | + timeout 120 bash -c 'until docker inspect --format="{{.State.Health.Status}}" MSSQLTestDatabase | grep -q "healthy"; do sleep 2; done' + + - name: Initialize MSSQL database + run: | + docker run \ + --rm \ + --network host \ + -e MSSQL_SA_PASSWORD=${{ env.MSSQL_SA_PASSWORD }} \ + -v ${{ github.workspace }}/modules/module-mssql/ci/init-mssql.sql:/scripts/init-mssql.sql:ro \ + mcr.microsoft.com/mssql/server:${{ matrix.mssql-version }}-latest \ + /bin/bash -c "/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"${{ env.MSSQL_SA_PASSWORD }}\" -v DATABASE=powersync -v DB_USER=sa -i /scripts/init-mssql.sql" + + # The mongodb-github-action below doesn't use the Docker credentials for the pull. + # We pre-pull, so that the image is cached. 
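+      # Without the pre-pull, that unauthenticated pull can run into Docker Hub rate limits.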
+ - name: Pre-pull Mongo image + run: docker pull mongo:8.0 + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.12.0 + with: + mongodb-version: '8.0' + mongodb-replica-set: test-rs + + - name: Start PostgreSQL (Storage) + run: | + docker run \ + --health-cmd pg_isready \ + --health-interval 10s \ + --health-timeout 5s \ + --health-retries 5 \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_DB=powersync_storage_test \ + -p 5431:5432 \ + -d postgres:18 + + - name: Enable Corepack + run: corepack enable + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version-file: '.nvmrc' + cache: pnpm + + - name: Install dependencies + run: pnpm install + + - name: Build + shell: bash + run: pnpm build + + - name: Test Replication + run: pnpm --filter='./modules/module-mssql' test diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts index eecf747e2..454bd74ce 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts @@ -146,6 +146,10 @@ export class MongoBucketBatch return this.last_checkpoint_lsn; } + get noCheckpointBeforeLsn() { + return this.no_checkpoint_before_lsn; + } + async flush(options?: storage.BatchBucketFlushOptions): Promise { let result: storage.FlushedResult | null = null; // One flush may be split over multiple transactions. diff --git a/modules/module-mssql/.npmignore b/modules/module-mssql/.npmignore new file mode 100644 index 000000000..90012116c --- /dev/null +++ b/modules/module-mssql/.npmignore @@ -0,0 +1 @@ +dev \ No newline at end of file diff --git a/modules/module-mssql/LICENSE b/modules/module-mssql/LICENSE new file mode 100644 index 000000000..3ff64c975 --- /dev/null +++ b/modules/module-mssql/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, ALv2 Future License + +## Abbreviation + +FSL-1.1-ALv2 + +## Notice + +Copyright 2023-2025 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. 
+ +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/modules/module-mssql/README.md b/modules/module-mssql/README.md new file mode 100644 index 000000000..abbf9d41c --- /dev/null +++ b/modules/module-mssql/README.md @@ -0,0 +1,3 @@ +# PowerSync MSSQL Module + +MSSQL replication module for PowerSync diff --git a/modules/module-mssql/ci/init-mssql.sql b/modules/module-mssql/ci/init-mssql.sql new file mode 100644 index 000000000..159771b96 --- /dev/null +++ b/modules/module-mssql/ci/init-mssql.sql @@ -0,0 +1,50 @@ +-- Create database (idempotent) +IF DB_ID('$(DATABASE)') IS NULL +BEGIN + CREATE DATABASE [$(DATABASE)]; +END +GO + +-- Enable CDC at the database level (idempotent) +USE [$(DATABASE)]; +IF (SELECT is_cdc_enabled FROM sys.databases WHERE name = '$(DATABASE)') = 0 +BEGIN + EXEC sys.sp_cdc_enable_db; +END +GO + +-- Create PowerSync checkpoints table +-- Powersync requires this table to ensure regular checkpoints appear in CDC +IF OBJECT_ID('dbo._powersync_checkpoints', 'U') IS NULL +BEGIN + CREATE TABLE dbo._powersync_checkpoints ( + id INT IDENTITY PRIMARY KEY, + last_updated DATETIME NOT NULL DEFAULT (GETDATE()) +); +END + +GRANT INSERT, UPDATE ON dbo._powersync_checkpoints TO [$(DB_USER)]; +GO + +-- Enable CDC for the powersync checkpoints table +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo._powersync_checkpoints')) + BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'_powersync_checkpoints', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Wait until capture job exists - usually takes a few seconds after enabling CDC on a table for the first time +DECLARE @tries int = 10; +WHILE @tries > 0 AND NOT EXISTS (SELECT 1 FROM msdb.dbo.cdc_jobs WHERE job_type = N'capture') +BEGIN + WAITFOR DELAY '00:00:01'; + SET @tries -= 1; +END; + +-- Set the CDC capture job polling interval to 1 second (default is 5 seconds) +EXEC sys.sp_cdc_change_job @job_type = N'capture', @pollinginterval = 1; +GO \ No newline at end of file diff --git a/modules/module-mssql/dev/.env.template b/modules/module-mssql/dev/.env.template new file mode 100644 index 000000000..1fbba060e --- /dev/null +++ b/modules/module-mssql/dev/.env.template @@ -0,0 +1,4 @@ +ROOT_PASSWORD=321strong_ROOT_password +DATABASE=powersync +DB_USER=powersync_user +DB_USER_PASSWORD=321strong_POWERSYNC_password \ No newline at end of file diff --git a/modules/module-mssql/dev/README.md b/modules/module-mssql/dev/README.md new file mode 100644 index 000000000..8dfde62d8 --- /dev/null +++ b/modules/module-mssql/dev/README.md @@ -0,0 +1,82 @@ +# MSSQL Dev Database + +This directory contains Docker Compose configuration for running a local MSSQL Server instance with CDC (Change Data Capture) enabled for development and testing. The image used is the 2022 Edition of SQL Server. 2025 can also be used, but has issues on Mac OS X 26 Tahoe due to this issue: https://github.com/microsoft/mssql-docker/issues/942 + +## Prerequisites + +- Docker and Docker Compose installed +- A `.env` file in this directory see the `.env.template` for required variables + +## Environment Variables + +```bash +ROOT_PASSWORD= +DATABASE= +DB_USER= +DB_USER_PASSWORD= +``` + +**Note:** The `ROOT_PASSWORD` and `DB_USER_PASSWORD` must meet SQL Server password complexity requirements (at least 8 characters, including uppercase, lowercase, numbers, and special characters). + +## Usage + +### Starting the Database + +From the `dev` directory, run: + +```bash +docker compose up -d +``` + +This will: +1. 
Start the MSSQL Server container (`mssql-dev`) +2. Wait for the database to be healthy +3. Automatically run the setup container (`mssql-dev-setup`) which executes `init.sql` + +### Stopping the Database + +```bash +docker compose down +``` + +To also remove the data volume: + +```bash +docker compose down -v +``` + +### Viewing Logs + +```bash +docker compose logs -f +``` + +## What `init.sql` Does + +The initialization script (`init.sql`) performs the following setup steps: + +1. **Database Creation**: Creates the application database (if it doesn't exist) +2. **CDC Setup**: Enables Change Data Capture at the database level +3. **User Creation**: Creates a SQL Server login and database user with appropriate permissions +4. **Create PowerSync Checkpoints table**: Creates the required `_powersync_checkpoints` table. +5. **Demo Tables**: Creates sample tables (`lists` and `todos`) for testing (optional examples) +6. **CDC Table Enablement**: Enables CDC tracking on the demo tables +7. **Permissions**: Grants `db_datareader` and `cdc_reader` roles to the application user +8. **Sample Data**: Inserts initial test data into the `lists` table + +All operations are idempotent, so you can safely re-run the setup without errors. The demo tables section (steps 5–7) serves as an example of how to enable CDC on your own tables. + +## Connection Details + +- **Host**: `localhost` +- **Port**: `1433` +- **SA Login**: `sa` / `{ROOT_PASSWORD}` +- **App Login**: `{DB_USER}` / `{DB_USER_PASSWORD}` +- **Database**: `{DATABASE}` + +## Troubleshooting + +- If the setup container fails, check logs: `docker compose logs mssql-dev-setup` +- Ensure your `.env` file exists and contains all required variables +- The database container may take 30–60 seconds to become healthy on the first startup +- If you encounter connection issues, verify the container is running: `docker compose ps` diff --git a/modules/module-mssql/dev/docker-compose.yaml b/modules/module-mssql/dev/docker-compose.yaml new file mode 100644 index 000000000..e5f5d120a --- /dev/null +++ b/modules/module-mssql/dev/docker-compose.yaml @@ -0,0 +1,39 @@ +name: mssql-dev +services: + mssql-dev: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2022-latest # 2025 Can also be used, but not on Mac 26 Tahoe due to this issue: https://github.com/microsoft/mssql-docker/issues/942 + container_name: mssql-dev + ports: + - "1433:1433" + environment: + ACCEPT_EULA: "Y" + MSSQL_SA_PASSWORD: "${ROOT_PASSWORD}" + MSSQL_PID: "Developer" + MSSQL_AGENT_ENABLED: "true" # required for CDC capture/cleanup jobs + volumes: + - data:/var/opt/mssql + healthcheck: + test: [ "CMD-SHELL", "/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"$${MSSQL_SA_PASSWORD}\" -Q \"SELECT 1;\" || exit 1" ] + interval: 5s + timeout: 3s + retries: 30 + + mssql-dev-setup: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: mssql-dev-setup + depends_on: + mssql-dev: + condition: service_healthy + environment: + MSSQL_SA_PASSWORD: "${ROOT_PASSWORD}" + DATABASE: "${DATABASE}" + DB_USER: "${DB_USER}" + DB_USER_PASSWORD: "${DB_USER_PASSWORD}" + volumes: + - ./init.sql:/scripts/init.sql:ro + entrypoint: ["/bin/bash", "-lc", "/opt/mssql-tools18/bin/sqlcmd -C -S mssql-dev,1433 -U sa -P \"$${MSSQL_SA_PASSWORD}\" -i /scripts/init.sql && echo '✅ MSSQL init done'"] + +volumes: + data: diff --git a/modules/module-mssql/dev/init.sql b/modules/module-mssql/dev/init.sql new file mode 100644 index 000000000..55e2199c7 --- /dev/null +++ 
b/modules/module-mssql/dev/init.sql @@ -0,0 +1,155 @@ +-- Create database (idempotent) +IF DB_ID('$(DATABASE)') IS NULL +BEGIN + CREATE DATABASE [$(DATABASE)]; +END +GO + +-- Enable CDC at the database level (idempotent) +USE [$(DATABASE)]; +IF (SELECT is_cdc_enabled FROM sys.databases WHERE name = '$(DATABASE)') = 0 +BEGIN + EXEC sys.sp_cdc_enable_db; +END +GO + +-- Create a SQL login (server) if missing +USE [master]; +IF NOT EXISTS (SELECT 1 FROM sys.server_principals WHERE name = '$(DB_USER)') +BEGIN + CREATE LOGIN [$(DB_USER)] WITH PASSWORD = '$(DB_USER_PASSWORD)', CHECK_POLICY = ON; +END +GO + +-- Create DB user for the app DB if missing +USE [$(DATABASE)]; +IF NOT EXISTS (SELECT 1 FROM sys.database_principals WHERE name = '$(DB_USER)') +BEGIN + CREATE USER [$(DB_USER)] FOR LOGIN [$(DB_USER)]; +END +GO + +-- Required for PowerSync to access the sys.dm_db_log_stats DMV +USE [master]; +GRANT VIEW SERVER PERFORMANCE STATE TO [$(DB_USER)]; +GO + +-- Required for PowerSync to access the sys.dm_db_log_stats DMV and the sys.dm_db_partition_stats DMV +USE [$(DATABASE)]; +GRANT VIEW DATABASE PERFORMANCE STATE TO [$(DB_USER)]; +GO + +-- Create PowerSync checkpoints table +-- Powersync requires this table to ensure regular checkpoints appear in CDC +IF OBJECT_ID('dbo._powersync_checkpoints', 'U') IS NULL +BEGIN + CREATE TABLE dbo._powersync_checkpoints ( + id INT IDENTITY PRIMARY KEY, + last_updated DATETIME NOT NULL DEFAULT (GETDATE()) + ); +END + +GRANT INSERT, UPDATE ON dbo._powersync_checkpoints TO [$(DB_USER)]; +GO + +-- Enable CDC for the powersync checkpoints table +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo._powersync_checkpoints')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'_powersync_checkpoints', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Wait until capture job exists - usually takes a few seconds after enabling CDC on a table for the first time +DECLARE @tries int = 10; +WHILE @tries > 0 AND NOT EXISTS (SELECT 1 FROM msdb.dbo.cdc_jobs WHERE job_type = N'capture') +BEGIN + WAITFOR DELAY '00:00:01'; + SET @tries -= 1; +END; + +-- Set the CDC capture job polling interval to 1 second (default is 5 seconds) +EXEC sys.sp_cdc_change_job @job_type = N'capture', @pollinginterval = 1; +GO + +/* ----------------------------------------------------------- + Create demo lists and todos tables and enables CDC on them. + CDC must be enabled per table to actually capture changes. 
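+   The same sp_cdc_enable_table pattern shown below can be reused for any table that PowerSync should replicate.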
+------------------------------------------------------------*/ +IF OBJECT_ID('dbo.lists', 'U') IS NULL +BEGIN + CREATE TABLE dbo.lists ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + name NVARCHAR(MAX) NOT NULL, + owner_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_lists PRIMARY KEY (id) + ); +END + +GRANT INSERT, UPDATE, DELETE ON dbo.lists TO [$(DB_USER)]; +GO + +IF OBJECT_ID('dbo.todos', 'U') IS NULL +BEGIN + CREATE TABLE dbo.todos ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + completed_at DATETIME2 NULL, + description NVARCHAR(MAX) NOT NULL, + completed BIT NOT NULL DEFAULT 0, + created_by UNIQUEIDENTIFIER NULL, + completed_by UNIQUEIDENTIFIER NULL, + list_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_todos PRIMARY KEY (id), + CONSTRAINT FK_todos_lists FOREIGN KEY (list_id) REFERENCES dbo.lists(id) ON DELETE CASCADE + ); +END + +GRANT INSERT, UPDATE, DELETE ON dbo.todos TO [$(DB_USER)]; +GO + +-- Enable CDC for dbo.lists (idempotent guard) +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo.lists')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'lists', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Enable CDC for dbo.todos (idempotent guard) +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo.todos')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'todos', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Grant minimal rights to read CDC data +IF IS_ROLEMEMBER('db_datareader', '$(DB_USER)') = 0 +BEGIN + ALTER ROLE db_datareader ADD MEMBER [$(DB_USER)]; +END + +IF IS_ROLEMEMBER('cdc_reader', '$(DB_USER)') = 0 +BEGIN + ALTER ROLE cdc_reader ADD MEMBER [$(DB_USER)]; +END +GO + +-- Add demo data +IF NOT EXISTS (SELECT 1 FROM dbo.lists) +BEGIN +INSERT INTO dbo.lists (id, name, owner_id) +VALUES (NEWID(), 'Do a demo', NEWID()); +END +GO \ No newline at end of file diff --git a/modules/module-mssql/package.json b/modules/module-mssql/package.json new file mode 100644 index 000000000..37a955dd8 --- /dev/null +++ b/modules/module-mssql/package.json @@ -0,0 +1,51 @@ +{ + "name": "@powersync/service-module-mssql", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.0.1", + "license": "FSL-1.1-ALv2", + "main": "dist/index.js", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-errors": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "mssql": "^12.1.1", + "semver": "^7.7.2", + "ts-codec": "^1.3.0", + "uri-js": "^4.4.1", + "uuid": "^11.1.0" + }, + "devDependencies": { + "@powersync/service-core-tests": "workspace:*", + 
"@powersync/service-module-mongodb-storage": "workspace:*", + "@powersync/service-module-postgres-storage": "workspace:*", + "@types/mssql": "^9.1.8", + "@types/semver": "^7.7.1", + "@types/uuid": "^10.0.0" + } +} diff --git a/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts new file mode 100644 index 000000000..398a59a2f --- /dev/null +++ b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts @@ -0,0 +1,283 @@ +import { + api, + ParseSyncRulesOptions, + PatternResult, + ReplicationHeadCallback, + ReplicationLagOptions +} from '@powersync/service-core'; +import * as service_types from '@powersync/service-types'; +import * as sync_rules from '@powersync/service-sync-rules'; +import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; +import * as types from '../types/types.js'; +import { ExecuteSqlResponse } from '@powersync/service-types/dist/routes.js'; +import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; +import { + checkSourceConfiguration, + createCheckpoint, + getDebugTableInfo, + getLatestLSN, + POWERSYNC_CHECKPOINTS_TABLE +} from '../utils/mssql.js'; +import { getTablesFromPattern, ResolvedTable } from '../utils/schema.js'; +import { toExpressionTypeFromMSSQLType } from '../common/mssqls-to-sqlite.js'; + +export class MSSQLRouteAPIAdapter implements api.RouteAPI { + protected connectionManager: MSSQLConnectionManager; + + constructor(protected config: types.ResolvedMSSQLConnectionConfig) { + this.connectionManager = new MSSQLConnectionManager(config, {}); + } + + async createReplicationHead(callback: ReplicationHeadCallback): Promise { + const currentLSN = await getLatestLSN(this.connectionManager); + const result = await callback(currentLSN.toString()); + + // Updates the powersync checkpoints table on the source database, ensuring that an update with a newer LSN will be captured by the CDC. 
+ await createCheckpoint(this.connectionManager); + + return result; + } + + async executeQuery(query: string, params: any[]): Promise { + if (!this.config.debug_api) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: 'SQL querying is not enabled' + }); + } + try { + const { recordset: result } = await this.connectionManager.query(query, params); + return service_types.internal_routes.ExecuteSqlResponse.encode({ + success: true, + results: { + columns: Object.values(result.columns).map((column) => column.name), + rows: result.map((row) => { + return Object.values(row).map((value: any) => { + const sqlValue = sync_rules.applyValueContext( + sync_rules.toSyncRulesValue(row), + sync_rules.CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY + ); + + if (typeof sqlValue == 'bigint') { + return Number(row); + } else if (value instanceof Date) { + return value.toISOString(); + } else if (sync_rules.isJsonValue(sqlValue)) { + return sqlValue; + } else { + return null; + } + }); + }) + } + }); + } catch (e) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: e.message + }); + } + } + + async getConnectionSchema(): Promise { + const { recordset: results } = await this.connectionManager.query(` + SELECT + sch.name AS schema_name, + tbl.name AS table_name, + col.name AS column_name, + typ.name AS data_type, + CASE + WHEN typ.name IN ('nvarchar', 'nchar') + AND col.max_length > 0 + AND col.max_length != -1 + THEN typ.name + '(' + CAST(col.max_length / 2 AS VARCHAR) + ')' + WHEN typ.name IN ('varchar', 'char', 'varbinary', 'binary') + AND col.max_length > 0 + AND col.max_length != -1 + THEN typ.name + '(' + CAST(col.max_length AS VARCHAR) + ')' + WHEN typ.name IN ('varchar', 'nvarchar', 'char', 'nchar') + AND col.max_length = -1 + THEN typ.name + '(MAX)' + WHEN typ.name IN ('decimal', 'numeric') + AND col.precision > 0 + THEN typ.name + '(' + CAST(col.precision AS VARCHAR) + ',' + CAST(col.scale AS VARCHAR) + ')' + WHEN typ.name IN ('float', 'real') + AND col.precision > 0 + THEN typ.name + '(' + CAST(col.precision AS VARCHAR) + ')' + ELSE typ.name + END AS formatted_type + FROM sys.tables AS tbl + JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id + JOIN sys.columns AS col ON col.object_id = tbl.object_id + JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id + WHERE sch.name = '${this.connectionManager.schema}' + AND sch.name NOT IN ('sys', 'INFORMATION_SCHEMA', 'cdc') + AND tbl.name NOT IN ('systranschemas', '${POWERSYNC_CHECKPOINTS_TABLE}') + AND tbl.type = 'U' + AND col.is_computed = 0 + ORDER BY sch.name, tbl.name, col.column_id + `); + + /** + * Reduces the SQL results into a Record of {@link DatabaseSchema} + * then returns the values as an array. 
+ */ + const schemas: Record = {}; + + for (const row of results) { + const schemaName = row.schema_name as string; + const tableName = row.table_name as string; + const columnName = row.column_name as string; + const dataType = row.data_type as string; + const formattedType = (row.formatted_type as string) || dataType; + + const schema = + schemas[schemaName] || + (schemas[schemaName] = { + name: schemaName, + tables: [] + }); + + let table = schema.tables.find((t) => t.name === tableName); + if (!table) { + table = { + name: tableName, + columns: [] + }; + schema.tables.push(table); + } + + table.columns.push({ + name: columnName, + type: formattedType, + sqlite_type: toExpressionTypeFromMSSQLType(dataType).typeFlags, + internal_type: formattedType, + pg_type: formattedType + }); + } + + return Object.values(schemas); + } + + async getConnectionStatus(): Promise { + const base = { + id: this.config?.id ?? '', + uri: this.config == null ? '' : types.baseUri(this.config) + }; + + try { + await this.connectionManager.query(`SELECT 'PowerSync connection test'`); + } catch (e) { + return { + ...base, + connected: false, + errors: [{ level: 'fatal', message: `${e.code} - message: ${e.message}` }] + }; + } + + try { + const errors = await checkSourceConfiguration(this.connectionManager); + if (errors.length) { + return { + ...base, + connected: true, + errors: errors.map((e) => ({ level: 'fatal', message: e })) + }; + } + } catch (e) { + return { + ...base, + connected: true, + errors: [{ level: 'fatal', message: e.message }] + }; + } + + return { + ...base, + connected: true, + errors: [] + }; + } + + async getDebugTablesInfo(tablePatterns: TablePattern[], sqlSyncRules: SqlSyncRules): Promise { + const result: PatternResult[] = []; + + for (const tablePattern of tablePatterns) { + const schema = tablePattern.schema; + const patternResult: PatternResult = { + schema: schema, + pattern: tablePattern.tablePattern, + wildcard: tablePattern.isWildcard + }; + result.push(patternResult); + + const tables = await getTablesFromPattern(this.connectionManager, tablePattern); + if (tablePattern.isWildcard) { + patternResult.tables = []; + for (const table of tables) { + const details = await getDebugTableInfo({ + connectionManager: this.connectionManager, + tablePattern, + table, + syncRules: sqlSyncRules + }); + patternResult.tables.push(details); + } + } else { + if (tables.length == 0) { + // This should tenchnically never happen, but we'll handle it anyway. 
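+          // The explicitly named table was not found on the source database, so report a
+          // placeholder entry (objectId 0) rather than failing the debug request.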
+ const resolvedTable: ResolvedTable = { + objectId: 0, + schema: schema, + name: tablePattern.name + }; + patternResult.table = await getDebugTableInfo({ + connectionManager: this.connectionManager, + tablePattern, + table: resolvedTable, + syncRules: sqlSyncRules + }); + } else { + patternResult.table = await getDebugTableInfo({ + connectionManager: this.connectionManager, + tablePattern, + table: tables[0], + syncRules: sqlSyncRules + }); + } + } + } + + return result; + } + + getParseSyncRulesOptions(): ParseSyncRulesOptions { + return { + defaultSchema: this.connectionManager.schema + }; + } + + async getReplicationLagBytes(options: ReplicationLagOptions): Promise { + return undefined; + } + + async getSourceConfig(): Promise { + return this.config; + } + + async [Symbol.asyncDispose]() { + await this.shutdown(); + } + + async shutdown(): Promise { + await this.connectionManager.end(); + } +} diff --git a/modules/module-mssql/src/common/LSN.ts b/modules/module-mssql/src/common/LSN.ts new file mode 100644 index 000000000..756c6a153 --- /dev/null +++ b/modules/module-mssql/src/common/LSN.ts @@ -0,0 +1,77 @@ +import { ReplicationAssertionError } from '@powersync/service-errors'; + +/** + * Helper class for interpreting and manipulating SQL Server Log Sequence Numbers (LSNs). + * In SQL Server, an LSN is stored as a 10-byte binary value. + * But it is commonly represented in a human-readable format as three hexadecimal parts separated by colons: + * `00000000:00000000:0000`. + * + * The three parts represent different hierarchical levels of the transaction log: + * 1. The first part identifies the Virtual Log File (VLF). + * 2. The second part points to the log block within the VLF. + * 3. The third part specifies the exact log record within the log block. + */ + +export class LSN { + /** + * The zero or null LSN value. All other LSN values are greater than this. + */ + static ZERO = '00000000:00000000:0000'; + + protected value: string; + + private constructor(lsn: string) { + this.value = lsn; + } + + /** + * Converts this LSN back into its raw 10-byte binary representation for use in SQL Server functions. + */ + toBinary(): Buffer { + let sanitized: string = this.value.replace(/:/g, ''); + return Buffer.from(sanitized, 'hex'); + } + + /** + * Converts a raw 10-byte binary LSN value into its string representation. + * An error is thrown if the binary value is not exactly 10 bytes. + * @param rawLSN + */ + static fromBinary(rawLSN: Buffer): LSN { + if (rawLSN.length !== 10) { + throw new ReplicationAssertionError(`LSN must be 10 bytes, got ${rawLSN.length}`); + } + const hex = rawLSN.toString('hex').toUpperCase(); // 20 hex chars + + return new LSN(`${hex.slice(0, 8)}:${hex.slice(8, 16)}:${hex.slice(16, 20)}`); + } + + /** + * Creates an LSN instance from the provided string representation. An error is thrown if the format is invalid. + * @param stringLSN + */ + static fromString(stringLSN: string): LSN { + if (!/^[0-9A-Fa-f]{8}:[0-9A-Fa-f]{8}:[0-9A-Fa-f]{4}$/.test(stringLSN)) { + throw new ReplicationAssertionError( + `Invalid LSN string. Expected format is [00000000:00000000:0000]. Got: ${stringLSN}` + ); + } + + return new LSN(stringLSN); + } + + compare(other: LSN): -1 | 0 | 1 { + if (this.value === other.value) { + return 0; + } + return this.value < other.value ? 
-1 : 1; + } + + valueOf(): string { + return this.value; + } + + toString(): string { + return this.value; + } +} diff --git a/modules/module-mssql/src/common/MSSQLSourceTable.ts b/modules/module-mssql/src/common/MSSQLSourceTable.ts new file mode 100644 index 000000000..0d0dbd597 --- /dev/null +++ b/modules/module-mssql/src/common/MSSQLSourceTable.ts @@ -0,0 +1,54 @@ +import { SourceTable } from '@powersync/service-core'; +import { toQualifiedTableName } from '../utils/mssql.js'; + +export interface CaptureInstance { + name: string; + schema: string; +} + +export interface MSSQLSourceTableOptions { + sourceTable: SourceTable; + /** + * The unique name of the CDC capture instance for this table + */ + captureInstance: CaptureInstance; +} + +export class MSSQLSourceTable { + constructor(private options: MSSQLSourceTableOptions) {} + + get sourceTable() { + return this.options.sourceTable; + } + + updateSourceTable(updated: SourceTable): void { + this.options.sourceTable = updated; + } + + get captureInstance() { + return this.options.captureInstance.name; + } + + get cdcSchema() { + return this.options.captureInstance.schema; + } + + get CTTable() { + return `${this.cdcSchema}.${this.captureInstance}_CT`; + } + + get allChangesFunction() { + return `${this.cdcSchema}.fn_cdc_get_all_changes_${this.captureInstance}`; + } + + get netChangesFunction() { + return `${this.cdcSchema}.fn_cdc_get_net_changes_${this.captureInstance}`; + } + + /** + * Escapes this source table's name and schema for use in MSSQL queries. + */ + toQualifiedName(): string { + return toQualifiedTableName(this.sourceTable.schema, this.sourceTable.name); + } +} diff --git a/modules/module-mssql/src/common/MSSQLSourceTableCache.ts b/modules/module-mssql/src/common/MSSQLSourceTableCache.ts new file mode 100644 index 000000000..18f984bd8 --- /dev/null +++ b/modules/module-mssql/src/common/MSSQLSourceTableCache.ts @@ -0,0 +1,36 @@ +import { SourceTable } from '@powersync/service-core'; +import { MSSQLSourceTable } from './MSSQLSourceTable.js'; +import { ServiceAssertionError } from '@powersync/service-errors'; + +export class MSSQLSourceTableCache { + private cache = new Map(); + + set(table: MSSQLSourceTable): void { + this.cache.set(table.sourceTable.objectId!, table); + } + + /** + * Updates the underlying source table of the cached MSSQLSourceTable. 
+ * @param updatedTable + */ + updateSourceTable(updatedTable: SourceTable) { + const existingTable = this.cache.get(updatedTable.objectId!); + + if (!existingTable) { + throw new ServiceAssertionError('Tried to update a non-existing MSSQLSourceTable in the cache'); + } + existingTable.updateSourceTable(updatedTable); + } + + get(tableId: number): MSSQLSourceTable | undefined { + return this.cache.get(tableId); + } + + getAll(): MSSQLSourceTable[] { + return Array.from(this.cache.values()); + } + + delete(tableId: number): boolean { + return this.cache.delete(tableId); + } +} diff --git a/modules/module-mssql/src/common/mssqls-to-sqlite.ts b/modules/module-mssql/src/common/mssqls-to-sqlite.ts new file mode 100644 index 000000000..dec261923 --- /dev/null +++ b/modules/module-mssql/src/common/mssqls-to-sqlite.ts @@ -0,0 +1,151 @@ +import sql from 'mssql'; +import { DatabaseInputRow, ExpressionType, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules'; +import { MSSQLUserDefinedType } from '../types/mssql-data-types.js'; + +export function toSqliteInputRow(row: any, columns: sql.IColumnMetadata): SqliteInputRow { + let result: DatabaseInputRow = {}; + for (const key in row) { + // We are very much expecting the column to be there + const columnMetadata = columns[key]; + + if (row[key] !== null) { + switch (columnMetadata.type) { + case sql.TYPES.BigInt: + // MSSQL returns BIGINT as a string to avoid precision loss + if (typeof row[key] === 'string') { + result[key] = BigInt(row[key]); + } + break; + case sql.TYPES.Bit: + // MSSQL returns BIT as boolean + result[key] = row[key] ? 1 : 0; + break; + // Convert Dates to string + case sql.TYPES.Date: + result[key] = toISODateString(row[key] as Date); + break; + case sql.TYPES.Time: + result[key] = toISOTimeString(row[key] as Date); + break; + case sql.TYPES.DateTime: + case sql.TYPES.DateTime2: + case sql.TYPES.SmallDateTime: + case sql.TYPES.DateTimeOffset: // The offset is lost when the driver converts to Date. This needs to be handled in the sql query. + const date = row[key] as Date; + result[key] = isNaN(date.getTime()) ? null : date.toISOString(); + break; + case sql.TYPES.Binary: + case sql.TYPES.VarBinary: + case sql.TYPES.Image: + result[key] = new Uint8Array(Object.values(row[key])); + break; + // TODO: Spatial types need to be converted to binary WKB, they are returned as a non standard object currently + case sql.TYPES.Geometry: + case sql.TYPES.Geography: + result[key] = JSON.stringify(row[key]); + break; + case sql.TYPES.UDT: + if (columnMetadata.udt.name === MSSQLUserDefinedType.HIERARCHYID) { + result[key] = new Uint8Array(Object.values(row[key])); + break; + } else { + result[key] = row[key]; + } + break; + default: + result[key] = row[key]; + } + } else { + // If the value is null, we just set it to null + result[key] = null; + } + } + return toSyncRulesRow(result); +} + +function toISODateString(date: Date): string | null { + return isNaN(date.getTime()) ? null : date.toISOString().split('T')[0]; +} + +/** + * MSSQL time format is HH:mm:ss[.nnnnnnn] + * @param date + * @returns + */ +function toISOTimeString(date: Date): string | null { + return isNaN(date.getTime()) ? 
null : date.toISOString().split('T')[1].replace('Z', ''); +} + +/** + * Converts MSSQL type names to SQLite ExpressionType + * @param mssqlType - The MSSQL type name (e.g., 'int', 'varchar', 'datetime2') + */ +export function toExpressionTypeFromMSSQLType(mssqlType: string | undefined): ExpressionType { + if (!mssqlType) { + return ExpressionType.TEXT; + } + + const baseType = mssqlType.toUpperCase(); + switch (baseType) { + case 'BIT': + case 'TINYINT': + case 'SMALLINT': + case 'INT': + case 'INTEGER': + case 'BIGINT': + return ExpressionType.INTEGER; + case 'BINARY': + case 'VARBINARY': + case 'IMAGE': + case 'TIMESTAMP': + return ExpressionType.BLOB; + case 'FLOAT': + case 'REAL': + case 'MONEY': + case 'SMALLMONEY': + case 'DECIMAL': + case 'NUMERIC': + return ExpressionType.REAL; + case 'JSON': + return ExpressionType.TEXT; + // System and extended types + case 'SYSNAME': + // SYSNAME is essentially NVARCHAR(128), map to TEXT + return ExpressionType.TEXT; + case 'HIERARCHYID': + // HIERARCHYID is a CLR UDT representing hierarchical data, stored as string representation + return ExpressionType.TEXT; + case 'GEOMETRY': + case 'GEOGRAPHY': + // Spatial CLR UDT types, typically stored as WKT (Well-Known Text) strings + return ExpressionType.TEXT; + case 'VECTOR': + // Vector type (SQL Server 2022+), stored as binary data + return ExpressionType.BLOB; + default: + // In addition to the normal text types, includes: VARCHAR, NVARCHAR, CHAR, NCHAR, TEXT, NTEXT, DATE, TIME, DATETIME, DATETIME2, SMALLDATETIME, DATETIMEOFFSET, XML, UNIQUEIDENTIFIER, SQL_VARIANT + return ExpressionType.TEXT; + } +} + +export interface CDCRowToSqliteRowOptions { + row: any; + columns: sql.IColumnMetadata; +} +// CDC metadata columns in CDCS rows that should be excluded +const CDC_METADATA_COLUMNS = ['__$operation', '__$start_lsn', '__$end_lsn', '__$seqval', '__$update_mask']; +/** + * Convert CDC row data to SqliteRow format. + * CDC rows include table columns plus CDC metadata columns (__$operation, __$start_lsn, etc.) + * which we filter out. 
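+ * For example, a CDC change row such as { __$operation: 2, __$start_lsn: <binary>, id: 1, name: 'Do a demo' }
+ * (illustrative values) is reduced to { id: 1, name: 'Do a demo' } before being converted by toSqliteInputRow.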
+ */ +export function CDCToSqliteRow(options: CDCRowToSqliteRowOptions): SqliteInputRow { + const { row, columns } = options; + const filteredRow: DatabaseInputRow = {}; + for (const key in row) { + if (!CDC_METADATA_COLUMNS.includes(key)) { + filteredRow[key] = row[key]; + } + } + return toSqliteInputRow(filteredRow, columns); +} diff --git a/modules/module-mssql/src/index.ts b/modules/module-mssql/src/index.ts new file mode 100644 index 000000000..844339f8e --- /dev/null +++ b/modules/module-mssql/src/index.ts @@ -0,0 +1 @@ +export * from './module/MSSQLModule.js'; diff --git a/modules/module-mssql/src/module/MSSQLModule.ts b/modules/module-mssql/src/module/MSSQLModule.ts new file mode 100644 index 000000000..95fcd9ca8 --- /dev/null +++ b/modules/module-mssql/src/module/MSSQLModule.ts @@ -0,0 +1,82 @@ +import { + api, + ConfigurationFileSyncRulesProvider, + ConnectionTestResult, + replication, + system, + TearDownOptions +} from '@powersync/service-core'; +import { MSSQLConnectionManagerFactory } from '../replication/MSSQLConnectionManagerFactory.js'; +import * as types from '../types/types.js'; +import { CDCReplicator } from '../replication/CDCReplicator.js'; +import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; +import { checkSourceConfiguration } from '../utils/mssql.js'; +import { MSSQLErrorRateLimiter } from '../replication/MSSQLErrorRateLimiter.js'; +import { MSSQLRouteAPIAdapter } from '../api/MSSQLRouteAPIAdapter.js'; + +export class MSSQLModule extends replication.ReplicationModule { + constructor() { + super({ + name: 'MSSQL', + type: types.MSSQL_CONNECTION_TYPE, + configSchema: types.MSSQLConnectionConfig + }); + } + + async onInitialized(context: system.ServiceContextContainer): Promise {} + + protected createRouteAPIAdapter(): api.RouteAPI { + return new MSSQLRouteAPIAdapter(this.resolveConfig(this.decodedConfig!)); + } + + protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator { + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules); + const connectionFactory = new MSSQLConnectionManagerFactory(normalisedConfig); + + return new CDCReplicator({ + id: this.getDefaultId(normalisedConfig.database), + syncRuleProvider: syncRuleProvider, + storageEngine: context.storageEngine, + metricsEngine: context.metricsEngine, + connectionFactory: connectionFactory, + rateLimiter: new MSSQLErrorRateLimiter(), + additionalConfig: normalisedConfig.additionalConfig + }); + } + + /** + * Combines base config with normalized connection settings + */ + private resolveConfig(config: types.MSSQLConnectionConfig): types.ResolvedMSSQLConnectionConfig { + return { + ...config, + ...types.normalizeConnectionConfig(config) + }; + } + + async teardown(options: TearDownOptions): Promise { + // No specific teardown required for MSSQL + } + + async testConnection(config: types.MSSQLConnectionConfig) { + this.decodeConfig(config); + const normalizedConfig = this.resolveConfig(this.decodedConfig!); + return await MSSQLModule.testConnection(normalizedConfig); + } + + static async testConnection(normalizedConfig: types.ResolvedMSSQLConnectionConfig): Promise { + const connectionManager = new MSSQLConnectionManager(normalizedConfig, { max: 1 }); + try { + const errors = await checkSourceConfiguration(connectionManager); + if (errors.length > 0) { + throw new Error(errors.join('\n')); + } + } finally { + await connectionManager.end(); + 
} + return { + connectionDescription: normalizedConfig.hostname + }; + } +} diff --git a/modules/module-mssql/src/replication/CDCPoller.ts b/modules/module-mssql/src/replication/CDCPoller.ts new file mode 100644 index 000000000..be3f66b92 --- /dev/null +++ b/modules/module-mssql/src/replication/CDCPoller.ts @@ -0,0 +1,247 @@ +import { Logger, logger as defaultLogger, ReplicationAssertionError } from '@powersync/lib-services-framework'; +import timers from 'timers/promises'; +import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; +import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; +import { LSN } from '../common/LSN.js'; +import sql from 'mssql'; +import { getMinLSN, incrementLSN } from '../utils/mssql.js'; +import { AdditionalConfig } from '../types/types.js'; + +enum Operation { + DELETE = 1, + INSERT = 2, + UPDATE_BEFORE = 3, + UPDATE_AFTER = 4 +} +/** + * Schema changes that are detectable by inspecting query events. + * Create table statements are not included here, since new tables are automatically detected when row events + * are received for them. + */ +export enum SchemaChangeType { + RENAME_TABLE = 'Rename Table', + DROP_TABLE = 'Drop Table', + TRUNCATE_TABLE = 'Truncate Table', + ALTER_TABLE_COLUMN = 'Alter Table Column', + REPLICATION_IDENTITY = 'Alter Replication Identity' +} + +export interface SchemaChange { + type: SchemaChangeType; + /** + * The table that the schema change applies to. + */ + table: string; + schema: string; + /** + * Populated for table renames if the newTable was matched by the DatabaseFilter + */ + newTable?: string; +} + +export interface CDCEventHandler { + onInsert: (row: any, table: MSSQLSourceTable, collumns: sql.IColumnMetadata) => Promise; + onUpdate: (rowAfter: any, rowBefore: any, table: MSSQLSourceTable, collumns: sql.IColumnMetadata) => Promise; + onDelete: (row: any, table: MSSQLSourceTable, collumns: sql.IColumnMetadata) => Promise; + onCommit: (lsn: string, transactionCount: number) => Promise; + onSchemaChange: (change: SchemaChange) => Promise; +} + +export interface CDCPollerOptions { + connectionManager: MSSQLConnectionManager; + eventHandler: CDCEventHandler; + sourceTables: MSSQLSourceTable[]; + startLSN: LSN; + logger?: Logger; + additionalConfig: AdditionalConfig; +} + +/** + * + */ +export class CDCPoller { + private connectionManager: MSSQLConnectionManager; + private eventHandler: CDCEventHandler; + private currentLSN: LSN; + private logger: Logger; + private listenerError: Error | null; + + private isStopped: boolean = false; + private isStopping: boolean = false; + private isPolling: boolean = false; + + constructor(public options: CDCPollerOptions) { + this.logger = options.logger ?? 
defaultLogger; + this.connectionManager = options.connectionManager; + this.eventHandler = options.eventHandler; + this.currentLSN = options.startLSN; + this.listenerError = null; + } + + private get pollingBatchSize(): number { + return this.options.additionalConfig.pollingBatchSize; + } + + private get pollingIntervalMs(): number { + return this.options.additionalConfig.pollingIntervalMs; + } + + private get sourceTables(): MSSQLSourceTable[] { + return this.options.sourceTables; + } + + public async stop(): Promise { + if (!(this.isStopped || this.isStopping)) { + this.isStopping = true; + this.isStopped = true; + } + } + + public async replicateUntilStopped(): Promise { + this.logger.info(`CDC polling started with interval of ${this.pollingIntervalMs}ms...`); + this.logger.info(`Polling a maximum of ${this.pollingBatchSize} transactions per polling cycle.`); + while (!this.isStopped) { + // Don't poll if already polling (concurrency guard) + if (this.isPolling) { + await timers.setTimeout(this.pollingIntervalMs); + continue; + } + + try { + const hasChanges = await this.poll(); + if (!hasChanges) { + // No changes found, wait before next poll + await timers.setTimeout(this.pollingIntervalMs); + } + // If changes were found, poll immediately again (no wait) + } catch (error) { + if (!(this.isStopped || this.isStopping)) { + this.listenerError = error as Error; + this.logger.error('Error during CDC polling:', error); + this.stop(); + } + break; + } + } + + if (this.listenerError) { + this.logger.error('CDC polling was stopped due to an error:', this.listenerError); + throw this.listenerError; + } + + this.logger.info(`CDC polling stopped...`); + } + + private async poll(): Promise { + // Set polling flag to prevent concurrent polling cycles + this.isPolling = true; + + try { + // Calculate the LSN bounds for this batch + // CDC bounds are inclusive, so the new startLSN is the currentLSN incremented by 1 + const startLSN = await incrementLSN(this.currentLSN, this.connectionManager); + + const { recordset: results } = await this.connectionManager.query( + `SELECT TOP (${this.pollingBatchSize}) start_lsn + FROM cdc.lsn_time_mapping + WHERE start_lsn >= @startLSN + ORDER BY start_lsn ASC + `, + [{ name: 'startLSN', type: sql.VarBinary, value: startLSN.toBinary() }] + ); + + // No new LSNs found, no changes to process + if (results.length === 0) { + return false; + } + + // The new endLSN is the largest LSN in the result + const endLSN = LSN.fromBinary(results[results.length - 1].start_lsn); + + this.logger.info(`Polling bounds are ${startLSN} -> ${endLSN} spanning ${results.length} transaction(s).`); + + let transactionCount = 0; + for (const table of this.sourceTables) { + const tableTransactionCount = await this.pollTable(table, { startLSN, endLSN }); + // We poll for batch size transactions, but these include transactions not applicable to our Source Tables. + // Each Source Table may or may not have transactions that are applicable to it, so just keep track of the highest number of transactions processed for any Source Table. 
+ if (tableTransactionCount > transactionCount) { + transactionCount = tableTransactionCount; + } + } + + this.logger.info( + `Processed ${results.length} transaction(s), including ${transactionCount} Source Table transaction(s).` + ); + // Call eventHandler.onCommit() with toLSN after processing all tables + await this.eventHandler.onCommit(endLSN.toString(), transactionCount); + + this.currentLSN = endLSN; + + return true; + } finally { + // Always clear polling flag, even on error + this.isPolling = false; + } + } + + private async pollTable(table: MSSQLSourceTable, bounds: { startLSN: LSN; endLSN: LSN }): Promise { + // Ensure that the startLSN is not before the minimum LSN for the table + const minLSN = await getMinLSN(this.connectionManager, table.captureInstance); + if (minLSN > bounds.endLSN) { + return 0; + } else if (minLSN >= bounds.startLSN) { + bounds.startLSN = minLSN; + } + const { recordset: results } = await this.connectionManager.query( + ` + SELECT * FROM ${table.allChangesFunction}(@from_lsn, @to_lsn, 'all update old') ORDER BY __$start_lsn, __$seqval + `, + [ + { name: 'from_lsn', type: sql.VarBinary, value: bounds.startLSN.toBinary() }, + { name: 'to_lsn', type: sql.VarBinary, value: bounds.endLSN.toBinary() } + ] + ); + + let transactionCount = 0; + let updateBefore: any = null; + let lastTransactionLSN: LSN | null = null; + for (const row of results) { + const transactionLSN = LSN.fromBinary(row.__$start_lsn); + switch (row.__$operation) { + case Operation.DELETE: + await this.eventHandler.onDelete(row, table, results.columns); + this.logger.info(`Processed DELETE row LSN: ${transactionLSN}`); + break; + case Operation.INSERT: + await this.eventHandler.onInsert(row, table, results.columns); + this.logger.info(`Processed INSERT row LSN: ${transactionLSN}`); + break; + case Operation.UPDATE_BEFORE: + updateBefore = row; + this.logger.debug(`Processed UPDATE, before row LSN: ${transactionLSN}`); + break; + case Operation.UPDATE_AFTER: + if (updateBefore === null) { + throw new ReplicationAssertionError('Missing before image for update event.'); + } + await this.eventHandler.onUpdate(row, updateBefore, table, results.columns); + updateBefore = null; + this.logger.info(`Processed UPDATE row LSN: ${transactionLSN}`); + break; + default: + this.logger.warn(`Unknown operation type [${row.__$operation}] encountered in CDC changes.`); + } + + // Increment transaction count when we encounter a new transaction LSN (except for UPDATE_BEFORE rows) + if (transactionLSN != lastTransactionLSN) { + lastTransactionLSN = transactionLSN; + if (row.__$operation !== Operation.UPDATE_BEFORE) { + transactionCount++; + } + } + } + + return transactionCount; + } +} diff --git a/modules/module-mssql/src/replication/CDCReplicationJob.ts b/modules/module-mssql/src/replication/CDCReplicationJob.ts new file mode 100644 index 000000000..120649544 --- /dev/null +++ b/modules/module-mssql/src/replication/CDCReplicationJob.ts @@ -0,0 +1,87 @@ +import { replication } from '@powersync/service-core'; +import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js'; +import { container, logger as defaultLogger } from '@powersync/lib-services-framework'; +import { CDCDataExpiredError, CDCStream } from './CDCStream.js'; +import { AdditionalConfig } from '../types/types.js'; + +export interface CDCReplicationJobOptions extends replication.AbstractReplicationJobOptions { + connectionFactory: MSSQLConnectionManagerFactory; + additionalConfig: AdditionalConfig; +} + +export class 
CDCReplicationJob extends replication.AbstractReplicationJob { + private connectionFactory: MSSQLConnectionManagerFactory; + private lastStream: CDCStream | null = null; + private cdcReplicationJobOptions: CDCReplicationJobOptions; + + constructor(options: CDCReplicationJobOptions) { + super(options); + this.logger = defaultLogger.child({ prefix: `[powersync_${this.options.storage.group_id}] ` }); + this.connectionFactory = options.connectionFactory; + this.cdcReplicationJobOptions = options; + } + + async keepAlive() { + // TODO Might need to leverage checkpoints table as a keepAlive + } + + async replicate() { + try { + await this.replicateOnce(); + } catch (e) { + // Fatal exception + if (!this.isStopped) { + // Ignore aborted errors + this.logger.error(`Replication error`, e); + if (e.cause != null) { + this.logger.error(`cause`, e.cause); + } + + container.reporter.captureException(e, { + metadata: {} + }); + + // This sets the retry delay + this.rateLimiter.reportError(e); + } + if (e instanceof CDCDataExpiredError) { + // This stops replication and restarts with a new instance + await this.options.storage.factory.restartReplication(this.storage.group_id); + } + } finally { + this.abortController.abort(); + } + } + + async replicateOnce() { + // New connections on every iteration (every error with retry), + // otherwise we risk repeating errors related to the connection, + // such as caused by cached PG schemas. + const connectionManager = this.connectionFactory.create({ + idleTimeoutMillis: 30_000, + max: 2 + }); + try { + await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal }); + if (this.isStopped) { + return; + } + const stream = new CDCStream({ + logger: this.logger, + abortSignal: this.abortController.signal, + storage: this.options.storage, + metrics: this.options.metrics, + connections: connectionManager, + additionalConfig: this.cdcReplicationJobOptions.additionalConfig + }); + this.lastStream = stream; + await stream.replicate(); + } finally { + await connectionManager.end(); + } + } + + async getReplicationLagMillis(): Promise { + return this.lastStream?.getReplicationLagMillis(); + } +} diff --git a/modules/module-mssql/src/replication/CDCReplicator.ts b/modules/module-mssql/src/replication/CDCReplicator.ts new file mode 100644 index 000000000..7c203b783 --- /dev/null +++ b/modules/module-mssql/src/replication/CDCReplicator.ts @@ -0,0 +1,70 @@ +import { replication, storage } from '@powersync/service-core'; +import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js'; +import { CDCReplicationJob } from './CDCReplicationJob.js'; +import { MSSQLModule } from '../module/MSSQLModule.js'; +import { AdditionalConfig } from '../types/types.js'; + +export interface CDCReplicatorOptions extends replication.AbstractReplicatorOptions { + connectionFactory: MSSQLConnectionManagerFactory; + additionalConfig: AdditionalConfig; +} + +export class CDCReplicator extends replication.AbstractReplicator { + private readonly connectionFactory: MSSQLConnectionManagerFactory; + private readonly cdcReplicatorOptions: CDCReplicatorOptions; + + constructor(options: CDCReplicatorOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + this.cdcReplicatorOptions = options; + } + + createJob(options: replication.CreateJobOptions): CDCReplicationJob { + return new CDCReplicationJob({ + id: this.createJobId(options.storage.group_id), + storage: options.storage, + metrics: this.metrics, + lock: options.lock, + connectionFactory: 
this.connectionFactory, + rateLimiter: this.rateLimiter, + additionalConfig: this.cdcReplicatorOptions.additionalConfig + }); + } + + async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise {} + + async stop(): Promise { + await super.stop(); + await this.connectionFactory.shutdown(); + } + + async testConnection() { + return await MSSQLModule.testConnection(this.connectionFactory.connectionConfig); + } + + async getReplicationLagMillis(): Promise { + // TODO:Get replication lag + const lag = await super.getReplicationLagMillis(); + if (lag != null) { + return lag; + } + + // Booting or in an error loop. Check last active replication status. + // This includes sync rules in an ERROR state. + const content = await this.storage.getActiveSyncRulesContent(); + if (content == null) { + return undefined; + } + // Measure the lag from the last commit or keepalive timestamp. + // This is not 100% accurate since it is the commit time in the storage db rather than + // the source db, but it's the best we currently have for mssql. + const checkpointTs = content.last_checkpoint_ts?.getTime() ?? 0; + const keepaliveTs = content.last_keepalive_ts?.getTime() ?? 0; + const latestTs = Math.max(checkpointTs, keepaliveTs); + if (latestTs != 0) { + return Date.now() - latestTs; + } + + return undefined; + } +} diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts new file mode 100644 index 000000000..de8c3d787 --- /dev/null +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -0,0 +1,691 @@ +import { + container, + DatabaseConnectionError, + ErrorCode, + Logger, + logger as defaultLogger, + ReplicationAbortedError, + ReplicationAssertionError, + ServiceAssertionError +} from '@powersync/lib-services-framework'; +import { getUuidReplicaIdentityBson, MetricsEngine, SourceEntityDescriptor, storage } from '@powersync/service-core'; + +import { SqliteInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; + +import { ReplicationMetric } from '@powersync/service-types'; +import { + BatchedSnapshotQuery, + IdSnapshotQuery, + MSSQLSnapshotQuery, + PrimaryKeyValue, + SimpleSnapshotQuery +} from './MSSQLSnapshotQuery.js'; +import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; +import { getReplicationIdentityColumns, getTablesFromPattern, ResolvedTable } from '../utils/schema.js'; +import { + checkSourceConfiguration, + createCheckpoint, + getCaptureInstance, + getLatestLSN, + getLatestReplicatedLSN, + isIColumnMetadata, + isTableEnabledForCDC, + isWithinRetentionThreshold, + toQualifiedTableName +} from '../utils/mssql.js'; +import sql from 'mssql'; +import { CDCToSqliteRow, toSqliteInputRow } from '../common/mssqls-to-sqlite.js'; +import { LSN } from '../common/LSN.js'; +import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; +import { MSSQLSourceTableCache } from '../common/MSSQLSourceTableCache.js'; +import { CDCEventHandler, CDCPoller } from './CDCPoller.js'; +import { AdditionalConfig } from '../types/types.js'; + +export interface CDCStreamOptions { + connections: MSSQLConnectionManager; + storage: storage.SyncRulesBucketStorage; + metrics: MetricsEngine; + abortSignal: AbortSignal; + logger?: Logger; + /** + * Override snapshot batch size for testing. + * Defaults to 10_000. + * Note that queries are streamed, so we don't keep that much data in memory. 
+ */ + snapshotBatchSize?: number; + + additionalConfig: AdditionalConfig; +} + +export enum SnapshotStatus { + IN_PROGRESS = 'in-progress', + DONE = 'done', + RESTART_REQUIRED = 'restart-required' +} + +export interface SnapshotStatusResult { + status: SnapshotStatus; + snapshotLSN: string | null; +} + +export class CDCConfigurationError extends Error { + constructor(message: string) { + super(message); + } +} + +/** + * Thrown when required updates in the CDC instance tables are no longer available + * + * Possible reasons: + * * Older data has been cleaned up due to exceeding the retention period. + * This can happen if PowerSync was stopped for a long period of time. + */ +export class CDCDataExpiredError extends DatabaseConnectionError { + constructor(message: string, cause: any) { + super(ErrorCode.PSYNC_S1500, message, cause); + } +} + +export class CDCStream { + private readonly syncRules: SqlSyncRules; + private readonly storage: storage.SyncRulesBucketStorage; + private readonly connections: MSSQLConnectionManager; + private readonly abortSignal: AbortSignal; + private readonly logger: Logger; + + private tableCache = new MSSQLSourceTableCache(); + + /** + * Time of the oldest uncommitted change, according to the source db. + * This is used to determine the replication lag. + */ + private oldestUncommittedChange: Date | null = null; + /** + * Keep track of whether we have done a commit or keepalive yet. + * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present. + */ + public isStartingReplication = true; + + constructor(private options: CDCStreamOptions) { + this.logger = options.logger ?? defaultLogger; + this.storage = options.storage; + this.syncRules = options.storage.getParsedSyncRules({ defaultSchema: options.connections.schema }); + this.connections = options.connections; + this.abortSignal = options.abortSignal; + } + + private get metrics() { + return this.options.metrics; + } + + get stopped() { + return this.abortSignal.aborted; + } + + get defaultSchema() { + return this.connections.schema; + } + + get groupId() { + return this.options.storage.group_id; + } + + get connectionId() { + const { connectionId } = this.connections; + // Default to 1 if not set + if (!connectionId) { + return 1; + } + /** + * This is often `"default"` (string) which will parse to `NaN` + */ + const parsed = Number.parseInt(connectionId); + if (isNaN(parsed)) { + return 1; + } + return parsed; + } + + get connectionTag() { + return this.connections.connectionTag; + } + + get snapshotBatchSize() { + return this.options.snapshotBatchSize ?? 
10_000; + } + + async replicate() { + try { + await this.initReplication(); + await this.streamChanges(); + } catch (e) { + await this.storage.reportError(e); + throw e; + } + } + + async populateTableCache() { + const sourceTables = this.syncRules.getSourceTables(); + await this.storage.startBatch( + { + logger: this.logger, + zeroLSN: LSN.ZERO, + defaultSchema: this.defaultSchema, + storeCurrentData: true + }, + async (batch) => { + for (let tablePattern of sourceTables) { + const tables = await this.getQualifiedTableNames(batch, tablePattern); + for (const table of tables) { + this.tableCache.set(table); + } + } + } + ); + } + + async getQualifiedTableNames( + batch: storage.BucketStorageBatch, + tablePattern: TablePattern + ): Promise { + if (tablePattern.connectionTag != this.connections.connectionTag) { + return []; + } + + const matchedTables: ResolvedTable[] = await getTablesFromPattern(this.connections, tablePattern); + + const tables: MSSQLSourceTable[] = []; + for (const matchedTable of matchedTables) { + const isEnabled = await isTableEnabledForCDC({ + connectionManager: this.connections, + table: matchedTable.name, + schema: matchedTable.schema + }); + + if (!isEnabled) { + this.logger.info(`Skipping ${matchedTable.schema}.${matchedTable.name} - table is not enabled for CDC.`); + continue; + } + + // TODO: Check RLS settings for table + + const replicaIdColumns = await getReplicationIdentityColumns({ + connectionManager: this.connections, + tableName: matchedTable.name, + schema: matchedTable.schema + }); + + const table = await this.processTable( + batch, + { + name: matchedTable.name, + schema: matchedTable.schema, + objectId: matchedTable.objectId, + replicaIdColumns: replicaIdColumns.columns + }, + false + ); + + tables.push(table); + } + return tables; + } + + async processTable( + batch: storage.BucketStorageBatch, + table: SourceEntityDescriptor, + snapshot: boolean + ): Promise { + if (!table.objectId && typeof table.objectId != 'number') { + throw new ReplicationAssertionError(`objectId expected, got ${typeof table.objectId}`); + } + const resolved = await this.storage.resolveTable({ + group_id: this.groupId, + connection_id: this.connectionId, + connection_tag: this.connectionTag, + entity_descriptor: table, + sync_rules: this.syncRules + }); + const captureInstance = await getCaptureInstance({ + connectionManager: this.connections, + tableName: resolved.table.name, + schema: resolved.table.schema + }); + if (!captureInstance) { + throw new ServiceAssertionError( + `Missing capture instance for table ${toQualifiedTableName(resolved.table.schema, resolved.table.name)}` + ); + } + const resolvedTable = new MSSQLSourceTable({ + sourceTable: resolved.table, + captureInstance: captureInstance + }); + + // Drop conflicting tables. This includes for example renamed tables. + await batch.drop(resolved.dropTables); + + // Snapshot if: + // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere) + // 2. Snapshot is not already done, AND: + // 3. The table is used in sync rules. + const shouldSnapshot = snapshot && !resolved.table.snapshotComplete && resolved.table.syncAny; + + if (shouldSnapshot) { + // Truncate this table in case a previous snapshot was interrupted. + await batch.truncate([resolved.table]); + + // Start the snapshot inside a transaction. + try { + await this.snapshotTableInTx(batch, resolvedTable); + } finally { + // TODO Cleanup? 
+ } + } + + return resolvedTable; + } + + private async snapshotTableInTx( + batch: storage.BucketStorageBatch, + table: MSSQLSourceTable, + limited?: PrimaryKeyValue[] + ): Promise { + // Note: We use the "Read Committed" isolation level here, not snapshot isolation. + // The data may change during the transaction, but that is compensated for in the streaming + // replication afterward. + const transaction = await this.connections.createTransaction(); + await transaction.begin(sql.ISOLATION_LEVEL.READ_COMMITTED); + try { + await this.snapshotTable(batch, transaction, table, limited); + + // Get the current LSN. + // The data will only be consistent once incremental replication has passed that point. + // We have to get this LSN _after_ we have finished the table snapshot. + // + // There are basically two relevant LSNs here: + // A: PreSnapshot: The LSN before the snapshot starts. + // B: PostSnapshot: The LSN after the table snapshot is complete, which is what we get here. + // When we do the snapshot queries, the data that we get back for each batch could match the state + // anywhere between A and B. To actually have a consistent state on our side, we need to: + // 1. Complete the snapshot. + // 2. Wait until logical replication has caught up with all the changes between A and B. + // Calling `markSnapshotDone(LSN B)` covers that. + const postSnapshotLSN = await getLatestLSN(this.connections); + // Side note: A ROLLBACK would probably also be fine here, since we only read in this transaction. + await transaction.commit(); + const [updatedSourceTable] = await batch.markSnapshotDone([table.sourceTable], postSnapshotLSN.toString()); + this.tableCache.updateSourceTable(updatedSourceTable); + } catch (e) { + await transaction.rollback(); + throw e; + } + } + + private async snapshotTable( + batch: storage.BucketStorageBatch, + transaction: sql.Transaction, + table: MSSQLSourceTable, + limited?: PrimaryKeyValue[] + ) { + let totalEstimatedCount = table.sourceTable.snapshotStatus?.totalEstimatedCount; + let replicatedCount = table.sourceTable.snapshotStatus?.replicatedCount ?? 0; + let lastCountTime = 0; + let query: MSSQLSnapshotQuery; + // We do streaming on two levels: + // 1. Coarse select from the entire table, stream rows 1 by one + // 2. Fine level: Stream batches of rows with each fetch call + if (limited) { + query = new IdSnapshotQuery(transaction, table, limited); + } else if (BatchedSnapshotQuery.supports(table)) { + // Single primary key - we can use the primary key for chunking + const orderByKey = table.sourceTable.replicaIdColumns[0]; + query = new BatchedSnapshotQuery( + transaction, + table, + this.snapshotBatchSize, + table.sourceTable.snapshotStatus?.lastKey ?? null + ); + if (table.sourceTable.snapshotStatus?.lastKey != null) { + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - resuming from ${orderByKey.name} > ${(query as BatchedSnapshotQuery).lastKey}` + ); + } else { + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - resumable` + ); + } + } else { + // Fallback case - query the entire table + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - not resumable` + ); + query = new SimpleSnapshotQuery(transaction, table); + replicatedCount = 0; + } + await query.initialize(); + + let hasRemainingData = true; + while (hasRemainingData) { + // Fetch 10k at a time. 
+ // The balance here is between latency overhead per FETCH call, + // and not spending too much time on each FETCH call. + // We aim for a couple of seconds on each FETCH call. + let batchReplicatedCount = 0; + let columns: sql.IColumnMetadata | null = null; + const cursor = query.next(); + for await (const result of cursor) { + if (columns == null && isIColumnMetadata(result)) { + columns = result; + continue; + } else { + if (!columns) { + throw new ReplicationAssertionError(`Missing column metadata`); + } + const inputRow: SqliteInputRow = toSqliteInputRow(result, columns); + const row = this.syncRules.applyRowContext(inputRow); + // This auto-flushes when the batch reaches its size limit + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table.sourceTable, + before: undefined, + beforeReplicaId: undefined, + after: row, + afterReplicaId: getUuidReplicaIdentityBson(row, table.sourceTable.replicaIdColumns) + }); + + replicatedCount++; + batchReplicatedCount++; + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + } + + this.touch(); + } + + // Important: flush before marking progress + await batch.flush(); + if (limited == null) { + let lastKey: Uint8Array | undefined; + if (query instanceof BatchedSnapshotQuery) { + lastKey = query.getLastKeySerialized(); + } + if (lastCountTime < performance.now() - 10 * 60 * 1000) { + // Even though we're doing the snapshot inside a transaction, the transaction uses + // the default "Read Committed" isolation level. This means we can get new data + // within the transaction, so we re-estimate the count every 10 minutes when replicating + // large tables. + totalEstimatedCount = await this.estimatedCountNumber(table, transaction); + lastCountTime = performance.now(); + } + const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { + lastKey: lastKey, + replicatedCount: replicatedCount, + totalEstimatedCount: totalEstimatedCount + }); + this.tableCache.updateSourceTable(updatedSourceTable); + + this.logger.info(`Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); + } else { + this.logger.info(`Replicating ${table.toQualifiedName()} ${replicatedCount}/${limited.length} for resnapshot`); + } + + if (this.abortSignal.aborted) { + // We only abort after flushing + throw new ReplicationAbortedError(`Initial replication interrupted`); + } + + // When the batch of rows is smaller than the requested batch size we know it is the final batch + if (batchReplicatedCount < this.snapshotBatchSize) { + hasRemainingData = false; + } + } + } + + /** + * Estimate the number of rows in a table. This query uses partition stats view to get a fast estimate of the row count. + * This requires that the MSSQL DB user has the VIEW DATABASE PERFORMANCE STATE permission. + * @param table + * @param transaction + */ + async estimatedCountNumber(table: MSSQLSourceTable, transaction?: sql.Transaction): Promise { + const request = transaction ? transaction.request() : await this.connections.createRequest(); + const { recordset: result } = await request.query( + `SELECT SUM(row_count) AS total_rows + FROM sys.dm_db_partition_stats + WHERE object_id = OBJECT_ID('${table.toQualifiedName()}') + AND index_id < 2;` + ); + return result[0].total_rows ?? -1; + } + + /** + * Start initial replication. + * + * If (partial) replication was done before on this slot, this clears the state + * and starts again from scratch. 
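+ * + * A full restart (clearing storage) only happens when the last checkpoint has aged out of the CDC retention window (SnapshotStatus.RESTART_REQUIRED); otherwise an interrupted snapshot resumes, skipping tables that are already complete and continuing resumable tables from their last recorded key.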
+ */ + async startInitialReplication(snapshotStatus: SnapshotStatusResult) { + let { status, snapshotLSN } = snapshotStatus; + + if (status === SnapshotStatus.RESTART_REQUIRED) { + this.logger.info(`Snapshot restart required, clearing state.`); + // This happens if the last replicated checkpoint LSN is no longer available in the CDC tables. + await this.storage.clear({ signal: this.abortSignal }); + } + + await this.storage.startBatch( + { + logger: this.logger, + zeroLSN: LSN.ZERO, + defaultSchema: this.defaultSchema, + storeCurrentData: false, + skipExistingRows: true + }, + async (batch) => { + if (snapshotLSN == null) { + // First replication attempt - set the snapshot LSN to the current LSN before starting + snapshotLSN = (await getLatestReplicatedLSN(this.connections)).toString(); + await batch.setResumeLsn(snapshotLSN); + const latestLSN = (await getLatestLSN(this.connections)).toString(); + this.logger.info(`Marking snapshot at ${snapshotLSN}, Latest DB LSN ${latestLSN}.`); + } else { + this.logger.info(`Resuming snapshot at ${snapshotLSN}.`); + } + + const tablesToSnapshot: MSSQLSourceTable[] = []; + for (const table of this.tableCache.getAll()) { + if (table.sourceTable.snapshotComplete) { + this.logger.info(`Skipping table [${table.toQualifiedName()}] - snapshot already done.`); + continue; + } + + const count = await this.estimatedCountNumber(table); + const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { + totalEstimatedCount: count + }); + this.tableCache.updateSourceTable(updatedSourceTable); + tablesToSnapshot.push(table); + + this.logger.info(`To replicate: ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); + } + + for (const table of tablesToSnapshot) { + await this.snapshotTableInTx(batch, table); + this.touch(); + } + + // This will not create a consistent checkpoint yet, but will persist the op. + // Actual checkpoint will be created when streaming replication caught up. + await batch.commit(snapshotLSN); + + this.logger.info( + `Snapshot done. Need to replicate from ${snapshotLSN} to ${batch.noCheckpointBeforeLsn} to be consistent` + ); + } + ); + } + + async initReplication() { + const errors = await checkSourceConfiguration(this.connections); + if (errors.length > 0) { + throw new CDCConfigurationError(`CDC Configuration Errors: ${errors.join(', ')}`); + } + + await this.populateTableCache(); + const snapshotStatus = await this.checkSnapshotStatus(); + if (snapshotStatus.status !== SnapshotStatus.DONE) { + await this.startInitialReplication(snapshotStatus); + } + } + + /** + * Checks if the initial sync has already been completed and if updates from the last checkpoint are still available + * in the CDC instances. 
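+ * + * Possible results: + * - DONE: snapshot complete and the last checkpoint LSN is still within the CDC retention window. + * - RESTART_REQUIRED: snapshot complete, but the checkpoint LSN has aged out of the CDC tables. + * - IN_PROGRESS: snapshot not complete yet; snapshotLSN carries the LSN recorded at snapshot start, if any.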
+ */ + private async checkSnapshotStatus(): Promise { + const status = await this.storage.getStatus(); + if (status.snapshot_done && status.checkpoint_lsn) { + // Snapshot is done, but we still need to check that the last known checkpoint LSN is still + // within the threshold of the CDC tables + this.logger.info(`Initial replication already done`); + + const lastCheckpointLSN = LSN.fromString(status.checkpoint_lsn); + // Check that the CDC tables still have valid data + const isAvailable = await isWithinRetentionThreshold({ + checkpointLSN: lastCheckpointLSN, + tables: this.tableCache.getAll(), + connectionManager: this.connections + }); + if (!isAvailable) { + this.logger.warn( + `Updates from the last checkpoint are no longer available in the CDC instance, starting initial replication again.` + ); + } + return { status: isAvailable ? SnapshotStatus.DONE : SnapshotStatus.RESTART_REQUIRED, snapshotLSN: null }; + } else { + return { status: SnapshotStatus.IN_PROGRESS, snapshotLSN: status.snapshot_lsn }; + } + } + + async streamChanges() { + await this.storage.startBatch( + { + logger: this.logger, + zeroLSN: LSN.ZERO, + defaultSchema: this.defaultSchema, + storeCurrentData: false, + skipExistingRows: false + }, + async (batch) => { + if (batch.resumeFromLsn == null) { + throw new ReplicationAssertionError(`No LSN found to resume replication from.`); + } + const startLSN = LSN.fromString(batch.resumeFromLsn); + const sourceTables: MSSQLSourceTable[] = this.tableCache.getAll(); + const eventHandler = this.createEventHandler(batch); + + const poller = new CDCPoller({ + connectionManager: this.connections, + eventHandler, + sourceTables, + startLSN, + logger: this.logger, + additionalConfig: this.options.additionalConfig + }); + + this.abortSignal.addEventListener( + 'abort', + async () => { + await poller.stop(); + }, + { once: true } + ); + + await createCheckpoint(this.connections); + + this.logger.info(`Streaming changes from: ${startLSN}`); + await poller.replicateUntilStopped(); + } + ); + } + + private createEventHandler(batch: storage.BucketStorageBatch): CDCEventHandler { + return { + onInsert: async (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const afterRow = this.toSqliteRow(row, columns); + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table.sourceTable, + before: undefined, + beforeReplicaId: undefined, + after: afterRow, + afterReplicaId: getUuidReplicaIdentityBson(afterRow, table.sourceTable.replicaIdColumns) + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onUpdate: async (rowAfter: any, rowBefore: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const beforeRow = this.toSqliteRow(rowBefore, columns); + const afterRow = this.toSqliteRow(rowAfter, columns); + await batch.save({ + tag: storage.SaveOperationTag.UPDATE, + sourceTable: table.sourceTable, + before: beforeRow, + beforeReplicaId: getUuidReplicaIdentityBson(beforeRow, table.sourceTable.replicaIdColumns), + after: afterRow, + afterReplicaId: getUuidReplicaIdentityBson(afterRow, table.sourceTable.replicaIdColumns) + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onDelete: async (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const beforeRow = this.toSqliteRow(row, columns); + await batch.save({ + tag: storage.SaveOperationTag.DELETE, + sourceTable: table.sourceTable, + before: beforeRow, + beforeReplicaId: getUuidReplicaIdentityBson(beforeRow, 
table.sourceTable.replicaIdColumns), + after: undefined, + afterReplicaId: undefined + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onCommit: async (lsn: string, transactionCount: number) => { + await batch.commit(lsn); + this.metrics.getCounter(ReplicationMetric.TRANSACTIONS_REPLICATED).add(transactionCount); + this.isStartingReplication = false; + }, + onSchemaChange: async () => { + // TODO: Handle schema changes + } + }; + } + + /** + * Convert CDC row data to SqliteRow format. + * CDC rows include table columns plus CDC metadata columns (__$operation, __$start_lsn, etc.). + * We filter out the CDC metadata columns. + */ + private toSqliteRow(row: any, columns: sql.IColumnMetadata): SqliteRow { + const inputRow: SqliteInputRow = CDCToSqliteRow({ row, columns }); + + return this.syncRules.applyRowContext(inputRow); + } + + async getReplicationLagMillis(): Promise { + if (this.oldestUncommittedChange == null) { + if (this.isStartingReplication) { + // We don't have anything to compute replication lag with yet. + return undefined; + } else { + // We don't have any uncommitted changes, so replication is up-to-date. + return 0; + } + } + return Date.now() - this.oldestUncommittedChange.getTime(); + } + + private touch() { + container.probes.touch().catch((e) => { + this.logger.error(`Error touching probe`, e); + }); + } +} diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts new file mode 100644 index 000000000..28925ca1d --- /dev/null +++ b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts @@ -0,0 +1,113 @@ +import { BaseObserver, logger } from '@powersync/lib-services-framework'; +import sql from 'mssql'; +import { NormalizedMSSQLConnectionConfig } from '../types/types.js'; +import { POWERSYNC_VERSION } from '@powersync/service-core'; +import { MSSQLParameter } from '../types/mssql-data-types.js'; +import { addParameters } from '../utils/mssql.js'; + +export const DEFAULT_SCHEMA = 'dbo'; + +export interface MSSQLConnectionManagerListener { + onEnded(): void; +} + +export class MSSQLConnectionManager extends BaseObserver { + private readonly pool: sql.ConnectionPool; + + constructor( + public options: NormalizedMSSQLConnectionConfig, + poolOptions: sql.PoolOpts + ) { + super(); + // The pool is lazy - no connections are opened until a query is performed. + this.pool = new sql.ConnectionPool({ + authentication: options.authentication, + user: options.username, + password: options.password, + server: options.hostname, + port: options.port, + database: options.database, + pool: poolOptions, + options: { + appName: `powersync/${POWERSYNC_VERSION}`, + encrypt: true, // Required for Azure + trustServerCertificate: options.additionalConfig.trustServerCertificate + } + }); + } + + public get connectionTag() { + return this.options.tag; + } + + public get connectionId() { + return this.options.id; + } + + public get databaseName() { + return this.options.database; + } + + public get schema() { + return this.options.schema ?? 
DEFAULT_SCHEMA; + } + + private async ensureConnected(): Promise { + await this.pool.connect(); + } + + async createTransaction(): Promise { + await this.ensureConnected(); + return this.pool.transaction(); + } + + async createRequest(): Promise { + await this.ensureConnected(); + return this.pool.request(); + } + + async query(query: string, parameters?: MSSQLParameter[]): Promise> { + await this.ensureConnected(); + for (let tries = 2; ; tries--) { + try { + logger.debug(`Executing query: ${query}`); + let request = this.pool.request(); + if (parameters) { + request = addParameters(request, parameters); + } + return await request.query(query); + } catch (e) { + if (tries == 1) { + throw e; + } + logger.warn('Query error, retrying..', e); + } + } + } + + async execute(procedure: string, parameters?: MSSQLParameter[]): Promise> { + await this.ensureConnected(); + let request = this.pool.request(); + if (parameters) { + if (parameters) { + request = addParameters(request, parameters); + } + } + return request.execute(procedure); + } + + async end(): Promise { + if (this.pool.connected) { + try { + await this.pool.close(); + } catch (error) { + // We don't particularly care if any errors are thrown when shutting down the pool + logger.warn('Error shutting down MSSQL connection pool', error); + } finally { + this.iterateListeners((listener) => { + listener.onEnded?.(); + }); + } + } + } +} diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts new file mode 100644 index 000000000..890c48f3a --- /dev/null +++ b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts @@ -0,0 +1,33 @@ +import { logger } from '@powersync/lib-services-framework'; +import { ResolvedMSSQLConnectionConfig } from '../types/types.js'; +import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; +import sql from 'mssql'; + +export class MSSQLConnectionManagerFactory { + private readonly connectionManagers: Set; + public readonly connectionConfig: ResolvedMSSQLConnectionConfig; + + constructor(connectionConfig: ResolvedMSSQLConnectionConfig) { + this.connectionConfig = connectionConfig; + this.connectionManagers = new Set(); + } + + create(poolOptions: sql.PoolOpts) { + const manager = new MSSQLConnectionManager(this.connectionConfig, poolOptions); + manager.registerListener({ + onEnded: () => { + this.connectionManagers.delete(manager); + } + }); + this.connectionManagers.add(manager); + return manager; + } + + async shutdown() { + logger.info('Shutting down MSSQL connection Managers...'); + for (const manager of this.connectionManagers.values()) { + await manager.end(); + } + logger.info('MSSQL connection Managers shutdown completed.'); + } +} diff --git a/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts b/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts new file mode 100644 index 000000000..896e9f971 --- /dev/null +++ b/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts @@ -0,0 +1,36 @@ +import { ErrorRateLimiter } from '@powersync/service-core'; +import { setTimeout } from 'timers/promises'; + +export class MSSQLErrorRateLimiter implements ErrorRateLimiter { + nextAllowed: number = Date.now(); + + async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise { + const delay = Math.max(0, this.nextAllowed - Date.now()); + // Minimum delay between connections, even without errors + this.setDelay(500); + await setTimeout(delay, 
undefined, { signal: options?.signal }); + } + + mayPing(): boolean { + return Date.now() >= this.nextAllowed; + } + + reportError(e: any): void { + const message = (e.message as string) ?? ''; + if (message.includes('password authentication failed')) { + this.setDelay(900_000); + } else if (message.includes('ENOTFOUND')) { + // DNS lookup issue - incorrect URI or deleted instance + this.setDelay(120_000); + } else if (message.includes('ECONNREFUSED')) { + // Could be fail2ban or similar + this.setDelay(120_000); + } else { + this.setDelay(30_000); + } + } + + private setDelay(delay: number) { + this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay); + } +} diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts new file mode 100644 index 000000000..3bd837692 --- /dev/null +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -0,0 +1,230 @@ +import { bson, ColumnDescriptor, SourceTable } from '@powersync/service-core'; +import { SqliteValue } from '@powersync/service-sync-rules'; +import { ServiceAssertionError } from '@powersync/lib-services-framework'; +import { MSSQLBaseType } from '../types/mssql-data-types.js'; +import sql from 'mssql'; +import { escapeIdentifier } from '../utils/mssql.js'; +import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; + +export interface MSSQLSnapshotQuery { + initialize(): Promise; + + /** + * Returns an async iterable iterator that yields the column metadata for the query followed by rows of data. + */ + next(): AsyncIterableIterator>; +} + +export type PrimaryKeyValue = Record; + +/** + * Snapshot query using a plain SELECT * FROM table + * + * This supports all tables but does not efficiently resume the snapshot + * if the process is restarted. + */ +export class SimpleSnapshotQuery implements MSSQLSnapshotQuery { + public constructor( + private readonly transaction: sql.Transaction, + private readonly table: MSSQLSourceTable + ) {} + + public async initialize(): Promise {} + + public async *next(): AsyncIterableIterator> { + const metadataRequest = this.transaction.request(); + metadataRequest.stream = true; + const metadataPromise = new Promise((resolve, reject) => { + metadataRequest.on('recordset', resolve); + metadataRequest.on('error', reject); + }); + + metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`); + + const columnMetadata: sql.IColumnMetadata = await metadataPromise; + yield columnMetadata; + + const request = this.transaction.request(); + const stream = request.toReadableStream(); + + request.query(`SELECT * FROM ${this.table.toQualifiedName()}`); + + // MSSQL only streams one row at a time + for await (const row of stream) { + yield row; + } + } +} + +/** + * Performs a table snapshot query, batching by ranges of primary key data. + * + * This may miss some rows if they are modified during the snapshot query. + * In that case, replication will pick up those rows afterward, + * possibly resulting in an IdSnapshotQuery. + * + * Currently, this only supports a table with a single primary key column, + * of a select few types. + */ +export class BatchedSnapshotQuery implements MSSQLSnapshotQuery { + /** + * Primary key types that we support for batched snapshots. + * + * Can expand this over time as we add more tests, + * and ensure there are no issues with type conversion. 
+ */ + static SUPPORTED_TYPES = [ + MSSQLBaseType.TEXT, + MSSQLBaseType.NTEXT, + MSSQLBaseType.VARCHAR, + MSSQLBaseType.NVARCHAR, + MSSQLBaseType.CHAR, + MSSQLBaseType.NCHAR, + MSSQLBaseType.UNIQUEIDENTIFIER, + MSSQLBaseType.TINYINT, + MSSQLBaseType.SMALLINT, + MSSQLBaseType.INT, + MSSQLBaseType.BIGINT + ]; + + static supports(table: SourceTable | MSSQLSourceTable): boolean { + const sourceTable = table instanceof MSSQLSourceTable ? table.sourceTable : table; + if (sourceTable.replicaIdColumns.length != 1) { + return false; + } + const primaryKey = sourceTable.replicaIdColumns[0]; + + return primaryKey.typeId != null && BatchedSnapshotQuery.SUPPORTED_TYPES.includes(Number(primaryKey.typeId)); + } + + private readonly key: ColumnDescriptor; + lastKey: string | bigint | null = null; + + public constructor( + private readonly transaction: sql.Transaction, + private readonly table: MSSQLSourceTable, + private readonly batchSize: number = 10_000, + lastKeySerialized: Uint8Array | null + ) { + this.key = table.sourceTable.replicaIdColumns[0]; + + if (lastKeySerialized != null) { + this.lastKey = this.deserializeKey(lastKeySerialized); + } + } + + public async initialize(): Promise { + // No-op + } + + public getLastKeySerialized(): Uint8Array { + return bson.serialize({ [this.key.name]: this.lastKey }); + } + + public async *next(): AsyncIterableIterator> { + const escapedKeyName = escapeIdentifier(this.key.name); + const metadataRequest = this.transaction.request(); + metadataRequest.stream = true; + const metadataPromise = new Promise((resolve, reject) => { + metadataRequest.on('recordset', resolve); + metadataRequest.on('error', reject); + }); + metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`); + + const columnMetadata: sql.IColumnMetadata = await metadataPromise; + + const foundPrimaryKey = columnMetadata[this.key.name]; + if (!foundPrimaryKey) { + throw new Error( + `Cannot find primary key column ${this.key.name} in results. Keys: ${Object.keys(columnMetadata.columns).join(', ')}` + ); + } + + yield columnMetadata; + + const request = this.transaction.request(); + const stream = request.toReadableStream(); + if (this.lastKey == null) { + request.query(`SELECT TOP(${this.batchSize}) * FROM ${this.table.toQualifiedName()} ORDER BY ${escapedKeyName}`); + } else { + if (this.key.typeId == null) { + throw new Error(`typeId required for primary key ${this.key.name}`); + } + request + .input('lastKey', this.lastKey) + .query( + `SELECT TOP(${this.batchSize}) * FROM ${this.table.toQualifiedName()} WHERE ${escapedKeyName} > @lastKey ORDER BY ${escapedKeyName}` + ); + } + + // MSSQL only streams one row at a time + for await (const row of stream) { + this.lastKey = row[this.key.name]; + yield row; + } + } + + private deserializeKey(key: Uint8Array) { + const decoded = bson.deserialize(key, { useBigInt64: true }); + const keys = Object.keys(decoded); + if (keys.length != 1) { + throw new ServiceAssertionError(`Multiple keys found: ${keys.join(', ')}`); + } + if (keys[0] != this.key.name) { + throw new ServiceAssertionError(`Key name mismatch: expected ${this.key.name}, got ${keys[0]}`); + } + + return decoded[this.key.name]; + } +} + +/** + * This performs a snapshot query using a list of primary keys. + * + * This is not used for general snapshots, but is used when we need to re-fetch specific rows + * during streaming replication. 
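+ * + * Like BatchedSnapshotQuery, it requires a single replica-id column of a supported type (see supports()).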
+ */ +export class IdSnapshotQuery implements MSSQLSnapshotQuery { + static supports(table: SourceTable | MSSQLSourceTable) { + // We have the same requirements as BatchedSnapshotQuery. + // This is typically only used as a fallback when ChunkedSnapshotQuery + // skipped some rows. + return BatchedSnapshotQuery.supports(table); + } + + public constructor( + private readonly transaction: sql.Transaction, + private readonly table: MSSQLSourceTable, + private readonly keys: PrimaryKeyValue[] + ) {} + + public async initialize(): Promise { + // No-op + } + + public async *next(): AsyncIterableIterator> { + const metadataRequest = this.transaction.request(); + metadataRequest.stream = true; + const metadataPromise = new Promise((resolve, reject) => { + metadataRequest.on('recordset', resolve); + metadataRequest.on('error', reject); + }); + metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`); + const columnMetadata: sql.IColumnMetadata = await metadataPromise; + yield columnMetadata; + + const keyDefinition = this.table.sourceTable.replicaIdColumns[0]; + const ids = this.keys.map((record) => record[keyDefinition.name]); + + const request = this.transaction.request(); + const stream = request.toReadableStream(); + request + .input('ids', ids) + .query(`SELECT * FROM ${this.table.toQualifiedName()} WHERE ${escapeIdentifier(keyDefinition.name)} = @ids`); + + // MSSQL only streams one row at a time + for await (const row of stream) { + yield row; + } + } +} diff --git a/modules/module-mssql/src/types/mssql-data-types.ts b/modules/module-mssql/src/types/mssql-data-types.ts new file mode 100644 index 000000000..7cdf73413 --- /dev/null +++ b/modules/module-mssql/src/types/mssql-data-types.ts @@ -0,0 +1,79 @@ +import { ColumnDescriptor } from '@powersync/service-core'; +import { ISqlType } from 'mssql'; + +export interface MSSQLColumnDescriptor extends ColumnDescriptor { + /** The underlying system type id. For base types system type id == user type id */ + typeId: number; + /** The unique user type id that uniquely identifies the type */ + userTypeId: number; + // /** The name of the user/alias type. For example SYSNAME, GEOMETRY */ + // userTypeName: string; +} + +/** The shared system type id for all CLR_UDT types in SQL Server */ +export const CLR_UDT_TYPE_ID = 240; + +/** + * Enum mapping the base MSSQL data types to their system type IDs. + */ +export enum MSSQLBaseType { + IMAGE = 34, + TEXT = 35, + UNIQUEIDENTIFIER = 36, + DATE = 40, + TIME = 41, + DATETIME2 = 42, + DATETIMEOFFSET = 43, + TINYINT = 48, + SMALLINT = 52, + INT = 56, + SMALLDATETIME = 58, + REAL = 59, + MONEY = 60, + DATETIME = 61, + FLOAT = 62, + SQL_VARIANT = 98, + NTEXT = 99, + BIT = 104, + DECIMAL = 106, + NUMERIC = 108, + SMALLMONEY = 122, + BIGINT = 127, + VARBINARY = 165, + VARCHAR = 167, + BINARY = 173, + CHAR = 175, + TIMESTAMP = 189, + NVARCHAR = 231, + NCHAR = 239, + XML = 241, + JSON = 244 +} + +/** + * Enum mapping some of the extended user-defined MSSQL data types to their user type IDs. 
+ */ +export enum MSSQLExtendedUserType { + // VARBINARY system type [155] + VECTOR = 255, + // NVARCHAR system type [231] + SYSNAME = 256, + // CLR_UDT system type [240] + HIERARCHYID = 128, + // CLR_UDT system type [240] + GEOMETRY = 129, + // CLR_UDT system type [240] + GEOGRAPHY = 130 +} + +export enum MSSQLUserDefinedType { + VECTOR = 'vector', + SYSNAME = 'sysname', + HIERARCHYID = 'hierarchyid' +} + +export interface MSSQLParameter { + name: string; + value: any; + type?: (() => ISqlType) | ISqlType; +} diff --git a/modules/module-mssql/src/types/types.ts b/modules/module-mssql/src/types/types.ts new file mode 100644 index 000000000..d89fc847b --- /dev/null +++ b/modules/module-mssql/src/types/types.ts @@ -0,0 +1,220 @@ +import { ErrorCode, makeHostnameLookupFunction, ServiceError } from '@powersync/lib-services-framework'; +import * as service_types from '@powersync/service-types'; +import { LookupFunction } from 'node:net'; +import * as t from 'ts-codec'; +import * as urijs from 'uri-js'; + +export const MSSQL_CONNECTION_TYPE = 'mssql' as const; + +export const AzureActiveDirectoryPasswordAuthentication = t.object({ + type: t.literal('azure-active-directory-password'), + options: t.object({ + /** + * A user need to provide `userName` associate to their account. + */ + userName: t.string, + /** + * A user need to provide `password` associate to their account. + */ + password: t.string, + /** + * A client id to use. + */ + clientId: t.string, + /** + * Azure tenant ID + */ + tenantId: t.string + }) +}); +export type AzureActiveDirectoryPasswordAuthentication = t.Decoded; + +export const AzureActiveDirectoryServicePrincipalSecret = t.object({ + type: t.literal('azure-active-directory-service-principal-secret'), + options: t.object({ + /** + * Application (`client`) ID from your registered Azure application + */ + clientId: t.string, + /** + * The created `client secret` for this registered Azure application + */ + clientSecret: t.string, + /** + * Directory (`tenant`) ID from your registered Azure application + */ + tenantId: t.string + }) +}); +export type AzureActiveDirectoryServicePrincipalSecret = t.Decoded; + +export const DefaultAuthentication = t.object({ + type: t.literal('default'), + options: t.object({ + /** + * User name to use for sql server login. + */ + userName: t.string, + /** + * Password to use for sql server login. + */ + password: t.string + }) +}); +export type DefaultAuthentication = t.Decoded; + +export const Authentication = DefaultAuthentication.or(AzureActiveDirectoryPasswordAuthentication).or( + AzureActiveDirectoryServicePrincipalSecret +); +export type Authentication = t.Decoded; + +export const AdditionalConfig = t.object({ + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds. + */ + pollingIntervalMs: t.number.optional(), + /** + * Maximum number of transactions to poll per polling cycle. Defaults to 10. + */ + pollingBatchSize: t.number.optional(), + + /** + * Whether to trust the server certificate. Set to true for local development and self-signed certificates. + * Default is false. + */ + trustServerCertificate: t.boolean.optional() +}); + +export interface AdditionalConfig { + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds. + */ + pollingIntervalMs: number; + /** + * Maximum number of transactions to poll per polling cycle. Defaults to 10. + */ + pollingBatchSize: number; + /** + * Whether to trust the server certificate. 
Set to true for local development and self-signed certificates. + * Default is false. + */ + trustServerCertificate: boolean; +} + +export interface NormalizedMSSQLConnectionConfig { + id: string; + tag: string; + + username?: string; + password?: string; + hostname: string; + port: number; + database: string; + schema?: string; + + authentication?: Authentication; + + lookup?: LookupFunction; + + additionalConfig: AdditionalConfig; +} + +export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.and( + t.object({ + type: t.literal(MSSQL_CONNECTION_TYPE), + uri: t.string.optional(), + username: t.string.optional(), + password: t.string.optional(), + database: t.string.optional(), + schema: t.string.optional(), + hostname: t.string.optional(), + port: service_types.configFile.portCodec.optional(), + + authentication: Authentication.optional(), + + reject_ip_ranges: t.array(t.string).optional(), + additionalConfig: AdditionalConfig.optional() + }) +); + +/** + * Config input specified when starting services + */ +export type MSSQLConnectionConfig = t.Decoded; + +/** + * Resolved version of {@link MSSQLConnectionConfig} + */ +export type ResolvedMSSQLConnectionConfig = MSSQLConnectionConfig & NormalizedMSSQLConnectionConfig; + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + */ +export function normalizeConnectionConfig(options: MSSQLConnectionConfig): NormalizedMSSQLConnectionConfig { + let uri: urijs.URIComponents; + if (options.uri) { + uri = urijs.parse(options.uri); + if (uri.scheme != 'mssql') { + throw new ServiceError( + ErrorCode.PSYNC_S1109, + `Invalid URI - protocol must be mssql, got ${JSON.stringify(uri.scheme)}` + ); + } + } else { + uri = urijs.parse('mssql:///'); + } + + const hostname = options.hostname ?? uri.host ?? ''; + const port = Number(options.port ?? uri.port ?? 1433); + + const database = options.database ?? uri.path?.substring(1) ?? ''; + + const [uri_username, uri_password] = (uri.userinfo ?? '').split(':'); + + const username = options.username ?? uri_username ?? ''; + const password = options.password ?? uri_password ?? ''; + + if (hostname == '') { + throw new ServiceError(ErrorCode.PSYNC_S1106, `MSSQL connection: hostname required`); + } + + if (username == '' && !options.authentication) { + throw new ServiceError(ErrorCode.PSYNC_S1107, `MSSQL connection: username or authentication config is required`); + } + + if (password == '' && !options.authentication) { + throw new ServiceError(ErrorCode.PSYNC_S1108, `MSSQL connection: password or authentication config is required`); + } + + if (database == '') { + throw new ServiceError(ErrorCode.PSYNC_S1105, `MSSQL connection: database required`); + } + + const lookup = makeHostnameLookupFunction(hostname, { reject_ip_ranges: options.reject_ip_ranges ?? [] }); + + return { + id: options.id ?? 'default', + tag: options.tag ?? 'default', + + username, + password, + hostname, + port, + database, + + lookup, + authentication: options.authentication, + + additionalConfig: { + pollingIntervalMs: options.additionalConfig?.pollingIntervalMs ?? 1000, + pollingBatchSize: options.additionalConfig?.pollingBatchSize ?? 10, + trustServerCertificate: options.additionalConfig?.trustServerCertificate ?? 
false + } + } satisfies NormalizedMSSQLConnectionConfig; +} + +export function baseUri(config: ResolvedMSSQLConnectionConfig) { + return `mssql://${config.hostname}:${config.port}/${config.database}`; +} diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts new file mode 100644 index 000000000..1e3a040ee --- /dev/null +++ b/modules/module-mssql/src/utils/mssql.ts @@ -0,0 +1,420 @@ +import sql from 'mssql'; +import { coerce, gte } from 'semver'; +import { logger } from '@powersync/lib-services-framework'; +import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; +import { LSN } from '../common/LSN.js'; +import { CaptureInstance, MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; +import { MSSQLParameter } from '../types/mssql-data-types.js'; +import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; +import { getReplicationIdentityColumns, ReplicationIdentityColumnsResult, ResolvedTable } from './schema.js'; +import * as service_types from '@powersync/service-types'; +import * as sync_rules from '@powersync/service-sync-rules'; + +export const POWERSYNC_CHECKPOINTS_TABLE = '_powersync_checkpoints'; + +export const SUPPORTED_ENGINE_EDITIONS = new Map([ + [2, 'Standard'], + [3, 'Enterprise - Enterprise, Developer, Evaluation'], + [5, 'SqlDatabase - Azure SQL Database'], + [8, 'SqlManagedInstance - Azure SQL Managed Instance'] +]); + +// SQL Server 2022 and newer +export const MINIMUM_SUPPORTED_VERSION = '16.0'; + +export async function checkSourceConfiguration(connectionManager: MSSQLConnectionManager): Promise { + const errors: string[] = []; + // 1) Check MSSQL version and Editions + const { recordset: versionResult } = await connectionManager.query(` + SELECT + CAST(SERVERPROPERTY('EngineEdition') AS int) AS engine, + CAST(SERVERPROPERTY('Edition') AS nvarchar(128)) AS edition, + CAST(SERVERPROPERTY('ProductVersion') AS nvarchar(128)) AS version + `); + + // If the edition is unsupported, return immediately + if (!SUPPORTED_ENGINE_EDITIONS.has(versionResult[0]?.engine)) { + errors.push( + `The SQL Server edition '${versionResult[0]?.edition}' is not supported. PowerSync requires a MSSQL edition that supports CDC: ${Array.from( + SUPPORTED_ENGINE_EDITIONS.values() + ).join(', ')}.` + ); + return errors; + } + + // Only applicable to SQL Server stand-alone editions + if (versionResult[0]?.engine == 2 || versionResult[0]?.engine == 3) { + if (!isVersionAtLeast(versionResult[0]?.version, MINIMUM_SUPPORTED_VERSION)) { + errors.push( + `The SQL Server version '${versionResult[0]?.version}' is not supported. PowerSync requires MSSQL 2022 (v16) or newer.` + ); + } + } + + // 2) Check DB-level CDC + const { recordset: cdcEnabledResult } = await connectionManager.query(` + SELECT name AS db_name, is_cdc_enabled FROM sys.databases WHERE name = DB_NAME(); + `); + const cdcEnabled = cdcEnabledResult[0]?.is_cdc_enabled; + + if (!cdcEnabled) { + errors.push(`CDC is not enabled for database. Please enable it.`); + } + + // 3) Check CDC user permissions + const { recordset: cdcUserResult } = await connectionManager.query(` + SELECT + CASE + WHEN IS_SRVROLEMEMBER('sysadmin') = 1 + OR IS_MEMBER('db_owner') = 1 + OR IS_MEMBER('cdc_admin') = 1 + OR IS_MEMBER('cdc_reader') = 1 + THEN 1 ELSE 0 + END AS has_cdc_access; + `); + + if (!cdcUserResult[0]?.has_cdc_access) { + errors.push(`The current user does not have the 'cdc_reader' role. 
Please assign this role to the user.`); + } + + // 4) Check if the _powersync_checkpoints table is correctly configured + const checkpointTableErrors = await ensurePowerSyncCheckpointsTable(connectionManager); + errors.push(...checkpointTableErrors); + + return errors; +} + +export async function ensurePowerSyncCheckpointsTable(connectionManager: MSSQLConnectionManager): Promise { + const errors: string[] = []; + try { + // check if the dbo_powersync_checkpoints table exists + const { recordset: checkpointsResult } = await connectionManager.query(` + SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '${connectionManager.schema}' AND TABLE_NAME = '${POWERSYNC_CHECKPOINTS_TABLE}'; + `); + if (checkpointsResult.length > 0) { + // Table already exists, check if CDC is enabled + const isEnabled = await isTableEnabledForCDC({ + connectionManager, + table: POWERSYNC_CHECKPOINTS_TABLE, + schema: connectionManager.schema + }); + if (!isEnabled) { + // Enable CDC on the table + await enableCDCForTable({ + connectionManager, + table: POWERSYNC_CHECKPOINTS_TABLE + }); + } + return errors; + } + } catch (error) { + errors.push(`Failed ensure ${POWERSYNC_CHECKPOINTS_TABLE} table is correctly configured: ${error}`); + } + + // Try to create the table + try { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.${POWERSYNC_CHECKPOINTS_TABLE} ( + id INT IDENTITY PRIMARY KEY, + last_updated DATETIME NOT NULL DEFAULT (GETDATE()) + )`); + } catch (error) { + errors.push(`Failed to create ${POWERSYNC_CHECKPOINTS_TABLE} table: ${error}`); + } + + try { + // Enable CDC on the table if not already enabled + await enableCDCForTable({ + connectionManager, + table: POWERSYNC_CHECKPOINTS_TABLE + }); + } catch (error) { + errors.push(`Failed to enable CDC on ${POWERSYNC_CHECKPOINTS_TABLE} table: ${error}`); + } + + return errors; +} + +export async function createCheckpoint(connectionManager: MSSQLConnectionManager): Promise { + await connectionManager.query(` + MERGE ${connectionManager.schema}.${POWERSYNC_CHECKPOINTS_TABLE} AS target + USING (SELECT 1 AS id) AS source + ON target.id = source.id + WHEN MATCHED THEN + UPDATE SET last_updated = GETDATE() + WHEN NOT MATCHED THEN + INSERT (last_updated) VALUES (GETDATE()); + `); +} + +export interface IsTableEnabledForCDCOptions { + connectionManager: MSSQLConnectionManager; + table: string; + schema: string; +} +/** + * Check if the specified table is enabled for CDC. 
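+ * Checks cdc.change_tables for a capture instance registered for the given schema and table. + * @example + * // Illustrative only: + * // const enabled = await isTableEnabledForCDC({ connectionManager, table: 'lists', schema: 'dbo' });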
+ * @param options + */ +export async function isTableEnabledForCDC(options: IsTableEnabledForCDCOptions): Promise { + const { connectionManager, table, schema } = options; + + const { recordset: checkResult } = await connectionManager.query( + ` + SELECT 1 FROM cdc.change_tables ct + JOIN sys.tables AS tbl ON tbl.object_id = ct.source_object_id + JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id + WHERE sch.name = '${schema}' + AND tbl.name = '${table}' + ` + ); + return checkResult.length > 0; +} + +export interface EnableCDCForTableOptions { + connectionManager: MSSQLConnectionManager; + table: string; +} + +export async function enableCDCForTable(options: EnableCDCForTableOptions): Promise { + const { connectionManager, table } = options; + + await connectionManager.execute('sys.sp_cdc_enable_table', [ + { name: 'source_schema', value: connectionManager.schema }, + { name: 'source_name', value: table }, + { name: 'role_name', value: 'NULL' }, + { name: 'supports_net_changes', value: 1 } + ]); +} + +/** + * Check if the supplied version is newer or equal to the target version. + * @param version + * @param minimumVersion + */ +export function isVersionAtLeast(version: string, minimumVersion: string): boolean { + const coercedVersion = coerce(version); + const coercedMinimumVersion = coerce(minimumVersion); + + return gte(coercedVersion!, coercedMinimumVersion!, { loose: true }); +} + +export interface IsWithinRetentionThresholdOptions { + checkpointLSN: LSN; + tables: MSSQLSourceTable[]; + connectionManager: MSSQLConnectionManager; +} + +/** + * Checks that CDC the specified checkpoint LSN is within the retention threshold for all specified tables. + * CDC periodically cleans up old data up to the retention threshold. If replication has been stopped for too long it is + * possible for the checkpoint LSN to be older than the minimum LSN in the CDC tables. In such a case we need to perform a new snapshot. + * @param options + */ +export async function isWithinRetentionThreshold(options: IsWithinRetentionThresholdOptions): Promise { + const { checkpointLSN, tables, connectionManager } = options; + for (const table of tables) { + const minLSN = await getMinLSN(connectionManager, table.captureInstance); + if (minLSN > checkpointLSN) { + logger.warn( + `The checkpoint LSN:[${checkpointLSN}] is older than the minimum LSN:[${minLSN}] for table ${table.sourceTable.qualifiedName}. 
This indicates that the checkpoint LSN is outside of the retention window.` + ); + return false; + } + } + return true; +} + +export async function getMinLSN(connectionManager: MSSQLConnectionManager, captureInstance: string): Promise { + const { recordset: result } = await connectionManager.query( + `SELECT sys.fn_cdc_get_min_lsn('${captureInstance}') AS min_lsn` + ); + const rawMinLSN: Buffer = result[0].min_lsn; + return LSN.fromBinary(rawMinLSN); +} + +export async function incrementLSN(lsn: LSN, connectionManager: MSSQLConnectionManager): Promise { + const { recordset: result } = await connectionManager.query( + `SELECT sys.fn_cdc_increment_lsn(@lsn) AS incremented_lsn`, + [{ name: 'lsn', type: sql.VarBinary, value: lsn.toBinary() }] + ); + return LSN.fromBinary(result[0].incremented_lsn); +} + +export interface GetCaptureInstanceOptions { + connectionManager: MSSQLConnectionManager; + tableName: string; + schema: string; +} + +export async function getCaptureInstance(options: GetCaptureInstanceOptions): Promise { + const { connectionManager, tableName, schema } = options; + const { recordset: result } = await connectionManager.query( + ` + SELECT + ct.capture_instance, + OBJECT_SCHEMA_NAME(ct.[object_id]) AS cdc_schema + FROM + sys.tables tbl + INNER JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id + INNER JOIN cdc.change_tables ct ON ct.source_object_id = tbl.object_id + WHERE sch.name = '${schema}' + AND tbl.name = '${tableName}' + AND ct.end_lsn IS NULL; + ` + ); + + if (result.length === 0) { + return null; + } + + return { + name: result[0].capture_instance, + schema: result[0].cdc_schema + }; +} + +/** + * Return the LSN of the latest transaction recorded in the transaction log + * @param connectionManager + */ +export async function getLatestLSN(connectionManager: MSSQLConnectionManager): Promise { + const { recordset: result } = await connectionManager.query( + 'SELECT log_end_lsn FROM sys.dm_db_log_stats(DB_ID()) AS log_end_lsn' + ); + return LSN.fromString(result[0].log_end_lsn); +} + +/** + * Return the LSN of the lastest transaction replicated to the CDC tables. + * @param connectionManager + */ +export async function getLatestReplicatedLSN(connectionManager: MSSQLConnectionManager): Promise { + const { recordset: result } = await connectionManager.query('SELECT sys.fn_cdc_get_max_lsn() AS max_lsn;'); + // LSN is a binary(10) returned as a Buffer + const rawLSN: Buffer = result[0].max_lsn; + return LSN.fromBinary(rawLSN); +} + +/** + * Escapes an identifier for use in MSSQL queries. 
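+ * Wraps the identifier in square brackets (e.g. my table becomes [my table]); it assumes the name does not itself contain ']' characters.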
+ * @param identifier + */ +export function escapeIdentifier(identifier: string): string { + return `[${identifier}]`; +} + +export function toQualifiedTableName(schema: string, tableName: string): string { + return `${escapeIdentifier(schema)}.${escapeIdentifier(tableName)}`; +} + +export function isIColumnMetadata(obj: any): obj is sql.IColumnMetadata { + if (obj === null || typeof obj !== 'object' || Array.isArray(obj)) { + return false; + } + + let propertiesMatched = true; + for (const value of Object.values(obj)) { + const property = value as any; + propertiesMatched = + typeof property.index === 'number' && + typeof property.name === 'string' && + (typeof property.length === 'number' || typeof property.length === 'undefined') && + (typeof property.type === 'function' || typeof property.type === 'object') && + typeof property.nullable === 'boolean' && + typeof property.caseSensitive === 'boolean' && + typeof property.identity === 'boolean' && + typeof property.readOnly === 'boolean'; + } + + return propertiesMatched; +} + +export function addParameters(request: sql.Request, parameters: MSSQLParameter[]): sql.Request { + for (const param of parameters) { + if (param.type) { + request.input(param.name, param.type, param.value); + } else { + request.input(param.name, param.value); + } + } + return request; +} + +export interface GetDebugTableInfoOptions { + connectionManager: MSSQLConnectionManager; + tablePattern: TablePattern; + table: ResolvedTable; + syncRules: SqlSyncRules; +} + +export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Promise { + const { connectionManager, tablePattern, table, syncRules } = options; + const { schema } = tablePattern; + + let idColumnsResult: ReplicationIdentityColumnsResult | null = null; + let idColumnsError: service_types.ReplicationError | null = null; + try { + idColumnsResult = await getReplicationIdentityColumns({ + connectionManager: connectionManager, + schema, + tableName: table.name + }); + } catch (ex) { + idColumnsError = { level: 'fatal', message: ex.message }; + } + + const idColumns = idColumnsResult?.columns ?? []; + const sourceTable: sync_rules.SourceTableInterface = { + connectionTag: connectionManager.connectionTag, + schema: schema, + name: table.name + }; + const syncData = syncRules.tableSyncsData(sourceTable); + const syncParameters = syncRules.tableSyncsParameters(sourceTable); + + if (idColumns.length === 0 && idColumnsError == null) { + let message = `No replication id found for ${toQualifiedTableName(schema, table.name)}. Replica identity: ${idColumnsResult?.identity}.`; + if (idColumnsResult?.identity === 'default') { + message += ' Configure a primary key on the table.'; + } + idColumnsError = { level: 'fatal', message }; + } + + let selectError: service_types.ReplicationError | null = null; + try { + await connectionManager.query(`SELECT TOP 1 * FROM [${toQualifiedTableName(schema, table.name)}]`); + } catch (e) { + selectError = { level: 'fatal', message: e.message }; + } + + // Check if CDC is enabled for the table + let cdcError: service_types.ReplicationError | null = null; + try { + const isEnabled = await isTableEnabledForCDC({ + connectionManager: connectionManager, + table: table.name, + schema: schema + }); + if (!isEnabled) { + cdcError = { + level: 'fatal', + message: `CDC is not enabled for table ${toQualifiedTableName(schema, table.name)}. 
Enable CDC with: sys.sp_cdc_enable_table @source_schema = '${schema}', @source_name = '${table.name}', @role_name = NULL, @supports_net_changes = 1` + }; + } + } catch (e) { + cdcError = { level: 'warning', message: `Could not check CDC status: ${e.message}` }; + } + + // TODO check RLS settings for table + + return { + schema: schema, + name: table.name, + pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined, + replication_id: idColumns.map((c) => c.name), + data_queries: syncData, + parameter_queries: syncParameters, + errors: [idColumnsError, selectError, cdcError].filter((error) => error != null) as service_types.ReplicationError[] + }; +} diff --git a/modules/module-mssql/src/utils/schema.ts b/modules/module-mssql/src/utils/schema.ts new file mode 100644 index 000000000..ffb00b93a --- /dev/null +++ b/modules/module-mssql/src/utils/schema.ts @@ -0,0 +1,172 @@ +import { SourceEntityDescriptor } from '@powersync/service-core'; +import { TablePattern } from '@powersync/service-sync-rules'; +import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; +import { MSSQLColumnDescriptor } from '../types/mssql-data-types.js'; +import { escapeIdentifier } from './mssql.js'; + +export interface GetColumnsOptions { + connectionManager: MSSQLConnectionManager; + schema: string; + tableName: string; +} + +async function getColumns(options: GetColumnsOptions): Promise { + const { connectionManager, schema, tableName } = options; + + const { recordset: columnResults } = await connectionManager.query(` + SELECT + col.name AS [name], + typ.name AS [type], + typ.system_type_id AS type_id, + typ.user_type_id AS user_type_id + FROM sys.columns AS col + JOIN sys.tables AS tbl ON tbl.object_id = col.object_id + JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id + JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id + WHERE sch.name = '${schema}' + AND tbl.name = '${tableName}' + ORDER BY col.column_id; + `); + + return columnResults.map((row) => { + return { + name: row.name, + type: row.type, + typeId: row.type_id, + userTypeId: row.user_type_id + }; + }); +} + +export interface GetReplicationIdentityColumnsOptions { + connectionManager: MSSQLConnectionManager; + schema: string; + tableName: string; +} + +export interface ReplicationIdentityColumnsResult { + columns: MSSQLColumnDescriptor[]; + identity: 'default' | 'nothing' | 'full' | 'index'; +} + +export async function getReplicationIdentityColumns( + options: GetReplicationIdentityColumnsOptions +): Promise { + const { connectionManager, schema, tableName } = options; + const { recordset: primaryKeyColumns } = await connectionManager.query(` + SELECT + col.name AS [name], + typ.name AS [type], + typ.system_type_id AS type_id, + typ.user_type_id AS user_type_id + FROM sys.tables AS tbl + JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id + JOIN sys.indexes AS idx ON idx.object_id = tbl.object_id AND idx.is_primary_key = 1 + JOIN sys.index_columns AS idx_col ON idx_col.object_id = idx.object_id AND idx_col.index_id = idx.index_id + JOIN sys.columns AS col ON col.object_id = idx_col.object_id AND col.column_id = idx_col.column_id + JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id + WHERE sch.name = '${schema}' + AND tbl.name = '${tableName}' + ORDER BY idx_col.key_ordinal; + `); + + if (primaryKeyColumns.length > 0) { + return { + columns: primaryKeyColumns.map((row) => ({ + name: row.name, + type: row.type, + typeId: row.type_id, + userTypeId: row.user_type_id + })), + identity: 
'default' + }; + } + + // No primary key, check if any of the columns have a unique constraint we can use + const { recordset: uniqueKeyColumns } = await connectionManager.query(` + SELECT + col.name AS [name], + typ.name AS [type], + typ.system_type_id AS type_id, + typ.user_type_id AS user_type_id + FROM sys.tables AS tbl + JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id + JOIN sys.indexes AS idx ON idx.object_id = tbl.object_id AND idx.is_unique_constraint = 1 + JOIN sys.index_columns AS idx_col ON idx_col.object_id = idx.object_id AND idx_col.index_id = idx.index_id + JOIN sys.columns AS col ON col.object_id = idx_col.object_id AND col.column_id = idx_col.column_id + JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id + WHERE sch.name = '${schema}' + AND tbl.name = '${tableName}' + ORDER BY idx_col.key_ordinal; + `); + + if (uniqueKeyColumns.length > 0) { + return { + columns: uniqueKeyColumns.map((row) => ({ + name: row.name, + type: row.type, + typeId: row.type_id, + userTypeId: row.user_type_id + })), + identity: 'index' + }; + } + + const allColumns = await getColumns(options); + + return { + columns: allColumns, + identity: 'full' + }; +} + +export type ResolvedTable = Omit; + +export async function getTablesFromPattern( + connectionManager: MSSQLConnectionManager, + tablePattern: TablePattern +): Promise { + if (tablePattern.isWildcard) { + const { recordset: tableResults } = await connectionManager.query(` + SELECT + tbl.name AS [table], + sch.name AS [schema], + tbl.object_id AS object_id + FROM sys.tables tbl + JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id + WHERE sch.name = '${tablePattern.schema}' + AND tbl.name LIKE '${tablePattern.tablePattern}' + `); + + return tableResults + .map((row) => { + return { + objectId: row.object_id, + schema: row.schema, + name: row.table + }; + }) + .filter((table: ResolvedTable) => table.name.startsWith(tablePattern.tablePrefix)); + } else { + const { recordset: tableResults } = await connectionManager.query( + ` + SELECT + tbl.name AS [table], + sch.name AS [schema], + tbl.object_id AS object_id + FROM sys.tables tbl + JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id + WHERE sch.name = '${tablePattern.schema}' + AND tbl.name = '${tablePattern.name}' + ` + ); + + return tableResults.map((row) => { + return { + objectId: row.object_id, + schema: row.schema, + name: row.table + }; + }); + } +} diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts new file mode 100644 index 000000000..03bd6e442 --- /dev/null +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -0,0 +1,198 @@ +import { describe, expect, test } from 'vitest'; +import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests'; +import { ReplicationMetric } from '@powersync/service-types'; +import { createTestTable, describeWithStorage, insertTestData, waitForPendingCDCChanges } from './util.js'; +import { storage } from '@powersync/service-core'; +import { CDCStreamTestContext } from './CDCStreamTestContext.js'; +import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; +import sql from 'mssql'; + +const BASIC_SYNC_RULES = ` +bucket_definitions: + global: + data: + - SELECT id, description FROM "test_data" +`; + +describe('CDCStream tests', () => { + describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); +}); + +function defineCDCStreamTests(factory: storage.TestStorageFactory) { + test('Initial snapshot sync', async () => { + await using context = await 
CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + + await context.replicateSnapshot(); + await context.startStreaming(); + + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', testData)]); + expect(endRowCount - startRowCount).toEqual(1); + }); + + test('Replicate basic values', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + await context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + const testData = await insertTestData(connectionManager, 'test_data'); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_data', testData)]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 
0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toEqual(1); + }); + + test('Replicate row updates', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + await context.replicateSnapshot(); + + await context.startStreaming(); + + const updatedTestData = { ...testData }; + updatedTestData.description = 'updated'; + await connectionManager.query(`UPDATE test_data SET description = @description WHERE id = @id`, [ + { name: 'description', type: sql.NVarChar(sql.MAX), value: updatedTestData.description }, + { name: 'id', type: sql.UniqueIdentifier, value: updatedTestData.id } + ]); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', testData), putOp('test_data', updatedTestData)]); + }); + + test('Replicate row deletions', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + await context.replicateSnapshot(); + + await context.startStreaming(); + + await connectionManager.query(`DELETE FROM test_data WHERE id = @id`, [ + { name: 'id', type: sql.UniqueIdentifier, value: testData.id } + ]); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', testData), removeOp('test_data', testData.id)]); + }); + + test('Replicate matched wild card tables in sync rules', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description FROM "test_data_%"`); + + await createTestTable(connectionManager, 'test_data_1'); + await createTestTable(connectionManager, 'test_data_2'); + + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData11 = await insertTestData(connectionManager, 'test_data_1'); + const testData21 = await insertTestData(connectionManager, 'test_data_2'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + await context.replicateSnapshot(); + await context.startStreaming(); + + const testData12 = await insertTestData(connectionManager, 'test_data_1'); + const testData22 = await insertTestData(connectionManager, 'test_data_2'); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data_1', testData11), + putOp('test_data_2', testData21), + putOp('test_data_1', testData12), + putOp('test_data_2', testData22) + ]); + }); + + test('Replication for tables not in the sync rules are ignored', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_donotsync'); + + await 
context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + await insertTestData(connectionManager, 'test_donotsync'); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + // There was a transaction, but it is not counted since it is not for a table in the sync rules + expect(endRowCount - startRowCount).toEqual(0); + expect(endTxCount - startTxCount).toEqual(0); + }); + + test('Replicate case sensitive table', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description FROM "test_DATA" + `); + + await createTestTable(connectionManager, 'test_DATA'); + + await context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + const testData = await insertTestData(connectionManager, 'test_DATA'); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_DATA', testData)]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toBeGreaterThanOrEqual(1); + }); +} diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts new file mode 100644 index 000000000..6b674befc --- /dev/null +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -0,0 +1,212 @@ +import { + BucketStorageFactory, + createCoreReplicationMetrics, + initializeCoreReplicationMetrics, + InternalOpId, + OplogEntry, + storage, + SyncRulesBucketStorage +} from '@powersync/service-core'; +import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; +import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; +import { CDCStream, CDCStreamOptions } from '@module/replication/CDCStream.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import timers from 'timers/promises'; + +/** + * Tests operating on the change data capture need to configure the stream and manage asynchronous + * replication, which gets a little tricky. + * + * This wraps all the context required for testing, and tears it down afterward + * by using `await using`. 
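+ *
+ * Typical usage, mirroring the tests in CDCStream.test.ts:
+ *
+ *   await using context = await CDCStreamTestContext.open(factory);
+ *   await context.updateSyncRules(BASIC_SYNC_RULES);
+ *   await context.replicateSnapshot();
+ *   await context.startStreaming();
+ *   const data = await context.getBucketData('global[]');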
+ */ +export class CDCStreamTestContext implements AsyncDisposable { + private _cdcStream?: CDCStream; + private abortController = new AbortController(); + private streamPromise?: Promise; + public storage?: SyncRulesBucketStorage; + private snapshotPromise?: Promise; + private replicationDone = false; + + static async open( + factory: (options: storage.TestStorageOptions) => Promise, + options?: { doNotClear?: boolean; cdcStreamOptions?: Partial } + ) { + const f = await factory({ doNotClear: options?.doNotClear }); + const connectionManager = new MSSQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); + + if (!options?.doNotClear) { + await clearTestDb(connectionManager); + } + + return new CDCStreamTestContext(f, connectionManager, options?.cdcStreamOptions); + } + + constructor( + public factory: BucketStorageFactory, + public connectionManager: MSSQLConnectionManager, + private cdcStreamOptions?: Partial + ) { + createCoreReplicationMetrics(METRICS_HELPER.metricsEngine); + initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine); + } + + async [Symbol.asyncDispose]() { + try { + await this.dispose(); + } catch (err) { + console.error('Error disposing CDCStreamTestContext', err); + } + } + + async dispose() { + this.abortController.abort(); + await this.snapshotPromise; + await this.streamPromise; + await this.connectionManager.end(); + await this.factory?.[Symbol.asyncDispose](); + } + + get connectionTag() { + return this.connectionManager.connectionTag; + } + + async updateSyncRules(content: string) { + const syncRules = await this.factory.updateSyncRules({ content: content, validate: true }); + this.storage = this.factory.getInstance(syncRules); + return this.storage!; + } + + async loadNextSyncRules() { + const syncRules = await this.factory.getNextSyncRulesContent(); + if (syncRules == null) { + throw new Error(`Next sync rules not available`); + } + + this.storage = this.factory.getInstance(syncRules); + return this.storage!; + } + + async loadActiveSyncRules() { + const syncRules = await this.factory.getActiveSyncRulesContent(); + if (syncRules == null) { + throw new Error(`Active sync rules not available`); + } + + this.storage = this.factory.getInstance(syncRules); + return this.storage!; + } + + get cdcStream() { + if (this.storage == null) { + throw new Error('updateSyncRules() first'); + } + if (this._cdcStream) { + return this._cdcStream; + } + const options: CDCStreamOptions = { + storage: this.storage, + metrics: METRICS_HELPER.metricsEngine, + connections: this.connectionManager, + abortSignal: this.abortController.signal, + additionalConfig: { + pollingBatchSize: 10, + pollingIntervalMs: 1000, + trustServerCertificate: true + }, + ...this.cdcStreamOptions + }; + this._cdcStream = new CDCStream(options); + return this._cdcStream!; + } + + /** + * Replicate a snapshot, start streaming, and wait for a consistent checkpoint. + */ + async initializeReplication() { + await this.replicateSnapshot(); + // TODO: renable this.startStreaming(); + // Make sure we're up to date + await this.getCheckpoint(); + } + + async replicateSnapshot() { + await this.cdcStream.initReplication(); + this.replicationDone = true; + } + + // TODO: Enable once streaming is implemented + startStreaming() { + if (!this.replicationDone) { + throw new Error('Call replicateSnapshot() before startStreaming()'); + } + this.streamPromise = this.cdcStream.streamChanges(); + // Wait for the replication to start before returning. 
+ // This avoids a bunch of unpredictable race conditions that appear in testing + return new Promise(async (resolve) => { + while (this.cdcStream.isStartingReplication) { + await timers.setTimeout(50); + } + + resolve(); + }); + } + + async getCheckpoint(options?: { timeout?: number }) { + let checkpoint = await Promise.race([ + getClientCheckpoint(this.connectionManager, this.factory, { timeout: options?.timeout ?? 15_000 }), + this.streamPromise + ]); + if (checkpoint == null) { + // This indicates an issue with the test setup - streamingPromise completed instead + // of getClientCheckpoint() + throw new Error('Test failure - streamingPromise completed'); + } + return checkpoint; + } + + async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { + let checkpoint = await this.getCheckpoint(options); + const map = new Map(Object.entries(buckets)); + return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + } + + /** + * This waits for a client checkpoint. + */ + async getBucketData(bucket: string, start?: InternalOpId | string | undefined, options?: { timeout?: number }) { + start ??= 0n; + if (typeof start == 'string') { + start = BigInt(start); + } + const checkpoint = await this.getCheckpoint(options); + const map = new Map([[bucket, start]]); + let data: OplogEntry[] = []; + while (true) { + const batch = this.storage!.getBucketDataBatch(checkpoint, map); + + const batches = await test_utils.fromAsync(batch); + data = data.concat(batches[0]?.chunkData.data ?? []); + if (batches.length == 0 || !batches[0]!.chunkData.has_more) { + break; + } + map.set(bucket, BigInt(batches[0]!.chunkData.next_after)); + } + return data; + } + + /** + * This does not wait for a client checkpoint. + */ + async getCurrentBucketData(bucket: string, start?: InternalOpId | string | undefined) { + start ??= 0n; + if (typeof start == 'string') { + start = BigInt(start); + } + const { checkpoint } = await this.storage!.getCheckpoint(); + const map = new Map([[bucket, start]]); + const batch = this.storage!.getBucketDataBatch(checkpoint, map); + const batches = await test_utils.fromAsync(batch); + return batches[0]?.chunkData.data ?? []; + } +} diff --git a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts new file mode 100644 index 000000000..19b886b93 --- /dev/null +++ b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts @@ -0,0 +1,158 @@ +import { describe, expect, test } from 'vitest'; +import { env } from './env.js'; +import { createTestTableWithBasicId, describeWithStorage, waitForPendingCDCChanges } from './util.js'; +import { TestStorageFactory } from '@powersync/service-core'; +import { METRICS_HELPER } from '@powersync/service-core-tests'; +import { ReplicationMetric } from '@powersync/service-types'; +import * as timers from 'node:timers/promises'; +import { logger, ReplicationAbortedError } from '@powersync/lib-services-framework'; +import { CDCStreamTestContext } from './CDCStreamTestContext.js'; +import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; + +describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { + describeWithStorage({ timeout: 240_000 }, function (factory) { + test('resuming initial replication (1)', async () => { + // Stop early - likely to not include deleted row in first replication attempt. 
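+ // The second argument is the approximate number of replicated rows after which initial
+ // replication is interrupted; see testResumingReplication below.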
+ await testResumingReplication(factory, 2000); + }); + test('resuming initial replication (2)', async () => { + // Stop late - likely to include deleted row in first replication attempt. + await testResumingReplication(factory, 8000); + }); + }); +}); + +async function testResumingReplication(factory: TestStorageFactory, stopAfter: number) { + // This tests interrupting and then resuming initial replication. + // We interrupt replication after test_data1 has fully replicated, and + // test_data2 has partially replicated. + // This test relies on interval behavior that is not 100% deterministic: + // 1. We attempt to abort initial replication once a certain number of + // rows have been replicated, but this is not exact. Our only requirement + // is that we have not fully replicated test_data2 yet. + // 2. Order of replication is not deterministic, so which specific rows + // have been / have not been replicated at that point is not deterministic. + // We do allow for some variation in the test results to account for this. + + await using context = await CDCStreamTestContext.open(factory, { cdcStreamOptions: { snapshotBatchSize: 1000 } }); + + await context.updateSyncRules(`bucket_definitions: + global: + data: + - SELECT * FROM test_data1 + - SELECT * FROM test_data2`); + const { connectionManager } = context; + + await createTestTableWithBasicId(connectionManager, 'test_data1'); + await createTestTableWithBasicId(connectionManager, 'test_data2'); + + await connectionManager.query(`INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1)`); + let beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(`INSERT INTO test_data2(description) SELECT 'value' FROM GENERATE_SERIES(1, 10000, 1)`); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const p = context.replicateSnapshot(); + + let done = false; + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + try { + (async () => { + while (!done) { + const count = + ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount; + + if (count >= stopAfter) { + logger.info(`Stopped initial replication after replicating ${count} rows.`); + break; + } + await timers.setTimeout(1); + } + // This interrupts initial replication + await context.dispose(); + })(); + // This confirms that initial replication was interrupted + const error = await p.catch((e) => e); + expect(error).toBeInstanceOf(ReplicationAbortedError); + done = true; + } finally { + done = true; + } + + // Bypass the usual "clear db on factory open" step. 
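+ // doNotClear keeps the partially replicated source tables and stored data intact, so this
+ // second context resumes the interrupted snapshot instead of starting from scratch.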
+ await using context2 = await CDCStreamTestContext.open(factory, { + doNotClear: true, + cdcStreamOptions: { snapshotBatchSize: 1000 } + }); + + // This delete should be using one of the ids already replicated + const { + recordset: [deleteResult] + } = await context2.connectionManager.query(`DELETE TOP (1) FROM test_data2 OUTPUT DELETED.id`); + // This update should also be using one of the ids already replicated + const id1 = deleteResult.id; + logger.info(`Deleted row with id: ${id1}`); + const { + recordset: [updateResult] + } = await context2.connectionManager.query( + `UPDATE test_data2 SET description = 'update1' OUTPUT INSERTED.id WHERE id = (SELECT TOP 1 id FROM test_data2)` + ); + const id2 = updateResult.id; + logger.info(`Updated row with id: ${id2}`); + beforeLSN = await getLatestReplicatedLSN(context2.connectionManager); + const { + recordset: [insertResult] + } = await context2.connectionManager.query( + `INSERT INTO test_data2(description) OUTPUT INSERTED.id VALUES ('insert1')` + ); + const id3 = insertResult.id; + logger.info(`Inserted row with id: ${id3}`); + await waitForPendingCDCChanges(beforeLSN, context2.connectionManager); + + await context2.loadNextSyncRules(); + await context2.replicateSnapshot(); + + await context2.startStreaming(); + const data = await context2.getBucketData('global[]', undefined, {}); + + const deletedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id1)); + const updatedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id2)); + const insertedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id3)); + + if (deletedRowOps.length != 0) { + // The deleted row was part of the first replication batch, + // so it is removed by streaming replication. + expect(deletedRowOps.length).toEqual(2); + expect(deletedRowOps[1].op).toEqual('REMOVE'); + } else { + // The deleted row was not part of the first replication batch, + // so it's not in the resulting ops at all. + } + + expect(updatedRowOps.length).toEqual(2); + // description for the first op could be 'foo' or 'update1'. + // We only test the final version. + expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1'); + + expect(insertedRowOps.length).toEqual(2); + expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1'); + expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1'); + + // 1000 of test_data1 during first replication attempt. + // N >= 1000 of test_data2 during first replication attempt. + // 10000 - N - 1 + 1 of test_data2 during second replication attempt. + // An additional update during streaming replication (2x total for this row). + // An additional insert during streaming replication (2x total for this row). + // If the deleted row was part of the first replication batch, it's removed by streaming replication. + // This adds 2 ops. + // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000. + // However, this is not deterministic. + const expectedCount = 11002 + deletedRowOps.length; + expect(data.length).toEqual(expectedCount); + + const replicatedCount = + ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount; + + // With resumable replication, there should be no need to re-replicate anything. 
+ expect(replicatedCount).toEqual(expectedCount); +} diff --git a/modules/module-mssql/test/src/env.ts b/modules/module-mssql/test/src/env.ts new file mode 100644 index 000000000..f3cc7a6cc --- /dev/null +++ b/modules/module-mssql/test/src/env.ts @@ -0,0 +1,11 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + MSSQL_TEST_URI: utils.type.string.default(`mssql://sa:321strong_ROOT_password@localhost:1433/powersync`), + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), + CI: utils.type.boolean.default('false'), + SLOW_TESTS: utils.type.boolean.default('false'), + PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5431/powersync_storage_test'), + TEST_MONGO_STORAGE: utils.type.boolean.default('true'), + TEST_POSTGRES_STORAGE: utils.type.boolean.default('true') +}); diff --git a/modules/module-mssql/test/src/mssql-to-sqlite.test.ts b/modules/module-mssql/test/src/mssql-to-sqlite.test.ts new file mode 100644 index 000000000..bac600c3c --- /dev/null +++ b/modules/module-mssql/test/src/mssql-to-sqlite.test.ts @@ -0,0 +1,474 @@ +import { SqliteInputRow } from '@powersync/service-sync-rules'; +import { afterAll, beforeEach, describe, expect, test } from 'vitest'; +import { clearTestDb, createUpperCaseUUID, TEST_CONNECTION_OPTIONS, waitForPendingCDCChanges } from './util.js'; +import { CDCToSqliteRow, toSqliteInputRow } from '@module/common/mssqls-to-sqlite.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import { + enableCDCForTable, + getCaptureInstance, + getLatestReplicatedLSN, + getMinLSN, + toQualifiedTableName +} from '@module/utils/mssql.js'; +import sql from 'mssql'; + +describe('MSSQL Data Types Tests', () => { + const connectionManager = new MSSQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); + + beforeEach(async () => { + await clearTestDb(connectionManager); + await setupTestTable(); + }); + afterAll(async () => { + await connectionManager.end(); + }); + + async function setupTestTable() { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.test_data ( + id INT IDENTITY(1,1) PRIMARY KEY, + tinyint_col TINYINT, + smallint_col SMALLINT, + int_col INT, + bigint_col BIGINT, + float_col FLOAT, + real_col REAL, + decimal_col DECIMAL(10,2), + numeric_col NUMERIC(10,2), + money_col MONEY, + smallmoney_col SMALLMONEY, + bit_col BIT, + + date_col DATE, + datetime_col DATETIME, + datetime2_col DATETIME2(6), + smalldatetime_col SMALLDATETIME, + datetimeoffset_col DATETIMEOFFSET(3), + time_col TIME(6), + + char_col CHAR(10), + varchar_col VARCHAR(255), + varchar_max_col VARCHAR(MAX), + nchar_col NCHAR(15), + nvarchar_col NVARCHAR(255), + nvarchar_max_col NVARCHAR(MAX), + text_col TEXT, + ntext_col NTEXT, + + binary_col BINARY(16), + varbinary_col VARBINARY(256), + varbinary_max_col VARBINARY(MAX), + image_col IMAGE, + + uniqueidentifier_col UNIQUEIDENTIFIER, + xml_col XML, + json_col NVARCHAR(MAX), + + hierarchyid_col HIERARCHYID, + geometry_col GEOMETRY, + geography_col GEOGRAPHY + ) + `); + + await enableCDCForTable({ connectionManager, table: 'test_data' }); + } + + test('Number types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(` + INSERT INTO ${connectionManager.schema}.test_data( + tinyint_col, + smallint_col, + int_col, + bigint_col, + float_col, + real_col, + decimal_col, + numeric_col, + money_col, + 
smallmoney_col, + bit_col + ) VALUES ( + 255, -- TINYINT maximum value + 32767, -- SMALLINT maximum value + 2147483647, -- INT maximum value + 9223372036854775807, -- BIGINT maximum value + 3.1415926535, -- FLOAT example + 3.14, -- REAL example + 12345.67, -- DECIMAL(10,2) example + 12345.67, -- NUMERIC(10,2) example + 12345.67, -- MONEY example + 123.45, -- SMALLMONEY example + 1 -- BIT value + ) + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + const expectedResult: SqliteInputRow = { + tinyint_col: 255, + smallint_col: 32767, + int_col: 2147483647, + bigint_col: 9223372036854775807n, + float_col: 3.1415926535, + real_col: expect.closeTo(3.14, 2), + decimal_col: 12345.67, + numeric_col: 12345.67, + money_col: 12345.67, + smallmoney_col: 123.45, + bit_col: 1 + }; + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Character types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data ( + char_col, + varchar_col, + varchar_max_col, + nchar_col, + nvarchar_col, + nvarchar_max_col, + text_col, + ntext_col + ) VALUES ( + 'CharData', -- CHAR(10) with padding spaces + 'Variable character data',-- VARCHAR(255) + 'Variable character data MAX', -- VARCHAR(MAX) + N'UnicodeChar', -- NCHAR(15) + N'Variable Unicode data', -- NVARCHAR(255) + N'Variable Unicode data MAX', -- NVARCHAR(MAX) + 'TextData', -- TEXT + N'UnicodeTextData' -- NTEXT + ) + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + const expectedResult = { + char_col: 'CharData ', // CHAR pads with spaces up to the defined length (10) + varchar_col: 'Variable character data', + varchar_max_col: 'Variable character data MAX', + nchar_col: 'UnicodeChar ', // NCHAR pads with spaces up to the defined length (15) + nvarchar_col: 'Variable Unicode data', + nvarchar_max_col: 'Variable Unicode data MAX', + text_col: 'TextData', + ntext_col: 'UnicodeTextData' + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Binary types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const binaryData = Buffer.from('BinaryData'); + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data ( + binary_col, + varbinary_col, + varbinary_max_col, + image_col + ) VALUES ( + @binary_col, + @varbinary_col, + @varbinary_max_col, + @image_col + ) + `, + [ + { name: 'binary_col', type: sql.Binary, value: binaryData }, + { name: 'varbinary_col', type: sql.VarBinary, value: binaryData }, + { name: 'varbinary_max_col', type: sql.VarBinary(sql.MAX), value: binaryData }, + { name: 'image_col', type: sql.Image, value: binaryData } + ] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + const expectedBinary = new Uint8Array(binaryData); + const expectedBinaryPadded = new 
Uint8Array(16); + expectedBinaryPadded.set(expectedBinary.slice(0, 16), 0); + + const expectedResult: SqliteInputRow = { + binary_col: expectedBinaryPadded, + varbinary_col: expectedBinary, + varbinary_max_col: expectedBinary, + image_col: expectedBinary + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testDate = new Date('2023-03-06T15:47:00.000Z'); + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data( + date_col, + datetime_col, + datetime2_col, + smalldatetime_col, + time_col + ) + VALUES ( + @date_col, + @datetime_col, + @datetime2_col, + @smalldatetime_col, + @time_col + ) + `, + [ + { name: 'date_col', type: sql.Date, value: testDate }, + { name: 'datetime_col', type: sql.DateTime, value: testDate }, + { name: 'datetime2_col', type: sql.DateTime2(6), value: testDate }, + { name: 'smalldatetime_col', type: sql.SmallDateTime, value: testDate }, + { name: 'time_col', type: sql.Time(6), value: testDate } + ] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + const expectedResult = { + date_col: '2023-03-06', + datetime_col: '2023-03-06T15:47:00.000Z', + datetime2_col: '2023-03-06T15:47:00.000Z', + smalldatetime_col: '2023-03-06T15:47:00.000Z', + time_col: '15:47:00.000' + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types edge cases mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime2_col) + VALUES ('0001-01-01 00:00:00.000') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime2_col) + VALUES ('9999-12-31 23:59:59.999') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime_col) + VALUES ('1753-01-01 00:00:00') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime_col) + VALUES ('9999-12-31 23:59:59.997') + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const expectedResults = [ + { datetime2_col: '0001-01-01T00:00:00.000Z' }, + { datetime2_col: '9999-12-31T23:59:59.999Z' }, + { datetime_col: '1753-01-01T00:00:00.000Z' }, + { datetime_col: '9999-12-31T23:59:59.997Z' } + ]; + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + for (let i = 0; i < expectedResults.length; i++) { + expect(databaseRows[i]).toMatchObject(expectedResults[i]); + expect(replicatedRows[i]).toMatchObject(expectedResults[i]); + } + }); + + test('DateTimeOffset type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + // DateTimeOffset preserves timezone information + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetimeoffset_col) + VALUES ('2023-03-06 15:47:00.000 +05:00') + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const expectedResult = { + datetimeoffset_col: 
'2023-03-06T10:47:00.000Z' // Converted to UTC + }; + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + // Note: The driver converts DateTimeOffset to Date, which incorporates the timezone offset which is then represented in UTC. + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('UniqueIdentifier type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + + const testGuid = createUpperCaseUUID(); + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(uniqueidentifier_col) + VALUES (@guid) + `, + [{ name: 'guid', type: sql.UniqueIdentifier, value: testGuid }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + // GUIDs are returned as strings + expect(databaseRows[0].uniqueidentifier_col).toBe(testGuid); + expect(replicatedRows[0].uniqueidentifier_col).toBe(testGuid); + }); + + test('JSON type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const expectedJSON = { name: 'John Doe', age: 30, married: true }; + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(json_col) + VALUES (@json) + `, + [{ name: 'json', type: sql.NVarChar(sql.MAX), value: JSON.stringify(expectedJSON) }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + const actualDBJSONValue = JSON.parse(databaseRows[0].json_col as string); + const actualReplicatedJSONValue = JSON.parse(replicatedRows[0].json_col as string); + expect(actualDBJSONValue).toEqual(expectedJSON); + expect(actualReplicatedJSONValue).toEqual(expectedJSON); + }); + + test('XML type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const xmlData = 'value'; + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(xml_col) + VALUES (@xml) + `, + [{ name: 'xml', type: sql.Xml, value: xmlData }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + expect(databaseRows[0].xml_col).toBe(xmlData); + expect(replicatedRows[0].xml_col).toBe(xmlData); + }); + + // TODO: Update test when properly converting spatial types + // test('Spatial types mappings', async () => { + // const beforeLSN = await getLatestReplicatedLSN(connectionManager); + // // Geometry and Geography types are stored as binary/WKT strings + // await connectionManager.query(` + // INSERT INTO [${connectionManager.schema}].test_data(geometry_col, geography_col) + // VALUES ( + // geometry::STGeomFromText('POINT(1 2)', 0), + // geography::STGeomFromText('POINT(1 2)', 4326) + // ) + // `); + // await waitForPendingCDCChanges(beforeLSN, connectionManager); + // + // const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + // const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + // + // // The 
driver currently returns spatial types as non standard objects. We just convert them to JSON strings for now + // expect(databaseRows[0].geometry_col).toBeDefined(); + // expect(databaseRows[0].geography_col).toBeDefined(); + // expect(replicatedRows[0].geometry_col).toBeDefined(); + // expect(replicatedRows[0].geography_col).toBeDefined(); + // }); + + // TODO: Enable when HierarchyID type is properly supported + // test('HierarchyID type mapping', async () => { + // const hierarchyid = '/1/'; + // const beforeLSN = await getLatestReplicatedLSN(connectionManager); + // await connectionManager.query(` + // INSERT INTO [${connectionManager.schema}].test_data(hierarchyid_col) + // VALUES (@hierarchyid) + // `, + // [{ name: 'hierarchyid', type: sql.VarChar, value: hierarchyid }] + // ); + // await waitForPendingCDCChanges(beforeLSN, connectionManager); + // + // const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + // const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + // + // const expectedBinary = new Uint8Array(Buffer.from(hierarchyid)); + // + // expect(databaseRows[0].hierarchyid_col).toEqual(expectedBinary); + // expect(replicatedRows[0].hierarchyid_col).toEqual(expectedBinary); + // }); +}); + +async function getDatabaseRows( + connectionManager: MSSQLConnectionManager, + tableName: string +): Promise { + const { recordset: rows } = await connectionManager.query( + `SELECT * FROM ${toQualifiedTableName(connectionManager.schema, tableName)}` + ); + return rows.map((row) => { + const converted = toSqliteInputRow(row, rows.columns); + // Exclude id column from results + const { id, ...rest } = converted; + return rest; + }); +} + +/** + * Return all the updates from the CDC stream for the table. 
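+ * Resolves the table's CDC capture instance, queries fn_cdc_get_all_changes for the range from the
+ * minimum available LSN to the latest replicated LSN, and keeps only the insert operations.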
+ */ +async function getReplicatedRows( + connectionManager: MSSQLConnectionManager, + tableName: string +): Promise { + const endLSN = await getLatestReplicatedLSN(connectionManager); + + const captureInstance = await getCaptureInstance({ + connectionManager, + schema: connectionManager.schema, + tableName + }); + if (!captureInstance) { + throw new Error(`No CDC capture instance found for table ${tableName}`); + } + + const startLSN = await getMinLSN(connectionManager, captureInstance.name); + // Query CDC changes + const { recordset: results } = await connectionManager.query( + ` + SELECT * FROM ${captureInstance.schema}.fn_cdc_get_all_changes_${captureInstance.name}(@from_lsn, @to_lsn, 'all update old') ORDER BY __$start_lsn, __$seqval + `, + [ + { name: 'from_lsn', type: sql.VarBinary, value: startLSN.toBinary() }, + { name: 'to_lsn', type: sql.VarBinary, value: endLSN.toBinary() } + ] + ); + + return results + .filter((row) => row.__$operation === 2) // Only INSERT operations + .map((row) => { + const converted = CDCToSqliteRow({ row, columns: results.columns }); + // Exclude id column from results + const { id, ...rest } = converted; + return rest; + }); +} diff --git a/modules/module-mssql/test/src/setup.ts b/modules/module-mssql/test/src/setup.ts new file mode 100644 index 000000000..8d0b885e6 --- /dev/null +++ b/modules/module-mssql/test/src/setup.ts @@ -0,0 +1,12 @@ +import { container } from '@powersync/lib-services-framework'; +import { METRICS_HELPER } from '@powersync/service-core-tests'; +import { beforeAll, beforeEach } from 'vitest'; + +beforeAll(async () => { + // Executes for every test file + container.registerDefaults(); +}); + +beforeEach(async () => { + METRICS_HELPER.resetMetrics(); +}); diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts new file mode 100644 index 000000000..e4eaacef0 --- /dev/null +++ b/modules/module-mssql/test/src/util.ts @@ -0,0 +1,192 @@ +import * as types from '@module/types/types.js'; +import { logger } from '@powersync/lib-services-framework'; +import { BucketStorageFactory, InternalOpId, ReplicationCheckpoint, TestStorageFactory } from '@powersync/service-core'; + +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; +import * as postgres_storage from '@powersync/service-module-postgres-storage'; + +import { describe, TestOptions } from 'vitest'; +import { env } from './env.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import { createCheckpoint, enableCDCForTable, getLatestLSN } from '@module/utils/mssql.js'; +import sql from 'mssql'; +import { v4 as uuid } from 'uuid'; +import { LSN } from '@module/common/LSN.js'; + +export const TEST_URI = env.MSSQL_TEST_URI; + +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.test_utils.mongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI +}); + +export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils.postgresTestStorageFactoryGenerator({ + url: env.PG_STORAGE_TEST_URL +}); + +export function describeWithStorage(options: TestOptions, fn: (factory: TestStorageFactory) => void) { + describe.skipIf(!env.TEST_MONGO_STORAGE)(`mongodb storage`, options, function () { + fn(INITIALIZED_MONGO_STORAGE_FACTORY); + }); + + describe.skipIf(!env.TEST_POSTGRES_STORAGE)(`postgres storage`, options, function () { + fn(INITIALIZED_POSTGRES_STORAGE_FACTORY); + }); +} + +export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ + type: 
'mssql', + uri: TEST_URI, + additionalConfig: { + pollingBatchSize: 10, + pollingIntervalMs: 1000, + trustServerCertificate: true + } +}); + +/** + * Clears all test tables (those prefixed with 'test_') from the database. Also removes CDC instances for those tables. + * @param connectionManager + */ +export async function clearTestDb(connectionManager: MSSQLConnectionManager) { + const { recordset: tables } = await connectionManager.query(` + SELECT TABLE_SCHEMA, TABLE_NAME + FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_NAME LIKE 'test_%' + `); + for (const row of tables) { + // Disable CDC for the table if enabled + await connectionManager.execute('sys.sp_cdc_disable_table', [ + { name: 'source_schema', value: row.TABLE_SCHEMA }, + { name: 'source_name', value: row.TABLE_NAME }, + { name: 'capture_instance', value: 'all' } + ]); + // Drop Tables + await connectionManager.query(`DROP TABLE [${row.TABLE_NAME}]`); + } +} + +/** + * Create a new database for testing and enables CDC on it. + * @param connectionManager + * @param dbName + */ +export async function createTestDb(connectionManager: MSSQLConnectionManager, dbName: string) { + await connectionManager.query(`DROP DATABASE IF EXISTS ${dbName}`); + await connectionManager.query(`CREATE DATABASE ${dbName}`); + await connectionManager.execute(` + USE ${dbName}; + GO + + EXEC sys.sp_cdc_enable_db; + GO`); +} + +export async function createTestTable(connectionManager: MSSQLConnectionManager, tableName: string): Promise { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.${tableName} ( + id UNIQUEIDENTIFIER PRIMARY KEY, + description VARCHAR(MAX) + ) + `); + await enableCDCForTable({ connectionManager, table: tableName }); +} + +export async function createTestTableWithBasicId( + connectionManager: MSSQLConnectionManager, + tableName: string +): Promise { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.${tableName} ( + id INT IDENTITY(1,1) PRIMARY KEY, + description VARCHAR(MAX) + ) + `); + await enableCDCForTable({ connectionManager, table: tableName }); +} + +export interface TestData { + id: string; + description: string; +} +export async function insertTestData(connectionManager: MSSQLConnectionManager, tableName: string): Promise { + const id = createUpperCaseUUID(); + const description = `description_${id}`; + await connectionManager.query( + ` + INSERT INTO ${connectionManager.schema}.${tableName} (id, description) VALUES (@id, @description) + `, + [ + { name: 'id', type: sql.UniqueIdentifier, value: id }, + { name: 'description', type: sql.NVarChar(sql.MAX), value: description } + ] + ); + + return { id, description }; +} + +export async function waitForPendingCDCChanges( + beforeLSN: LSN, + connectionManager: MSSQLConnectionManager +): Promise { + while (true) { + const { recordset: result } = await connectionManager.query( + ` + SELECT TOP 1 start_lsn + FROM cdc.lsn_time_mapping + WHERE start_lsn > @before_lsn + ORDER BY start_lsn DESC + `, + [{ name: 'before_lsn', type: sql.VarBinary, value: beforeLSN.toBinary() }] + ); + + if (result.length === 0) { + logger.info(`CDC changes pending. 
Waiting for 500ms...`); + await new Promise((resolve) => setTimeout(resolve, 500)); + } else { + logger.info(`Found LSN: ${LSN.fromBinary(result[0].start_lsn).toString()}`); + return; + } + } +} + +export async function getClientCheckpoint( + connectionManager: MSSQLConnectionManager, + storageFactory: BucketStorageFactory, + options?: { timeout?: number } +): Promise { + const start = Date.now(); + + const lsn = await getLatestLSN(connectionManager); + await createCheckpoint(connectionManager); + + // This old API needs a persisted checkpoint id. + // Since we don't use LSNs anymore, the only way to get that is to wait. + + const timeout = options?.timeout ?? 50_000; + let lastCp: ReplicationCheckpoint | null = null; + + logger.info(`Waiting for LSN checkpoint: ${lsn}`); + while (Date.now() - start < timeout) { + const storage = await storageFactory.getActiveStorage(); + const cp = await storage?.getCheckpoint(); + if (cp != null) { + lastCp = cp; + if (cp.lsn != null && cp.lsn >= lsn.toString()) { + logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`); + return cp.checkpoint; + } + } + + await new Promise((resolve) => setTimeout(resolve, 30)); + } + + throw new Error(`Timeout while waiting for checkpoint ${lsn}. Last checkpoint: ${lastCp?.lsn}`); +} + +/** + * Generates a new UUID string in uppercase for testing purposes to match the SQL Server UNIQUEIDENTIFIER format. + */ +export function createUpperCaseUUID(): string { + return uuid().toUpperCase(); +} diff --git a/modules/module-mssql/test/tsconfig.json b/modules/module-mssql/test/tsconfig.json new file mode 100644 index 000000000..18898c4ee --- /dev/null +++ b/modules/module-mssql/test/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + } + ] +} diff --git a/modules/module-mssql/tsconfig.json b/modules/module-mssql/tsconfig.json new file mode 100644 index 000000000..00738ba7e --- /dev/null +++ b/modules/module-mssql/tsconfig.json @@ -0,0 +1,26 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "typeRoots": ["./node_modules/@types"] + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/modules/module-mssql/vitest.config.ts b/modules/module-mssql/vitest.config.ts new file mode 100644 index 000000000..7a39c1f71 --- /dev/null +++ b/modules/module-mssql/vitest.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vitest/config'; +import tsconfigPaths from 'vite-tsconfig-paths'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/modules/module-mysql/package.json b/modules/module-mysql/package.json index 624f38be9..6d81f9d0f 100644 --- 
a/modules/module-mysql/package.json +++ b/modules/module-mysql/package.json @@ -47,6 +47,6 @@ "@powersync/service-module-mongodb-storage": "workspace:*", "@powersync/service-module-postgres-storage": "workspace:*", "@types/async": "^3.2.24", - "@types/semver": "^7.5.4" + "@types/semver": "^7.7.1" } } diff --git a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts index ef0b7642c..79f87d5c1 100644 --- a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts +++ b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts @@ -4,9 +4,9 @@ import * as sync_rules from '@powersync/service-sync-rules'; import * as service_types from '@powersync/service-types'; import mysql from 'mysql2/promise'; import * as common from '../common/common-index.js'; +import { toExpressionTypeFromMySQLType } from '../common/common-index.js'; import * as mysql_utils from '../utils/mysql-utils.js'; import * as types from '../types/types.js'; -import { toExpressionTypeFromMySQLType } from '../common/common-index.js'; type SchemaResult = { schema_name: string; @@ -22,7 +22,7 @@ export class MySQLRouteAPIAdapter implements api.RouteAPI { } async shutdown(): Promise { - return this.pool.end(); + await this.pool.end(); } async getSourceConfig(): Promise { @@ -288,11 +288,8 @@ export class MySQLRouteAPIAdapter implements api.RouteAPI { async createReplicationHead(callback: ReplicationHeadCallback): Promise { const head = await this.getReplicationHead(); - const r = await callback(head); - // TODO: make sure another message is replicated - - return r; + return await callback(head); } async getConnectionSchema(): Promise { diff --git a/modules/module-mysql/src/replication/BinLogReplicationJob.ts b/modules/module-mysql/src/replication/BinLogReplicationJob.ts index 32c2371b7..bf72ca728 100644 --- a/modules/module-mysql/src/replication/BinLogReplicationJob.ts +++ b/modules/module-mysql/src/replication/BinLogReplicationJob.ts @@ -1,6 +1,6 @@ import { container, logger as defaultLogger } from '@powersync/lib-services-framework'; import { POWERSYNC_VERSION, replication } from '@powersync/service-core'; -import { BinlogConfigurationError, BinLogStream } from './BinLogStream.js'; +import { BinLogStream } from './BinLogStream.js'; import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js'; export interface BinLogReplicationJobOptions extends replication.AbstractReplicationJobOptions { diff --git a/modules/module-mysql/src/replication/BinLogStream.ts b/modules/module-mysql/src/replication/BinLogStream.ts index ef1dc057b..98d9dc665 100644 --- a/modules/module-mysql/src/replication/BinLogStream.ts +++ b/modules/module-mysql/src/replication/BinLogStream.ts @@ -147,7 +147,7 @@ export class BinLogStream { const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny; if (shouldSnapshot) { - // Truncate this table, in case a previous snapshot was interrupted. + // Truncate this table in case a previous snapshot was interrupted. 
await batch.truncate([result.table]); let gtid: common.ReplicatedGTID; @@ -189,7 +189,7 @@ export class BinLogStream { const matchedTables: string[] = await common.getTablesFromPattern(connection, tablePattern); connection.release(); - let tables: storage.SourceTable[] = []; + const tables: storage.SourceTable[] = []; for (const matchedTable of matchedTables) { const replicaIdColumns = await this.getReplicaIdColumns(matchedTable, tablePattern.schema); diff --git a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts index 8966cd201..c6ca77d2b 100644 --- a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts +++ b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts @@ -1,5 +1,6 @@ import { ErrorRateLimiter } from '@powersync/service-core'; import { setTimeout } from 'timers/promises'; +import { BinlogConfigurationError } from './BinLogStream.js'; export class MySQLErrorRateLimiter implements ErrorRateLimiter { nextAllowed: number = Date.now(); @@ -17,8 +18,10 @@ export class MySQLErrorRateLimiter implements ErrorRateLimiter { reportError(e: any): void { const message = (e.message as string) ?? ''; - if (message.includes('password authentication failed')) { - // Wait 15 minutes, to avoid triggering Supabase's fail2ban + if (e instanceof BinlogConfigurationError) { + // Short delay + this.setDelay(2_000); + } else if (message.includes('password authentication failed')) { this.setDelay(900_000); } else if (message.includes('ENOTFOUND')) { // DNS lookup issue - incorrect URI or deleted instance diff --git a/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts b/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts index 62e7f118e..ebae078ca 100644 --- a/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts +++ b/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts @@ -100,6 +100,10 @@ export class PostgresBucketBatch return this.last_checkpoint_lsn; } + get noCheckpointBeforeLsn() { + return this.no_checkpoint_before_lsn; + } + async [Symbol.asyncDispose]() { super.clearListeners(); } diff --git a/modules/module-postgres/src/replication/SnapshotQuery.ts b/modules/module-postgres/src/replication/SnapshotQuery.ts index b826c215f..d496d857d 100644 --- a/modules/module-postgres/src/replication/SnapshotQuery.ts +++ b/modules/module-postgres/src/replication/SnapshotQuery.ts @@ -23,7 +23,7 @@ export interface MissingRow { /** * Snapshot query using a plain SELECT * FROM table; chunked using - * DELCLARE CURSOR / FETCH. + * DECLARE CURSOR / FETCH. * * This supports all tables, but does not efficiently resume the snapshot * if the process is restarted. 
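Note on the MySQLErrorRateLimiter change above: the limiter now picks its backoff delay from the kind of error reported, retrying quickly after a BinlogConfigurationError while backing off for 15 minutes on authentication failures. A minimal sketch of that pattern follows; the ConfigurationError class and the fallback delay are illustrative assumptions, not the module's actual types or values.

// Sketch: choose a retry delay based on the error that was reported.
class ConfigurationError extends Error {}

class SimpleErrorRateLimiter {
  private nextAllowed = Date.now();

  // Callers await this before retrying the replication job.
  async waitUntilAllowed(): Promise<void> {
    const delay = Math.max(0, this.nextAllowed - Date.now());
    await new Promise((resolve) => setTimeout(resolve, delay));
  }

  reportError(e: unknown): void {
    const message = e instanceof Error ? e.message : '';
    if (e instanceof ConfigurationError) {
      // Configuration issues are usually fixed quickly - retry soon.
      this.setDelay(2_000);
    } else if (message.includes('password authentication failed')) {
      // Credential problems: back off for 15 minutes.
      this.setDelay(900_000);
    } else {
      // Unknown errors: moderate delay (assumed value).
      this.setDelay(30_000);
    }
  }

  private setDelay(ms: number) {
    this.nextAllowed = Math.max(this.nextAllowed, Date.now() + ms);
  }
}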
diff --git a/modules/module-postgres/src/replication/WalStreamReplicationJob.ts b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts index 8b6021cd6..340af22b9 100644 --- a/modules/module-postgres/src/replication/WalStreamReplicationJob.ts +++ b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts @@ -1,4 +1,4 @@ -import { container, logger, ReplicationAbortedError } from '@powersync/lib-services-framework'; +import { container, logger } from '@powersync/lib-services-framework'; import { PgManager } from './PgManager.js'; import { MissingReplicationSlotError, sendKeepAlive, WalStream } from './WalStream.js'; diff --git a/packages/service-core/src/storage/BucketStorageBatch.ts b/packages/service-core/src/storage/BucketStorageBatch.ts index 62db7dd43..f71226191 100644 --- a/packages/service-core/src/storage/BucketStorageBatch.ts +++ b/packages/service-core/src/storage/BucketStorageBatch.ts @@ -83,6 +83,8 @@ export interface BucketStorageBatch extends ObserverClient; updateTableProgress(table: SourceTable, progress: Partial): Promise; diff --git a/packages/service-errors/src/codes.ts b/packages/service-errors/src/codes.ts index 74f92a73f..f7d26e68d 100644 --- a/packages/service-errors/src/codes.ts +++ b/packages/service-errors/src/codes.ts @@ -297,6 +297,14 @@ export enum ErrorCode { // ## PSYNC_S2xxx: Service API + /** + * Required updates in the Change Data Capture (CDC) are no longer available. + * + * Possible causes: + * * Older data has been cleaned up due to exceeding the retention period. + */ + PSYNC_S1500 = 'PSYNC_S1500', + /** * Generic internal server error (HTTP 500). * diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 327052e44..5015f40f8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -255,6 +255,61 @@ importers: specifier: workspace:* version: link:../../packages/service-core-tests + modules/module-mssql: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-errors': + specifier: workspace:* + version: link:../../packages/service-errors + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + mssql: + specifier: ^12.1.1 + version: 12.1.1 + semver: + specifier: ^7.7.2 + version: 7.7.3 + ts-codec: + specifier: ^1.3.0 + version: 1.3.0 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^11.1.0 + version: 11.1.0 + devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage + '@powersync/service-module-postgres-storage': + specifier: workspace:* + version: link:../module-postgres-storage + '@types/mssql': + specifier: ^9.1.8 + version: 9.1.8 + '@types/semver': + specifier: ^7.7.1 + version: 7.7.1 + '@types/uuid': + specifier: ^10.0.0 + version: 10.0.0 + modules/module-mysql: dependencies: '@powersync/lib-services-framework': @@ -310,8 +365,8 @@ importers: specifier: ^3.2.24 version: 3.2.24 '@types/semver': - specifier: ^7.5.4 - version: 7.5.8 + specifier: ^7.7.1 + version: 7.7.1 modules/module-postgres: dependencies: @@ -678,6 +733,9 @@ 
importers: '@powersync/service-module-mongodb-storage': specifier: workspace:* version: link:../modules/module-mongodb-storage + '@powersync/service-module-mssql': + specifier: workspace:* + version: link:../modules/module-mssql '@powersync/service-module-mysql': specifier: workspace:* version: link:../modules/module-mysql @@ -743,6 +801,74 @@ importers: packages: + '@azure-rest/core-client@2.5.1': + resolution: {integrity: sha512-EHaOXW0RYDKS5CFffnixdyRPak5ytiCtU7uXDcP/uiY+A6jFRwNGzzJBiznkCzvi5EYpY+YWinieqHb0oY916A==} + engines: {node: '>=20.0.0'} + + '@azure/abort-controller@2.1.2': + resolution: {integrity: sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==} + engines: {node: '>=18.0.0'} + + '@azure/core-auth@1.10.1': + resolution: {integrity: sha512-ykRMW8PjVAn+RS6ww5cmK9U2CyH9p4Q88YJwvUslfuMmN98w/2rdGRLPqJYObapBCdzBVeDgYWdJnFPFb7qzpg==} + engines: {node: '>=20.0.0'} + + '@azure/core-client@1.10.1': + resolution: {integrity: sha512-Nh5PhEOeY6PrnxNPsEHRr9eimxLwgLlpmguQaHKBinFYA/RU9+kOYVOQqOrTsCL+KSxrLLl1gD8Dk5BFW/7l/w==} + engines: {node: '>=20.0.0'} + + '@azure/core-http-compat@2.3.1': + resolution: {integrity: sha512-az9BkXND3/d5VgdRRQVkiJb2gOmDU8Qcq4GvjtBmDICNiQ9udFmDk4ZpSB5Qq1OmtDJGlQAfBaS4palFsazQ5g==} + engines: {node: '>=20.0.0'} + + '@azure/core-lro@2.7.2': + resolution: {integrity: sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw==} + engines: {node: '>=18.0.0'} + + '@azure/core-paging@1.6.2': + resolution: {integrity: sha512-YKWi9YuCU04B55h25cnOYZHxXYtEvQEbKST5vqRga7hWY9ydd3FZHdeQF8pyh+acWZvppw13M/LMGx0LABUVMA==} + engines: {node: '>=18.0.0'} + + '@azure/core-rest-pipeline@1.22.1': + resolution: {integrity: sha512-UVZlVLfLyz6g3Hy7GNDpooMQonUygH7ghdiSASOOHy97fKj/mPLqgDX7aidOijn+sCMU+WU8NjlPlNTgnvbcGA==} + engines: {node: '>=20.0.0'} + + '@azure/core-tracing@1.3.1': + resolution: {integrity: sha512-9MWKevR7Hz8kNzzPLfX4EAtGM2b8mr50HPDBvio96bURP/9C+HjdH3sBlLSNNrvRAr5/k/svoH457gB5IKpmwQ==} + engines: {node: '>=20.0.0'} + + '@azure/core-util@1.13.1': + resolution: {integrity: sha512-XPArKLzsvl0Hf0CaGyKHUyVgF7oDnhKoP85Xv6M4StF/1AhfORhZudHtOyf2s+FcbuQ9dPRAjB8J2KvRRMUK2A==} + engines: {node: '>=20.0.0'} + + '@azure/identity@4.13.0': + resolution: {integrity: sha512-uWC0fssc+hs1TGGVkkghiaFkkS7NkTxfnCH+Hdg+yTehTpMcehpok4PgUKKdyCH+9ldu6FhiHRv84Ntqj1vVcw==} + engines: {node: '>=20.0.0'} + + '@azure/keyvault-common@2.0.0': + resolution: {integrity: sha512-wRLVaroQtOqfg60cxkzUkGKrKMsCP6uYXAOomOIysSMyt1/YM0eUn9LqieAWM8DLcU4+07Fio2YGpPeqUbpP9w==} + engines: {node: '>=18.0.0'} + + '@azure/keyvault-keys@4.10.0': + resolution: {integrity: sha512-eDT7iXoBTRZ2n3fLiftuGJFD+yjkiB1GNqzU2KbY1TLYeXeSPVTVgn2eJ5vmRTZ11978jy2Kg2wI7xa9Tyr8ag==} + engines: {node: '>=18.0.0'} + + '@azure/logger@1.3.0': + resolution: {integrity: sha512-fCqPIfOcLE+CGqGPd66c8bZpwAji98tZ4JI9i/mlTNTlsIWslCfpg48s/ypyLxZTump5sypjrKn2/kY7q8oAbA==} + engines: {node: '>=20.0.0'} + + '@azure/msal-browser@4.25.1': + resolution: {integrity: sha512-kAdOSNjvMbeBmEyd5WnddGmIpKCbAAGj4Gg/1iURtF+nHmIfS0+QUBBO3uaHl7CBB2R1SEAbpOgxycEwrHOkFA==} + engines: {node: '>=0.8.0'} + + '@azure/msal-common@15.13.0': + resolution: {integrity: sha512-8oF6nj02qX7eE/6+wFT5NluXRHc05AgdCC3fJnkjiJooq8u7BcLmxaYYSwc2AfEkWRMRi6Eyvvbeqk4U4412Ag==} + engines: {node: '>=0.8.0'} + + '@azure/msal-node@3.8.0': + resolution: {integrity: sha512-23BXm82Mp5XnRhrcd4mrHa0xuUNRp96ivu3nRatrfdAqjoeWAGyD0eEAafxAOHAEWWmdlyFK4ELFcdziXyw2sA==} + engines: {node: '>=16'} + 
'@babel/code-frame@7.24.7': resolution: {integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==} engines: {node: '>=6.9.0'} @@ -1034,6 +1160,9 @@ packages: '@jridgewell/trace-mapping@0.3.9': resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + '@js-joda/core@5.6.5': + resolution: {integrity: sha512-3zwefSMwHpu8iVUW8YYz227sIv6UFqO31p1Bf1ZH/Vom7CmNyUsXjDBlnNzcuhmOL1XfxZ3nvND42kR23XlbcQ==} + '@js-sdsl/ordered-set@4.4.2': resolution: {integrity: sha512-ieYQ8WlBPKYzEo81H3q0DFbd8WtFRXXABb4+vRCF0AO3WWtJZFxYvRGdipUXGrd6tlSySmqhcPuO3J6SCodCxg==} @@ -1565,6 +1694,9 @@ packages: resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} engines: {node: '>=14.16'} + '@tediousjs/connection-string@0.6.0': + resolution: {integrity: sha512-GxlsW354Vi6QqbUgdPyQVcQjI7cZBdGV5vOYVYuCVDTylx2wl3WHR2HlhcxxHTrMigbelpXsdcZso+66uxPfow==} + '@tootallnate/once@2.0.0': resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} engines: {node: '>= 10'} @@ -1613,6 +1745,9 @@ packages: '@types/lodash@4.17.6': resolution: {integrity: sha512-OpXEVoCKSS3lQqjx9GGGOapBeuW5eUboYHRlHP9urXPX25IKZ6AnP5ZRxtVf63iieUbsHxLn8NQ5Nlftc6yzAA==} + '@types/mssql@9.1.8': + resolution: {integrity: sha512-mt9h5jWj+DYE5jxnKaWSV/GqDf9FV52XYVk6T3XZF69noEe+JJV6MKirii48l81+cjmAkSq+qeKX+k61fHkYrQ==} + '@types/mysql@2.15.27': resolution: {integrity: sha512-YfWiV16IY0OeBfBCk8+hXKmdTKrKlwKN1MNKAPBu5JYxLwBEZl7QzeEpGnlZb3VMGJrrGmB84gXiH+ofs/TezA==} @@ -1640,12 +1775,18 @@ packages: '@types/pg@8.15.4': resolution: {integrity: sha512-I6UNVBAoYbvuWkkU3oosC8yxqH21f4/Jc4DK71JLG3dT2mdlGe1z+ep/LQGXaKaOgcvUrsQoPRqfgtMcvZiJhg==} + '@types/readable-stream@4.0.21': + resolution: {integrity: sha512-19eKVv9tugr03IgfXlA9UVUVRbW6IuqRO5B92Dl4a6pT7K8uaGrNS0GkxiZD0BOk6PLuXl5FhWl//eX/pzYdTQ==} + '@types/semver-utils@1.1.3': resolution: {integrity: sha512-T+YwkslhsM+CeuhYUxyAjWm7mJ5am/K10UX40RuA6k6Lc7eGtq8iY2xOzy7Vq0GOqhl/xZl5l2FwURZMTPTUww==} '@types/semver@7.5.8': resolution: {integrity: sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==} + '@types/semver@7.7.1': + resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==} + '@types/shimmer@1.2.0': resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} @@ -1661,6 +1802,9 @@ packages: '@types/triple-beam@1.3.5': resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==} + '@types/uuid@10.0.0': + resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + '@types/webidl-conversions@7.0.3': resolution: {integrity: sha512-CiJJvcRtIgzadHCYXw7dqEnMNRjhGZlYK05Mj9OyktqV8uVT8fD2BFOB7S1uwBE3Kj2Z+4UyPmFw/Ixgw/LAlA==} @@ -1670,6 +1814,10 @@ packages: '@types/ws@8.2.3': resolution: {integrity: sha512-ahRJZquUYCdOZf/rCsWg88S0/+cb9wazUBHv6HZEe3XdYaBe2zr/slM8J28X07Hn88Pnm4ezo7N8/ofnOgrPVQ==} + '@typespec/ts-http-runtime@0.3.1': + resolution: {integrity: sha512-SnbaqayTVFEA6/tYumdF0UmybY0KHyKwGPBXnyckFlrrKdhWFrL3a2HIPXHjht5ZOElKGcXfD2D63P36btb+ww==} + engines: {node: '>=20.0.0'} + '@vitest/expect@3.2.4': resolution: {integrity: 
sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} @@ -1706,6 +1854,10 @@ packages: abbrev@1.1.1: resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==} + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + abstract-logging@2.0.1: resolution: {integrity: sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA==} @@ -1732,6 +1884,10 @@ packages: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + agentkeepalive@4.5.0: resolution: {integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} engines: {node: '>= 8.0.0'} @@ -1862,6 +2018,9 @@ packages: bl@4.1.0: resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + bl@6.1.4: + resolution: {integrity: sha512-ZV/9asSuknOExbM/zPPA8z00lc1ihPKWaStHkkQrxHNeYx+yY+TmF+v80dpv2G0mv3HVXBu7ryoAsxbFFhf4eg==} + boxen@7.1.1: resolution: {integrity: sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==} engines: {node: '>=14.16'} @@ -1880,12 +2039,22 @@ packages: resolution: {integrity: sha512-WIsKqkSC0ABoBJuT1LEX+2HEvNmNKKgnTAyd0fL8qzK4SH2i9NXg+t08YtdZp/V9IZ33cxe3iV4yM0qg8lMQng==} engines: {node: '>=16.20.1'} + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -2013,6 +2182,10 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} + commander@11.1.0: + resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==} + engines: {node: '>=16'} + commander@12.1.0: resolution: {integrity: sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==} engines: {node: '>=18'} @@ -2096,15 +2269,6 @@ packages: supports-color: optional: true - debug@4.4.0: - resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - debug@4.4.1: resolution: {integrity: 
sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} engines: {node: '>=6.0'} @@ -2142,6 +2306,14 @@ packages: resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} engines: {node: '>=4.0.0'} + default-browser-id@5.0.0: + resolution: {integrity: sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==} + engines: {node: '>=18'} + + default-browser@5.2.1: + resolution: {integrity: sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==} + engines: {node: '>=18'} + defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} @@ -2149,6 +2321,10 @@ packages: resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} engines: {node: '>=10'} + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -2192,6 +2368,9 @@ packages: eastasianwidth@0.2.0: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + ecdsa-sig-formatter@1.0.11: + resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} @@ -2246,6 +2425,14 @@ packages: event-stream@3.3.4: resolution: {integrity: sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==} + event-target-shim@5.0.1: + resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + events@3.3.0: + resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + expect-type@1.2.2: resolution: {integrity: sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==} engines: {node: '>=12.0.0'} @@ -2475,6 +2662,10 @@ packages: resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} engines: {node: '>= 6'} + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + http2-wrapper@2.2.1: resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==} engines: {node: '>=10.19.0'} @@ -2483,6 +2674,10 @@ packages: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + human-id@1.0.2: resolution: {integrity: sha512-UNopramDEhHJD+VR+ehk8rOslwSfByxPIZyJRfV739NDhN5LF1fa1MqnzKm2lGTQRjNrjK19Q5fhkgIfjlVUKw==} @@ -2497,6 +2692,10 @@ packages: resolution: {integrity: 
sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} + iconv-lite@0.7.0: + resolution: {integrity: sha512-cf6L2Ds3h57VVmkZe+Pn+5APsT7FpqJtEhhieDCvrE2MK5Qk9MyffgQyuxQTm6BChfeZNtcOLHp9IcWRVcIcBQ==} + engines: {node: '>=0.10.0'} + ieee754@1.2.1: resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} @@ -2574,6 +2773,11 @@ packages: resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} engines: {node: '>= 0.4'} + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -2586,6 +2790,11 @@ packages: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + is-installed-globally@0.4.0: resolution: {integrity: sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==} engines: {node: '>=10'} @@ -2635,6 +2844,10 @@ packages: resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} engines: {node: '>=0.10.0'} + is-wsl@3.1.0: + resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} + engines: {node: '>=16'} + is-yarn-global@0.4.1: resolution: {integrity: sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==} engines: {node: '>=12'} @@ -2660,6 +2873,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + js-md4@0.3.2: + resolution: {integrity: sha512-/GDnfQYsltsjRswQhN9fhv3EMw2sCpUdrdxyWDOUK7eyD++r3gRhzgiQgc/x4MAv2i1iuQ4lxO5mvqM3vj4bwA==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -2712,10 +2928,20 @@ packages: resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==} engines: {node: '>=0.10.0'} + jsonwebtoken@9.0.2: + resolution: {integrity: sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==} + engines: {node: '>=12', npm: '>=6'} + jsox@1.2.121: resolution: {integrity: sha512-9Ag50tKhpTwS6r5wh3MJSAvpSof0UBr39Pto8OnzFT32Z/pAbxAsKHzyvsyMEHVslELvHyO/4/jaQELHk8wDcw==} hasBin: true + jwa@1.4.2: + resolution: {integrity: sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==} + + jws@3.2.2: + resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} @@ -2745,6 +2971,27 @@ packages: resolution: {integrity: 
sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + lodash.startcase@4.4.0: resolution: {integrity: sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==} @@ -2937,6 +3184,11 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + mssql@12.1.1: + resolution: {integrity: sha512-nUTXi0unU6p72YKe6KDR9vW2mSQWsmy1KZqV0JkaT2v3RSkxlwx4Y4srjYmH+DZNbyA53Ijp6o2OaLnLc4F2Qg==} + engines: {node: '>=18'} + hasBin: true + mute-stream@1.0.0: resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -2954,6 +3206,9 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true + native-duplexpair@1.0.0: + resolution: {integrity: sha512-E7QQoM+3jvNtlmyfqRZ0/U75VFgCls+fSkbml2MpgWkWyz3ox8Y58gNhfuziuQYGNNQAbFZJQck55LHCnCK6CA==} + nearley@2.20.1: resolution: {integrity: sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==} hasBin: true @@ -3077,6 +3332,10 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} + open@10.2.0: + resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} + engines: {node: '>=18'} + ora@5.4.1: resolution: {integrity: sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==} engines: {node: '>=10'} @@ -3287,6 +3546,10 @@ packages: process-warning@5.0.0: resolution: {integrity: sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA==} + process@0.11.10: + resolution: {integrity: sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==} + engines: {node: '>= 0.6.0'} + progress@2.0.3: resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==} engines: {node: '>=0.4.0'} @@ -3383,6 +3646,10 @@ packages: resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} engines: {node: '>= 6'} + readable-stream@4.7.0: + resolution: {integrity: 
sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + readdirp@3.6.0: resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} engines: {node: '>=8.10.0'} @@ -3482,6 +3749,10 @@ packages: rsocket-websocket-client@1.0.0-alpha.3: resolution: {integrity: sha512-CwTwTNMGa8BKvrWde/kM3q8IHuzO8RCIfzuj25BsVe9y8eehDQHt4fXk0g1i/wpsxTm+RY6DxE6Vr5snozKVOg==} + run-applescript@7.1.0: + resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + run-async@3.0.0: resolution: {integrity: sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==} engines: {node: '>=0.12.0'} @@ -3523,6 +3794,11 @@ packages: engines: {node: '>=10'} hasBin: true + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + seq-queue@0.0.5: resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} @@ -3739,6 +4015,18 @@ packages: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} + tarn@3.0.2: + resolution: {integrity: sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ==} + engines: {node: '>=8.0.0'} + + tedious@18.6.1: + resolution: {integrity: sha512-9AvErXXQTd6l7TDd5EmM+nxbOGyhnmdbp/8c3pw+tjaiSXW9usME90ET/CRG1LN1Y9tPMtz/p83z4Q97B4DDpw==} + engines: {node: '>=18'} + + tedious@19.1.3: + resolution: {integrity: sha512-6O6efTeYtcnar3Cqf/ptqJs+U10fYYjp/SHRNm3VGuCTUDys+AUgIbxWbT2kzl4baXAzuy9byV3qCgOimrRfTA==} + engines: {node: '>=18.17'} + term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} engines: {node: '>=8'} @@ -3859,6 +4147,9 @@ packages: tslib@2.6.3: resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + tuf-js@1.1.7: resolution: {integrity: sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -3939,6 +4230,10 @@ packages: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true + uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + v8-compile-cache-lib@3.0.1: resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} @@ -4113,6 +4408,10 @@ packages: utf-8-validate: optional: true + wsl-utils@0.1.0: + resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} + engines: {node: '>=18'} + xdg-basedir@5.1.0: resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} engines: {node: '>=12'} @@ -4174,6 +4473,151 @@ packages: snapshots: + '@azure-rest/core-client@2.5.1': + dependencies: + 
'@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/abort-controller@2.1.2': + dependencies: + tslib: 2.8.1 + + '@azure/core-auth@1.10.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-client@1.10.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-http-compat@2.3.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-lro@2.7.2': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-paging@1.6.2': + dependencies: + tslib: 2.8.1 + + '@azure/core-rest-pipeline@1.22.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-tracing@1.3.1': + dependencies: + tslib: 2.8.1 + + '@azure/core-util@1.13.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/identity@4.13.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + '@azure/msal-browser': 4.25.1 + '@azure/msal-node': 3.8.0 + open: 10.2.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-common@2.0.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-keys@4.10.0': + dependencies: + '@azure-rest/core-client': 2.5.1 + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-http-compat': 2.3.1 + '@azure/core-lro': 2.7.2 + '@azure/core-paging': 1.6.2 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/keyvault-common': 2.0.0 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/logger@1.3.0': + dependencies: + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/msal-browser@4.25.1': + dependencies: + '@azure/msal-common': 15.13.0 + + '@azure/msal-common@15.13.0': {} + + '@azure/msal-node@3.8.0': + dependencies: + '@azure/msal-common': 15.13.0 + jsonwebtoken: 9.0.2 + uuid: 8.3.2 + '@babel/code-frame@7.24.7': dependencies: '@babel/highlight': 7.24.7 @@ -4206,7 +4650,7 @@ snapshots: outdent: 0.5.0 prettier: 2.8.8 resolve-from: 5.0.0 - semver: 7.6.2 + semver: 
7.7.3 '@changesets/assemble-release-plan@6.0.4': dependencies: @@ -4215,7 +4659,7 @@ snapshots: '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 - semver: 7.6.2 + semver: 7.7.3 '@changesets/changelog-git@0.2.0': dependencies: @@ -4273,7 +4717,7 @@ snapshots: '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 picocolors: 1.1.0 - semver: 7.6.2 + semver: 7.7.3 '@changesets/get-release-plan@4.0.4': dependencies: @@ -4483,6 +4927,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 + '@js-joda/core@5.6.5': {} + '@js-sdsl/ordered-set@4.4.2': {} '@manypkg/find-root@1.1.0': @@ -4520,11 +4966,11 @@ snapshots: '@npmcli/fs@2.1.2': dependencies: '@gar/promisify': 1.1.3 - semver: 7.6.2 + semver: 7.7.3 '@npmcli/fs@3.1.1': dependencies: - semver: 7.6.2 + semver: 7.7.3 '@npmcli/git@4.1.0': dependencies: @@ -4534,7 +4980,7 @@ snapshots: proc-log: 3.0.0 promise-inflight: 1.0.1 promise-retry: 2.0.1 - semver: 7.6.2 + semver: 7.7.3 which: 3.0.1 transitivePeerDependencies: - bluebird @@ -4812,7 +5258,7 @@ snapshots: '@types/shimmer': 1.2.0 import-in-the-middle: 1.14.2 require-in-the-middle: 7.3.0 - semver: 7.6.2 + semver: 7.7.3 shimmer: 1.2.1 transitivePeerDependencies: - supports-color @@ -5096,6 +5542,8 @@ snapshots: dependencies: defer-to-connect: 2.0.1 + '@tediousjs/connection-string@0.6.0': {} + '@tootallnate/once@2.0.0': {} '@tsconfig/node10@1.0.11': {} @@ -5133,6 +5581,14 @@ snapshots: '@types/lodash@4.17.6': {} + '@types/mssql@9.1.8': + dependencies: + '@types/node': 22.16.2 + tarn: 3.0.2 + tedious: 18.6.1 + transitivePeerDependencies: + - supports-color + '@types/mysql@2.15.27': dependencies: '@types/node': 22.16.2 @@ -5161,10 +5617,16 @@ snapshots: pg-protocol: 1.6.1 pg-types: 2.2.0 + '@types/readable-stream@4.0.21': + dependencies: + '@types/node': 22.16.2 + '@types/semver-utils@1.1.3': {} '@types/semver@7.5.8': {} + '@types/semver@7.7.1': {} + '@types/shimmer@1.2.0': {} '@types/strip-bom@3.0.0': {} @@ -5177,6 +5639,8 @@ snapshots: '@types/triple-beam@1.3.5': {} + '@types/uuid@10.0.0': {} + '@types/webidl-conversions@7.0.3': {} '@types/whatwg-url@11.0.5': @@ -5187,6 +5651,14 @@ snapshots: dependencies: '@types/node': 22.16.2 + '@typespec/ts-http-runtime@0.3.1': + dependencies: + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + '@vitest/expect@3.2.4': dependencies: '@types/chai': 5.2.2 @@ -5238,6 +5710,10 @@ snapshots: abbrev@1.1.1: {} + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + abstract-logging@2.0.1: {} acorn-import-attributes@1.9.5(acorn@8.15.0): @@ -5258,6 +5734,8 @@ snapshots: transitivePeerDependencies: - supports-color + agent-base@7.1.4: {} + agentkeepalive@4.5.0: dependencies: humanize-ms: 1.2.1 @@ -5372,6 +5850,13 @@ snapshots: inherits: 2.0.4 readable-stream: 3.6.2 + bl@6.1.4: + dependencies: + '@types/readable-stream': 4.0.21 + buffer: 6.0.3 + inherits: 2.0.4 + readable-stream: 4.7.0 + boxen@7.1.1: dependencies: ansi-align: 3.0.1 @@ -5398,6 +5883,8 @@ snapshots: bson@6.10.4: {} + buffer-equal-constant-time@1.0.1: {} + buffer-from@1.1.2: {} buffer@5.7.1: @@ -5405,6 +5892,15 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + cac@6.7.14: {} cacache@16.1.3: @@ -5565,6 +6061,8 @@ snapshots: commander@10.0.1: {} + commander@11.1.0: {} + commander@12.1.0: {} commander@2.20.3: {} @@ 
-5653,10 +6151,6 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.0: - dependencies: - ms: 2.1.3 - debug@4.4.1: dependencies: ms: 2.1.3 @@ -5673,12 +6167,21 @@ snapshots: deep-extend@0.6.0: {} + default-browser-id@5.0.0: {} + + default-browser@5.2.1: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.0 + defaults@1.0.4: dependencies: clone: 1.0.4 defer-to-connect@2.0.1: {} + define-lazy-prop@3.0.0: {} + delegates@1.0.0: {} denque@2.1.0: {} @@ -5709,6 +6212,10 @@ snapshots: eastasianwidth@0.2.0: {} + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + emoji-regex@8.0.0: {} emoji-regex@9.2.2: {} @@ -5782,6 +6289,10 @@ snapshots: stream-combiner: 0.0.4 through: 2.3.8 + event-target-shim@5.0.1: {} + + events@3.3.0: {} + expect-type@1.2.2: {} exponential-backoff@3.1.1: {} @@ -6042,6 +6553,13 @@ snapshots: transitivePeerDependencies: - supports-color + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + http2-wrapper@2.2.1: dependencies: quick-lru: 5.1.1 @@ -6054,6 +6572,13 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + human-id@1.0.2: {} humanize-ms@1.2.1: @@ -6068,6 +6593,10 @@ snapshots: dependencies: safer-buffer: 2.1.2 + iconv-lite@0.7.0: + dependencies: + safer-buffer: 2.1.2 + ieee754@1.2.1: {} ignore-by-default@1.0.1: {} @@ -6142,6 +6671,8 @@ snapshots: dependencies: hasown: 2.0.2 + is-docker@3.0.0: {} + is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -6150,6 +6681,10 @@ snapshots: dependencies: is-extglob: 2.1.1 + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + is-installed-globally@0.4.0: dependencies: global-dirs: 3.0.1 @@ -6181,6 +6716,10 @@ snapshots: is-windows@1.0.2: {} + is-wsl@3.1.0: + dependencies: + is-inside-container: 1.0.0 + is-yarn-global@0.4.1: {} isarray@0.0.1: {} @@ -6204,6 +6743,8 @@ snapshots: jose@4.15.9: {} + js-md4@0.3.2: {} + js-tokens@4.0.0: {} js-tokens@9.0.1: {} @@ -6245,8 +6786,32 @@ snapshots: jsonpointer@5.0.1: {} + jsonwebtoken@9.0.2: + dependencies: + jws: 3.2.2 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.3 + jsox@1.2.121: {} + jwa@1.4.2: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@3.2.2: + dependencies: + jwa: 1.4.2 + safe-buffer: 5.2.1 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 @@ -6275,6 +6840,20 @@ snapshots: dependencies: p-locate: 5.0.0 + lodash.includes@4.3.0: {} + + lodash.isboolean@3.0.3: {} + + lodash.isinteger@4.0.4: {} + + lodash.isnumber@3.0.3: {} + + lodash.isplainobject@4.0.6: {} + + lodash.isstring@4.0.1: {} + + lodash.once@4.1.1: {} + lodash.startcase@4.4.0: {} lodash@4.17.21: {} @@ -6468,6 +7047,16 @@ snapshots: ms@2.1.3: {} + mssql@12.1.1: + dependencies: + '@tediousjs/connection-string': 0.6.0 + commander: 11.1.0 + debug: 4.4.1 + tarn: 3.0.2 + tedious: 19.1.3 + transitivePeerDependencies: + - supports-color + mute-stream@1.0.0: {} mysql2@3.11.3: @@ -6488,6 +7077,8 @@ snapshots: nanoid@3.3.11: {} + native-duplexpair@1.0.0: {} + nearley@2.20.1: dependencies: commander: 2.20.3 @@ -6519,7 +7110,7 @@ snapshots: nopt: 6.0.0 npmlog: 6.0.2 rimraf: 3.0.2 - semver: 7.6.2 + semver: 7.7.3 tar: 6.2.1 which: 2.0.2 transitivePeerDependencies: @@ -6542,7 +7133,7 
@@ snapshots: ignore-by-default: 1.0.1 minimatch: 3.1.2 pstree.remy: 1.1.8 - semver: 7.6.2 + semver: 7.7.3 simple-update-notifier: 2.0.0 supports-color: 5.5.0 touch: 3.1.1 @@ -6561,7 +7152,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 is-core-module: 2.14.0 - semver: 7.6.2 + semver: 7.7.3 validate-npm-package-license: 3.0.4 normalize-path@3.0.0: {} @@ -6599,7 +7190,7 @@ snapshots: rc-config-loader: 4.1.3 remote-git-tags: 3.0.0 rimraf: 5.0.9 - semver: 7.6.2 + semver: 7.7.3 semver-utils: 1.1.4 source-map-support: 0.5.21 spawn-please: 2.0.2 @@ -6615,7 +7206,7 @@ snapshots: npm-install-checks@6.3.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 npm-normalize-package-bin@3.0.1: {} @@ -6623,7 +7214,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 proc-log: 3.0.0 - semver: 7.6.2 + semver: 7.7.3 validate-npm-package-name: 5.0.1 npm-packlist@7.0.4: @@ -6635,7 +7226,7 @@ snapshots: npm-install-checks: 6.3.0 npm-normalize-package-bin: 3.0.1 npm-package-arg: 10.1.0 - semver: 7.6.2 + semver: 7.7.3 npm-registry-fetch@14.0.5: dependencies: @@ -6672,6 +7263,13 @@ snapshots: dependencies: mimic-fn: 2.1.0 + open@10.2.0: + dependencies: + default-browser: 5.2.1 + define-lazy-prop: 3.0.0 + is-inside-container: 1.0.0 + wsl-utils: 0.1.0 + ora@5.4.1: dependencies: bl: 4.1.0 @@ -6727,7 +7325,7 @@ snapshots: got: 12.6.1 registry-auth-token: 5.0.2 registry-url: 6.0.1 - semver: 7.6.2 + semver: 7.7.3 package-manager-detector@0.2.0: {} @@ -6882,6 +7480,8 @@ snapshots: process-warning@5.0.0: {} + process@0.11.10: {} + progress@2.0.3: {} promise-inflight@1.0.1: {} @@ -7008,6 +7608,14 @@ snapshots: string_decoder: 1.3.0 util-deprecate: 1.0.2 + readable-stream@4.7.0: + dependencies: + abort-controller: 3.0.0 + buffer: 6.0.3 + events: 3.3.0 + process: 0.11.10 + string_decoder: 1.3.0 + readdirp@3.6.0: dependencies: picomatch: 2.3.1 @@ -7032,7 +7640,7 @@ snapshots: require-in-the-middle@7.3.0: dependencies: - debug: 4.4.0 + debug: 4.4.1 module-details-from-path: 1.0.3 resolve: 1.22.8 transitivePeerDependencies: @@ -7112,6 +7720,8 @@ snapshots: dependencies: rsocket-core: 1.0.0-alpha.3 + run-applescript@7.1.0: {} + run-async@3.0.0: {} run-parallel@1.2.0: @@ -7138,12 +7748,14 @@ snapshots: semver-diff@4.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 semver-utils@1.1.4: {} semver@7.6.2: {} + semver@7.7.3: {} + seq-queue@0.0.5: {} set-blocking@2.0.0: {} @@ -7188,7 +7800,7 @@ snapshots: simple-update-notifier@2.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 sisteransi@1.0.5: {} @@ -7352,6 +7964,38 @@ snapshots: mkdirp: 1.0.4 yallist: 4.0.0 + tarn@3.0.2: {} + + tedious@18.6.1: + dependencies: + '@azure/core-auth': 1.10.1 + '@azure/identity': 4.13.0 + '@azure/keyvault-keys': 4.10.0 + '@js-joda/core': 5.6.5 + '@types/node': 22.16.2 + bl: 6.1.4 + iconv-lite: 0.6.3 + js-md4: 0.3.2 + native-duplexpair: 1.0.0 + sprintf-js: 1.1.3 + transitivePeerDependencies: + - supports-color + + tedious@19.1.3: + dependencies: + '@azure/core-auth': 1.10.1 + '@azure/identity': 4.13.0 + '@azure/keyvault-keys': 4.10.0 + '@js-joda/core': 5.6.5 + '@types/node': 22.16.2 + bl: 6.1.4 + iconv-lite: 0.7.0 + js-md4: 0.3.2 + native-duplexpair: 1.0.0 + sprintf-js: 1.1.3 + transitivePeerDependencies: + - supports-color + term-size@2.2.1: {} text-hex@1.0.0: {} @@ -7463,6 +8107,8 @@ snapshots: tslib@2.6.3: {} + tslib@2.8.1: {} + tuf-js@1.1.7: dependencies: '@tufjs/models': 1.0.4 @@ -7528,7 +8174,7 @@ snapshots: is-yarn-global: 0.4.1 latest-version: 7.0.0 pupa: 3.1.0 - semver: 7.6.2 + semver: 7.7.3 semver-diff: 4.0.0 xdg-basedir: 5.1.0 @@ -7540,6 
+8186,8 @@ snapshots: uuid@11.1.0: {} + uuid@8.3.2: {} + v8-compile-cache-lib@3.0.1: {} validate-npm-package-license@3.0.4: @@ -7799,6 +8447,10 @@ snapshots: ws@8.18.0: {} + wsl-utils@0.1.0: + dependencies: + is-wsl: 3.1.0 + xdg-basedir@5.1.0: {} xtend@4.0.2: {} diff --git a/service/Dockerfile b/service/Dockerfile index 66c86808e..102d20367 100644 --- a/service/Dockerfile +++ b/service/Dockerfile @@ -22,6 +22,7 @@ COPY modules/module-postgres-storage/package.json modules/module-postgres-storag COPY modules/module-mongodb/package.json modules/module-mongodb/tsconfig.json modules/module-mongodb/ COPY modules/module-mongodb-storage/package.json modules/module-mongodb-storage/tsconfig.json modules/module-mongodb-storage/ COPY modules/module-mysql/package.json modules/module-mysql/tsconfig.json modules/module-mysql/ +COPY modules/module-mssql/package.json modules/module-mssql/tsconfig.json modules/module-mssql/ RUN corepack enable pnpm && corepack install RUN pnpm install --frozen-lockfile @@ -48,6 +49,7 @@ COPY modules/module-postgres-storage/src modules/module-postgres-storage/src/ COPY modules/module-mongodb/src modules/module-mongodb/src/ COPY modules/module-mongodb-storage/src modules/module-mongodb-storage/src/ COPY modules/module-mysql/src modules/module-mysql/src/ +COPY modules/module-mssql/src modules/module-mssql/src/ RUN pnpm build:production && \ rm -rf node_modules **/node_modules && \ diff --git a/service/package.json b/service/package.json index 15ebfa29c..77241e9b1 100644 --- a/service/package.json +++ b/service/package.json @@ -16,6 +16,7 @@ "@powersync/service-module-postgres-storage": "workspace:*", "@powersync/service-module-mongodb": "workspace:*", "@powersync/service-module-mongodb-storage": "workspace:*", + "@powersync/service-module-mssql": "workspace:*", "@powersync/service-module-mysql": "workspace:*", "@powersync/service-rsocket-router": "workspace:*", "@powersync/service-module-core": "workspace:*", diff --git a/service/src/entry.ts b/service/src/entry.ts index 61b943f17..b67997922 100644 --- a/service/src/entry.ts +++ b/service/src/entry.ts @@ -5,6 +5,7 @@ import { CoreModule } from '@powersync/service-module-core'; import { MongoModule } from '@powersync/service-module-mongodb'; import { MongoStorageModule } from '@powersync/service-module-mongodb-storage'; import { MySQLModule } from '@powersync/service-module-mysql'; +import { MSSQLModule } from '@powersync/service-module-mssql'; import { PostgresModule } from '@powersync/service-module-postgres'; import { PostgresStorageModule } from '@powersync/service-module-postgres-storage'; import { startServer } from './runners/server.js'; @@ -21,6 +22,7 @@ moduleManager.register([ new CoreModule(), new MongoModule(), new MongoStorageModule(), + new MSSQLModule(), new MySQLModule(), new PostgresModule(), new PostgresStorageModule() diff --git a/service/tsconfig.json b/service/tsconfig.json index 6a9560f45..c308b2058 100644 --- a/service/tsconfig.json +++ b/service/tsconfig.json @@ -47,6 +47,9 @@ }, { "path": "../modules/module-mysql" + }, + { + "path": "../modules/module-mssql" } ] } diff --git a/tsconfig.json b/tsconfig.json index e9d0017c5..78586205e 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -25,6 +25,9 @@ { "path": "./modules/module-postgres-storage" }, + { + "path": "./modules/module-mssql" + }, { "path": "./modules/module-mysql" },
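The MSSQL test utilities added in this change (getLatestLSN and getClientCheckpoint) both follow the same poll-with-timeout pattern: repeatedly query until a condition holds, sleep briefly between attempts, and fail after a deadline. A generic sketch of that pattern is shown below; the helper name and parameters are illustrative, not part of the module's API.

// Sketch: poll an async condition until it yields a value or the timeout expires.
async function waitFor<T>(
  poll: () => Promise<T | null | undefined>,
  options: { timeoutMs: number; intervalMs: number }
): Promise<T> {
  const start = Date.now();
  while (Date.now() - start < options.timeoutMs) {
    const result = await poll();
    if (result != null) {
      return result;
    }
    await new Promise((resolve) => setTimeout(resolve, options.intervalMs));
  }
  throw new Error(`Timed out after ${options.timeoutMs}ms`);
}

// Hypothetical usage, mirroring the loop in getClientCheckpoint:
// wait up to 50s for a checkpoint at or past a target LSN.
// const checkpoint = await waitFor(
//   async () => {
//     const cp = await storage?.getCheckpoint();
//     return cp?.lsn != null && cp.lsn >= targetLsn ? cp : null;
//   },
//   { timeoutMs: 50_000, intervalMs: 30 }
// );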