From 8f06250e52c460017bfa499fd709e8c46dfe098c Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 27 Aug 2025 13:25:40 +0200 Subject: [PATCH 01/42] Added mssql docker compose for dev --- modules/module-mssql/dev/docker-compose.yaml | 38 +++++ modules/module-mssql/dev/scripts/init.sql | 143 +++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 modules/module-mssql/dev/docker-compose.yaml create mode 100644 modules/module-mssql/dev/scripts/init.sql diff --git a/modules/module-mssql/dev/docker-compose.yaml b/modules/module-mssql/dev/docker-compose.yaml new file mode 100644 index 000000000..a7bb364ef --- /dev/null +++ b/modules/module-mssql/dev/docker-compose.yaml @@ -0,0 +1,38 @@ +services: + mssql: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2025-latest + container_name: mssql + ports: + - "1433:1433" + environment: + ACCEPT_EULA: "Y" + MSSQL_SA_PASSWORD: "${SA_PASSWORD}" + MSSQL_PID: "Developer" + MSSQL_AGENT_ENABLED: "true" # required for CDC capture/cleanup jobs + volumes: + - mssql-data:/var/opt/mssql + healthcheck: + test: [ "CMD-SHELL", "/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"$${MSSQL_SA_PASSWORD}\" -Q \"SELECT 1;\" || exit 1" ] + interval: 5s + timeout: 3s + retries: 30 + + mssql-setup: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2025-latest + container_name: mssql-setup + depends_on: + mssql: + condition: service_healthy + environment: + SA_PASSWORD: "${SA_PASSWORD}" + APP_DB: "${APP_DB:-appdb}" + APP_LOGIN: "${APP_LOGIN:-appuser}" + APP_PASSWORD: "${APP_PASSWORD:-P@ssw0rd!App}" + volumes: + - ./scripts:/scripts:ro + entrypoint: ["/bin/bash", "-lc", "/opt/mssql-tools18/bin/sqlcmd -C -S mssql,1433 -U sa -P \"$SA_PASSWORD\" -i /scripts/init.sql && echo '✅ MSSQL init done'"] + +volumes: + mssql-data: \ No newline at end of file diff --git a/modules/module-mssql/dev/scripts/init.sql b/modules/module-mssql/dev/scripts/init.sql new file mode 100644 index 000000000..74d641a2f --- /dev/null +++ b/modules/module-mssql/dev/scripts/init.sql @@ -0,0 +1,143 @@ +-- Create database (idempotent) +DECLARE @db sysname = '$(APP_DB)'; +IF DB_ID(@db) IS NULL +BEGIN + DECLARE @sql nvarchar(max) = N'CREATE DATABASE [' + @db + N'];'; +EXEC(@sql); +END +GO + +-- Enable CLR (idempotent, needed for CDC net changes update-mask optimization) +IF (SELECT CAST(value_in_use AS INT) FROM sys.configurations WHERE name = 'clr enabled') = 0 +BEGIN + EXEC sp_configure 'show advanced options', 1; + RECONFIGURE; + EXEC sp_configure 'clr enabled', 1; + RECONFIGURE; +END +GO + +-- Enable CDC at the database level (idempotent) +DECLARE @db sysname = '$(APP_DB)'; +DECLARE @cmd nvarchar(max) = N'USE [' + @db + N']; +IF EXISTS (SELECT 1 FROM sys.databases WHERE name = ''' + @db + N''' AND is_cdc_enabled = 0) + EXEC sys.sp_cdc_enable_db;'; +EXEC(@cmd); +GO + +-- Create a SQL login (server) and user (db), then grant CDC read access +-- Note: 'cdc_reader' role is auto-created when CDC is enabled on the DB. 
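+-- (If missing, the gating role is created by the sys.sp_cdc_enable_table calls further down,
+-- which pass @role_name = N'cdc_reader'.)
+-- After setup, the configured capture instances can be verified with, for example:
+--   SELECT capture_instance, source_object_id FROM cdc.change_tables;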
+DECLARE @db sysname = '$(APP_DB)'; +DECLARE @login sysname = '$(APP_LOGIN)'; +DECLARE @password nvarchar(128) = '$(APP_PASSWORD)'; +-- Create login if missing +IF NOT EXISTS (SELECT 1 FROM sys.server_principals WHERE name = @login) +BEGIN + DECLARE @mklogin nvarchar(max) = N'CREATE LOGIN [' + @login + N'] WITH PASSWORD = ''' + @password + N''', CHECK_POLICY = ON;'; +EXEC(@mklogin); +END; + +-- Create user in DB if missing +DECLARE @mkuser nvarchar(max) = N'USE [' + @db + N']; +IF NOT EXISTS (SELECT 1 FROM sys.database_principals WHERE name = ''' + @login + N''') + CREATE USER [' + @login + N'] FOR LOGIN [' + @login + N'];'; +EXEC(@mkuser); +GO +/* ----------------------------------------------------------- + OPTIONAL: enable CDC for specific tables. + You must enable CDC per table to actually capture changes. + Example below creates a demo table and enables CDC on it. +------------------------------------------------------------*/ + +DECLARE @db sysname = '$(APP_DB)'; +EXEC(N'USE [' + @db + N']; +IF OBJECT_ID(''dbo.lists'', ''U'') IS NULL +BEGIN + CREATE TABLE dbo.lists ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), -- GUID (36 characters), + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + name NVARCHAR(MAX) NOT NULL, + owner_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_lists PRIMARY KEY (id) + ); +END; +'); + + +EXEC(N'USE [' + @db + N']; +IF OBJECT_ID(''dbo.todos'', ''U'') IS NULL +BEGIN + CREATE TABLE dbo.todos ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), -- GUID (36 characters) + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + completed_at DATETIME2 NULL, + description NVARCHAR(MAX) NOT NULL, + completed BIT NOT NULL DEFAULT 0, + created_by UNIQUEIDENTIFIER NULL, + completed_by UNIQUEIDENTIFIER NULL, + list_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_todos PRIMARY KEY (id), + CONSTRAINT FK_todos_lists FOREIGN KEY (list_id) REFERENCES dbo.lists(id) ON DELETE CASCADE + ); +END; +'); +GO + +-- Enable CDC for dbo.lists (idempotent guard) +DECLARE @db sysname = '$(APP_DB)'; +DECLARE @login sysname = '$(APP_LOGIN)'; +DECLARE @enableListsTable nvarchar(max) = N'USE [' + @db + N']; +IF NOT EXISTS ( + SELECT 1 + FROM cdc.change_tables + WHERE source_object_id = OBJECT_ID(N''dbo.lists'') +) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N''dbo'', + @source_name = N''lists'', + @role_name = N''cdc_reader'', + @supports_net_changes = 1; +END;'; +EXEC(@enableListsTable); + +-- Enable CDC for dbo.todos (idempotent guard) +DECLARE @enableTodosTable nvarchar(max) = N'USE [' + @db + N']; +IF NOT EXISTS ( + SELECT 1 + FROM cdc.change_tables + WHERE source_object_id = OBJECT_ID(N''dbo.todos'') +) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N''dbo'', + @source_name = N''todos'', + @role_name = N''cdc_reader'', + @supports_net_changes = 1; +END;'; +EXEC(@enableTodosTable); + +-- Grant minimal rights to read CDC data: +-- 1) read access to base tables (db_datareader) +-- 2) membership in cdc_reader (allows selecting from CDC change tables & functions) +DECLARE @grant nvarchar(max) = N'USE [' + @db + N']; +IF NOT EXISTS (SELECT 1 FROM sys.database_role_members rm + JOIN sys.database_principals r ON rm.role_principal_id = r.principal_id AND r.name = ''db_datareader'' + JOIN sys.database_principals u ON rm.member_principal_id = u.principal_id AND u.name = ''' + @login + N''') + ALTER ROLE db_datareader ADD MEMBER [' + @login + N']; + +IF NOT EXISTS (SELECT 1 FROM sys.database_role_members rm + JOIN sys.database_principals r ON rm.role_principal_id = 
r.principal_id AND r.name = ''cdc_reader'' + JOIN sys.database_principals u ON rm.member_principal_id = u.principal_id AND u.name = ''' + @login + N''') + ALTER ROLE cdc_reader ADD MEMBER [' + @login + N'];'; +EXEC(@grant); +GO + +DECLARE @db sysname = '$(APP_DB)'; +EXEC(N'USE [' + @db + N']; +BEGIN + INSERT INTO dbo.lists (id, name, owner_id) + VALUES (NEWID(), ''Do a demo'', NEWID()); +END; +'); +GO \ No newline at end of file From 5179741eb466a283fd7eaf7e11fc06fd9802d2da Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Tue, 16 Sep 2025 12:07:54 +0200 Subject: [PATCH 02/42] Added LSN helper class --- modules/module-mssql/src/common/LSN.ts | 73 ++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 modules/module-mssql/src/common/LSN.ts diff --git a/modules/module-mssql/src/common/LSN.ts b/modules/module-mssql/src/common/LSN.ts new file mode 100644 index 000000000..a361d73dc --- /dev/null +++ b/modules/module-mssql/src/common/LSN.ts @@ -0,0 +1,73 @@ +import { ReplicationAssertionError } from '@powersync/service-errors'; + +/** + * Helper class for interpreting and manipulating SQL Server Log Sequence Numbers (LSNs). + * In SQL Server, an LSN is stored as a 10-byte binary value. + * But it is commonly represented in a human-readable format as three hexadecimal parts separated by colons: + * `00000000:00000000:0000`. + * + * The three parts represent different hierarchical levels of the transaction log: + * 1. The first part identifies the Virtual Log File (VLF). + * 2. The second part points to the log block within the VLF. + * 3. The third part specifies the exact log record within the log block. + */ + +export class LSN { + /** + * The zero or null LSN value. All other LSN values are greater than this. + */ + static ZERO = '00000000:00000000:0000'; + + protected value: string; + + private constructor(lsn: string) { + this.value = lsn; + } + + /** + * Converts this LSN back into its raw 10-byte binary representation for use in SQL Server functions. + */ + toBinary(): Buffer { + let sanitized: string = this.value.replace(/:/g, ''); + return Buffer.from(sanitized, 'hex'); + } + + /** + * Converts a raw 10-byte binary LSN value into its string representation. + * An error is thrown if the binary value is not exactly 10 bytes. + * @param rawLSN + */ + static fromBinary(rawLSN: Buffer): LSN { + if (rawLSN.length !== 10) { + throw new ReplicationAssertionError(`LSN must be 10 bytes, got ${rawLSN.length}`); + } + const hex = rawLSN.toString('hex').toUpperCase(); // 20 hex chars + + return new LSN(`${hex.slice(0, 8)}:${hex.slice(8, 16)}:${hex.slice(16, 20)}`); + } + + /** + * Creates an LSN instance from the provided string representation. An error is thrown if the format is invalid. + * @param stringLSN + */ + static fromString(stringLSN: string): LSN { + if (!/^[0-9A-Fa-f]{8}:[0-9A-Fa-f]{8}:[0-9A-Fa-f]{4}$/.test(stringLSN)) { + throw new ReplicationAssertionError( + `Invalid LSN string. Expected format is [00000000:00000000:0000]. Got: ${stringLSN}` + ); + } + + return new LSN(stringLSN); + } + + compare(other: LSN): -1 | 0 | 1 { + if (this.value === other.value) { + return 0; + } + return this.value < other.value ? 
-1 : 1;
+  }
+
+  valueOf(): string {
+    return this.value;
+  }
+}

From 4aa3cb6bcf312f9e6724266b41f7c04e0ea60bba Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:05:37 +0200
Subject: [PATCH 03/42] Added mssql to sqlite type mapping

---
 modules/module-mssql/src/common/LSN.ts     |  4 ++
 .../src/common/mssqls-to-sqlite.ts         | 50 +++++++++++++++++++
 2 files changed, 54 insertions(+)
 create mode 100644 modules/module-mssql/src/common/mssqls-to-sqlite.ts

diff --git a/modules/module-mssql/src/common/LSN.ts b/modules/module-mssql/src/common/LSN.ts
index a361d73dc..756c6a153 100644
--- a/modules/module-mssql/src/common/LSN.ts
+++ b/modules/module-mssql/src/common/LSN.ts
@@ -70,4 +70,8 @@ export class LSN {
   valueOf(): string {
     return this.value;
   }
+
+  toString(): string {
+    return this.value;
+  }
 }

diff --git a/modules/module-mssql/src/common/mssqls-to-sqlite.ts b/modules/module-mssql/src/common/mssqls-to-sqlite.ts
new file mode 100644
index 000000000..fd5a9eb8c
--- /dev/null
+++ b/modules/module-mssql/src/common/mssqls-to-sqlite.ts
@@ -0,0 +1,50 @@
+import sql from 'mssql';
+import { DatabaseInputRow, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules';
+
+export function toSqliteInputRow(row: sql.IRecordSet<any>, columns: sql.IColumnMetadata): SqliteInputRow {
+  let result: DatabaseInputRow = {};
+  for (const key in row) {
+    // The column metadata is expected to always be present for every key in the row
+    const columnMetadata = columns[key];
+
+    if (row[key] !== null) {
+      switch (columnMetadata.type) {
+        case sql.TYPES.BigInt:
+          // MSSQL returns BIGINT as a string to avoid precision loss
+          result[key] = typeof row[key] === 'string' ? BigInt(row[key]) : row[key];
+          break;
+        // Convert Dates to string
+        case sql.TYPES.Date:
+          result[key] = toISODateString(row[key] as Date);
+          break;
+        case sql.TYPES.Time:
+          result[key] = toISOTimeString(row[key] as Date);
+          break;
+        case sql.TYPES.DateTime:
+        case sql.TYPES.DateTime2:
+        case sql.TYPES.SmallDateTime:
+        case sql.TYPES.DateTimeOffset: // The offset is lost when the driver converts to Date. This needs to be handled in the sql query.
+          const date = row[key] as Date;
+          result[key] = isNaN(date.getTime()) ? null : date.toISOString();
+          break;
+        // TODO: Confirm case sql.TYPES.UDT
+        default:
+          result[key] = row[key];
+      }
+    } else {
+      // Null values are passed through unchanged
+      result[key] = null;
+    }
+  }
+  return toSyncRulesRow(result);
+}
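+
+/*
+ * Illustrative usage (not part of this patch): `recordset.columns` on an mssql
+ * driver result provides the column metadata expected by toSqliteInputRow.
+ *
+ *   const { recordset } = await request.query('SELECT * FROM dbo.todos');
+ *   const sqliteRow = toSqliteInputRow(recordset[0], recordset.columns);
+ */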
+
+function toISODateString(date: Date): string | null {
+  return isNaN(date.getTime()) ? null : date.toISOString().split('T')[0];
+}
+
+function toISOTimeString(date: Date): string | null {
+  return isNaN(date.getTime()) ? null : date.toISOString().split('T')[1];
+}

From 918a4622c6953ce71b6afcaa33db1278dceed50a Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:06:30 +0200
Subject: [PATCH 04/42] Added MSSQLSourceTable and helper cache to track tables
 during runtime

---
 .../src/common/MSSQLSourceTable.ts      | 54 +++++++++++++++++++
 .../src/common/MSSQLSourceTableCache.ts | 36 +++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 modules/module-mssql/src/common/MSSQLSourceTable.ts
 create mode 100644 modules/module-mssql/src/common/MSSQLSourceTableCache.ts

diff --git a/modules/module-mssql/src/common/MSSQLSourceTable.ts b/modules/module-mssql/src/common/MSSQLSourceTable.ts
new file mode 100644
index 000000000..768d03cb7
--- /dev/null
+++ b/modules/module-mssql/src/common/MSSQLSourceTable.ts
@@ -0,0 +1,54 @@
+import { SourceTable } from '@powersync/service-core';
+import { escapeIdentifier } from '../utils/mssql.js';
+
+export interface CaptureInstance {
+  name: string;
+  schema: string;
+}
+
+export interface MSSQLSourceTableOptions {
+  sourceTable: SourceTable;
+  /**
+   * The unique name of the CDC capture instance for this table
+   */
+  captureInstance: CaptureInstance;
+}
+
+export class MSSQLSourceTable {
+  constructor(private options: MSSQLSourceTableOptions) {}
+
+  get sourceTable() {
+    return this.options.sourceTable;
+  }
+
+  updateSourceTable(updated: SourceTable): void {
+    this.options.sourceTable = updated;
+  }
+
+  get captureInstance() {
+    return this.options.captureInstance.name;
+  }
+
+  get cdcSchema() {
+    return this.options.captureInstance.schema;
+  }
+
+  get CTTable() {
+    return `${this.cdcSchema}.${this.captureInstance}_CT`;
+  }
+
+  get allChangesFunction() {
+    return `${this.cdcSchema}.fn_cdc_get_all_changes_${this.captureInstance}`;
+  }
+
+  get netChangesFunction() {
+    return `${this.cdcSchema}.fn_cdc_get_net_changes_${this.captureInstance}`;
+  }
+
+  /**
+   * Escapes this source table's name and schema for use in MSSQL queries.
+   */
+  toQualifiedName(): string {
+    return `${escapeIdentifier(this.sourceTable.schema)}.${escapeIdentifier(this.sourceTable.name)}`;
+  }
+}

diff --git a/modules/module-mssql/src/common/MSSQLSourceTableCache.ts b/modules/module-mssql/src/common/MSSQLSourceTableCache.ts
new file mode 100644
index 000000000..18f984bd8
--- /dev/null
+++ b/modules/module-mssql/src/common/MSSQLSourceTableCache.ts
@@ -0,0 +1,36 @@
+import { SourceTable } from '@powersync/service-core';
+import { MSSQLSourceTable } from './MSSQLSourceTable.js';
+import { ServiceAssertionError } from '@powersync/service-errors';
+
+export class MSSQLSourceTableCache {
+  private cache = new Map<number, MSSQLSourceTable>();
+
+  set(table: MSSQLSourceTable): void {
+    this.cache.set(table.sourceTable.objectId!, table);
+  }
+
+  /**
+   * Updates the underlying source table of the cached MSSQLSourceTable.
+   * @param updatedTable
+   */
+  updateSourceTable(updatedTable: SourceTable) {
+    const existingTable = this.cache.get(updatedTable.objectId!);
+
+    if (!existingTable) {
+      throw new ServiceAssertionError('Tried to update a non-existing MSSQLSourceTable in the cache');
+    }
+    existingTable.updateSourceTable(updatedTable);
+  }
+
+  get(tableId: number): MSSQLSourceTable | undefined {
+    return this.cache.get(tableId);
+  }
+
+  getAll(): MSSQLSourceTable[] {
+    return Array.from(this.cache.values());
+  }
+
+  delete(tableId: number): boolean {
+    return this.cache.delete(tableId);
+  }
+}

From 223c07d33fc35b846c6ea114b8a5847e9e05b179 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:07:40 +0200
Subject: [PATCH 05/42] Added connection types for mssql

More module templating

---
 .../src/api/MSSQLRouteAPIAdapter.ts            |  59 ++++++
 modules/module-mssql/src/module/MSSQLModule.ts |  81 ++++++++
 .../src/types/mssql-data-types.ts              |  73 ++++++++
 modules/module-mssql/src/types/types.ts        | 176 ++++++++++++++++++
 4 files changed, 389 insertions(+)
 create mode 100644 modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts
 create mode 100644 modules/module-mssql/src/module/MSSQLModule.ts
 create mode 100644 modules/module-mssql/src/types/mssql-data-types.ts
 create mode 100644 modules/module-mssql/src/types/types.ts

diff --git a/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts
new file mode 100644
index 000000000..4ff2b3b39
--- /dev/null
+++ b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts
@@ -0,0 +1,59 @@
+import {
+  api,
+  ParseSyncRulesOptions,
+  PatternResult,
+  ReplicationHeadCallback,
+  ReplicationLagOptions
+} from '@powersync/service-core';
+import * as service_types from '@powersync/service-types';
+import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
+import { ResolvedConnectionConfig } from '../types/types.js';
+import { ExecuteSqlResponse } from '@powersync/service-types/dist/routes.js';
+import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js';
+
+export class MSSQLRouteAPIAdapter implements api.RouteAPI {
+  protected connectionManager: MSSQLConnectionManager;
+
+  constructor(protected config: ResolvedConnectionConfig) {
+    this.connectionManager = new MSSQLConnectionManager(config, {});
+  }
+
+  // NOTE: Most of these RouteAPI methods are unimplemented stubs; return types are
+  // intentionally loose until they are implemented.
+  createReplicationHead(callback: ReplicationHeadCallback): Promise<any> {
+    return Promise.resolve(undefined);
+  }
+
+  executeQuery(query: string, params: any[]): Promise<any> {
+    return Promise.resolve(undefined);
+  }
+
+  getConnectionSchema(): Promise<any[]> {
+    return Promise.resolve([]);
+  }
+
+  getConnectionStatus(): Promise<any> {
+    return Promise.resolve(undefined);
+  }
+
+  getDebugTablesInfo(tablePatterns: TablePattern[], sqlSyncRules: SqlSyncRules): Promise<PatternResult[]> {
+    return Promise.resolve([]);
+  }
+
+  getParseSyncRulesOptions(): ParseSyncRulesOptions {
+    return {
+      defaultSchema: this.connectionManager.schema
+    };
+  }
+
+  getReplicationLagBytes(options: ReplicationLagOptions): Promise<number | undefined> {
+    return Promise.resolve(undefined);
+  }
+
+  getSourceConfig(): Promise<any> {
+    return Promise.resolve(undefined);
+  }
+
+  shutdown(): Promise<void> {
+    return Promise.resolve(undefined);
+  }
+}

diff --git a/modules/module-mssql/src/module/MSSQLModule.ts b/modules/module-mssql/src/module/MSSQLModule.ts
new file mode 100644
index 000000000..f070134fc
--- /dev/null
+++ b/modules/module-mssql/src/module/MSSQLModule.ts
@@ -0,0 +1,81 @@
+import {
+  api,
+  ConfigurationFileSyncRulesProvider,
+  ConnectionTestResult,
+  replication,
+  system,
+  TearDownOptions
+} from '@powersync/service-core';
+import { MSSQLConnectionManagerFactory } from '../replication/MSSQLConnectionManagerFactory.js';
+import * as types from '../types/types.js';
+import { CDCReplicator } from '../replication/CDCReplicator.js';
+import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js';
+import { checkSourceConfiguration } from '../utils/mssql.js';
+import { MSSQLErrorRateLimiter } from '../replication/MSSQLErrorRateLimiter.js';
+import { MSSQLRouteAPIAdapter } from '../api/MSSQLRouteAPIAdapter.js';
+
+export class MSSQLModule extends replication.ReplicationModule<types.MSSQLConnectionConfig> {
+  constructor() {
+    super({
+      name: 'MSSQL',
+      type: types.MSSQL_CONNECTION_TYPE,
+      configSchema: types.MSSQLConnectionConfig
+    });
+  }
+
+  async onInitialized(context: system.ServiceContextContainer): Promise<void> {}
+
+  protected createRouteAPIAdapter(): api.RouteAPI {
+    return new MSSQLRouteAPIAdapter(this.resolveConfig(this.decodedConfig!));
+  }
+
+  protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator {
+    const normalisedConfig = this.resolveConfig(this.decodedConfig!);
+    const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules);
+    const connectionFactory = new MSSQLConnectionManagerFactory(normalisedConfig);
+
+    return new CDCReplicator({
+      id: this.getDefaultId(normalisedConfig.database),
+      syncRuleProvider: syncRuleProvider,
+      storageEngine: context.storageEngine,
+      metricsEngine: context.metricsEngine,
+      connectionFactory: connectionFactory,
+      rateLimiter: new MSSQLErrorRateLimiter()
+    });
+  }
+
+  /**
+   * Combines the base config with the normalized connection settings.
+   */
+  private resolveConfig(config: types.MSSQLConnectionConfig): types.ResolvedConnectionConfig {
+    return {
+      ...config,
+      ...types.normalizeConnectionConfig(config)
+    };
+  }
+
+  async teardown(options: TearDownOptions): Promise<void> {
+    // No specific teardown is required for MSSQL
+  }
+
+  async testConnection(config: types.MSSQLConnectionConfig) {
+    this.decodeConfig(config);
+    const normalizedConfig = this.resolveConfig(this.decodedConfig!);
+    return await MSSQLModule.testConnection(normalizedConfig);
+  }
+
+  static async testConnection(normalizedConfig: types.ResolvedConnectionConfig): Promise<ConnectionTestResult> {
+    const connectionManager = new MSSQLConnectionManager(normalizedConfig, { max: 1 });
+    try {
+      const errors = await checkSourceConfiguration(connectionManager);
+      if (errors.length > 0) {
+        throw new Error(errors.join('\n'));
+      }
+    } finally {
+      await connectionManager.end();
+    }
+    return {
+      connectionDescription: normalizedConfig.hostname
+    };
+  }
+}

diff --git a/modules/module-mssql/src/types/mssql-data-types.ts b/modules/module-mssql/src/types/mssql-data-types.ts
new file mode 100644
index 000000000..402def00f
--- /dev/null
+++ b/modules/module-mssql/src/types/mssql-data-types.ts
@@ -0,0 +1,73 @@
+import { ColumnDescriptor } from '@powersync/service-core';
+import { ISqlType } from 'mssql';
+
+export interface MSSQLColumnDescriptor extends ColumnDescriptor {
+  /** The underlying system type id. For base types system type id == user type id */
+  typeId: number;
+  /** The user type id that uniquely identifies the type */
+  userTypeId: number;
+  // /** The name of the user/alias type.
For example SYSNAME, GEOMETRY */
+  // userTypeName: string;
+}
+
+/** The shared system type id for all CLR_UDT types in SQL Server */
+export const CLR_UDT_TYPE_ID = 240;
+
+/**
+ * Enum mapping the base MSSQL data types to their system type IDs.
+ */
+export enum MSSQLBaseType {
+  IMAGE = 34,
+  TEXT = 35,
+  UNIQUEIDENTIFIER = 36,
+  DATE = 40,
+  TIME = 41,
+  DATETIME2 = 42,
+  DATETIMEOFFSET = 43,
+  TINYINT = 48,
+  SMALLINT = 52,
+  INT = 56,
+  SMALLDATETIME = 58,
+  REAL = 59,
+  MONEY = 60,
+  DATETIME = 61,
+  FLOAT = 62,
+  SQL_VARIANT = 98,
+  NTEXT = 99,
+  BIT = 104,
+  DECIMAL = 106,
+  NUMERIC = 108,
+  SMALLMONEY = 122,
+  BIGINT = 127,
+  VARBINARY = 165,
+  VARCHAR = 167,
+  BINARY = 173,
+  CHAR = 175,
+  TIMESTAMP = 189,
+  NVARCHAR = 231,
+  NCHAR = 239,
+  XML = 241,
+  JSON = 244
+}
+
+/**
+ * Enum mapping some of the extended user-defined MSSQL data types to their user type IDs.
+ */
+export enum MSSQLExtendedUserType {
+  // VARBINARY system type [165]
+  VECTOR = 255,
+  // NVARCHAR system type [231]
+  SYSNAME = 256,
+  // CLR_UDT system type [240]
+  HIERARCHYID = 128,
+  // CLR_UDT system type [240]
+  GEOMETRY = 129,
+  // CLR_UDT system type [240]
+  GEOGRAPHY = 130
+}
+
+export interface MSSQLParameter {
+  name: string;
+  value: any;
+  type?: (() => ISqlType) | ISqlType;
+}

diff --git a/modules/module-mssql/src/types/types.ts b/modules/module-mssql/src/types/types.ts
new file mode 100644
index 000000000..061b647e4
--- /dev/null
+++ b/modules/module-mssql/src/types/types.ts
@@ -0,0 +1,176 @@
+import { ErrorCode, makeHostnameLookupFunction, ServiceError } from '@powersync/lib-services-framework';
+import * as service_types from '@powersync/service-types';
+import { LookupFunction } from 'node:net';
+import * as t from 'ts-codec';
+import * as urijs from 'uri-js';
+
+export const MSSQL_CONNECTION_TYPE = 'mssql' as const;
+
+export const AzureActiveDirectoryPasswordAuthentication = t.object({
+  type: t.literal('azure-active-directory-password'),
+  options: t.object({
+    /**
+     * The `userName` associated with the Azure account.
+     */
+    userName: t.string,
+    /**
+     * The `password` associated with the Azure account.
+     */
+    password: t.string,
+    /**
+     * A client id to use.
+     */
+    clientId: t.string,
+    /**
+     * Azure tenant ID
+     */
+    tenantId: t.string
+  })
+});
+export type AzureActiveDirectoryPasswordAuthentication = t.Decoded<typeof AzureActiveDirectoryPasswordAuthentication>;
+
+export const AzureActiveDirectoryServicePrincipalSecret = t.object({
+  type: t.literal('azure-active-directory-service-principal-secret'),
+  options: t.object({
+    /**
+     * Application (`client`) ID from your registered Azure application
+     */
+    clientId: t.string,
+    /**
+     * The created `client secret` for this registered Azure application
+     */
+    clientSecret: t.string,
+    /**
+     * Directory (`tenant`) ID from your registered Azure application
+     */
+    tenantId: t.string
+  })
+});
+export type AzureActiveDirectoryServicePrincipalSecret = t.Decoded<typeof AzureActiveDirectoryServicePrincipalSecret>;
+
+export const DefaultAuthentication = t.object({
+  type: t.literal('default'),
+  options: t.object({
+    /**
+     * User name to use for sql server login.
+     */
+    userName: t.string,
+    /**
+     * Password to use for sql server login.
+ */ + password: t.string + }) +}); +export type DefaultAuthentication = t.Decoded; + +export type AuthenticationType = + | DefaultAuthentication + | AzureActiveDirectoryPasswordAuthentication + | AzureActiveDirectoryServicePrincipalSecret; + +export interface NormalizedMSSQLConnectionConfig { + id: string; + tag: string; + + username?: string; + password?: string; + hostname: string; + port: number; + database: string; + schema?: string; + + authentication?: AuthenticationType; + + lookup?: LookupFunction; +} + +export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.and( + t.object({ + type: t.literal(MSSQL_CONNECTION_TYPE), + uri: t.string.optional(), + username: t.string.optional(), + password: t.string.optional(), + database: t.string.optional(), + schema: t.string.optional(), + hostname: t.string.optional(), + port: service_types.configFile.portCodec.optional(), + + authentication: DefaultAuthentication.or(AzureActiveDirectoryPasswordAuthentication) + .or(AzureActiveDirectoryServicePrincipalSecret) + .optional(), + + reject_ip_ranges: t.array(t.string).optional() + }) +); + +/** + * Config input specified when starting services + */ +export type MSSQLConnectionConfig = t.Decoded; + +/** + * Resolved version of {@link MSSQLConnectionConfig} + */ +export type ResolvedConnectionConfig = MSSQLConnectionConfig & NormalizedMSSQLConnectionConfig; + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + */ +export function normalizeConnectionConfig(options: MSSQLConnectionConfig): NormalizedMSSQLConnectionConfig { + let uri: urijs.URIComponents; + if (options.uri) { + uri = urijs.parse(options.uri); + if (uri.scheme != 'mssql') { + throw new ServiceError( + ErrorCode.PSYNC_S1109, + `Invalid URI - protocol must be mssql, got ${JSON.stringify(uri.scheme)}` + ); + } + } else { + uri = urijs.parse('mssql:///'); + } + + const hostname = options.hostname ?? uri.host ?? ''; + const port = Number(options.port ?? uri.port ?? 1433); + + const database = options.database ?? uri.path?.substring(1) ?? ''; + + const [uri_username, uri_password] = (uri.userinfo ?? '').split(':'); + + const username = options.username ?? uri_username ?? ''; + const password = options.password ?? uri_password ?? ''; + + if (hostname == '') { + throw new ServiceError(ErrorCode.PSYNC_S1106, `MSSQL connection: hostname required`); + } + + if (username == '' && !options.authentication) { + throw new ServiceError(ErrorCode.PSYNC_S1107, `MSSQL connection: username or authentication config is required`); + } + + if (password == '' && !options.authentication) { + throw new ServiceError(ErrorCode.PSYNC_S1108, `MSSQL connection: password or authentication config is required`); + } + + if (database == '') { + throw new ServiceError(ErrorCode.PSYNC_S1105, `MSSQL connection: database required`); + } + + const lookup = makeHostnameLookupFunction(hostname, { reject_ip_ranges: options.reject_ip_ranges ?? [] }); + + return { + id: options.id ?? 'default', + tag: options.tag ?? 
'default',
+    username,
+    password,
+    hostname,
+    port,
+    database,
+    schema: options.schema,
+
+    lookup,
+    authentication: options.authentication
+  } satisfies NormalizedMSSQLConnectionConfig;
+}
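+
+/*
+ * Illustrative example (values are placeholders): the config accepts either discrete
+ * fields or a single URI, e.g.
+ *
+ *   normalizeConnectionConfig({ type: 'mssql', uri: 'mssql://appuser:secret@localhost:1433/appdb' })
+ *
+ * resolves to hostname 'localhost', port 1433 and database 'appdb', with the
+ * credentials taken from the userinfo section of the URI.
+ */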

From 1c0ac2dbbad59d4285d973231d788367417fb8c8 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:08:58 +0200
Subject: [PATCH 06/42] Added:

- MSSQL Connection manager
- Various utils for db querying
- Added schema utils

---
 .../src/replication/MSSQLConnectionManager.ts        | 104 ++++++
 .../src/replication/MSSQLConnectionManagerFactory.ts |  28 ++
 .../src/replication/MSSQLErrorRateLimiter.ts         |  36 ++
 modules/module-mssql/src/utils/mssql.ts              | 340 ++++++++++++++++++
 modules/module-mssql/src/utils/schema.ts             | 172 +++++++++
 5 files changed, 680 insertions(+)
 create mode 100644 modules/module-mssql/src/replication/MSSQLConnectionManager.ts
 create mode 100644 modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
 create mode 100644 modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts
 create mode 100644 modules/module-mssql/src/utils/mssql.ts
 create mode 100644 modules/module-mssql/src/utils/schema.ts

diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts
new file mode 100644
index 000000000..d48e2aad0
--- /dev/null
+++ b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts
@@ -0,0 +1,104 @@
+import { logger } from '@powersync/lib-services-framework';
+import sql from 'mssql';
+import { NormalizedMSSQLConnectionConfig } from '../types/types.js';
+import { POWERSYNC_VERSION } from '@powersync/service-core';
+import { MSSQLParameter } from '../types/mssql-data-types.js';
+import { addParameters } from '../utils/mssql.js';
+
+export const DEFAULT_SCHEMA = 'dbo';
+
+export class MSSQLConnectionManager {
+  private readonly pool: sql.ConnectionPool;
+
+  constructor(
+    public options: NormalizedMSSQLConnectionConfig,
+    poolOptions: sql.PoolOpts
+  ) {
+    // The pool is lazy - no connections are opened until a query is performed.
+    this.pool = new sql.ConnectionPool({
+      authentication: options.authentication,
+      user: options.username,
+      password: options.password,
+      server: options.hostname,
+      port: options.port,
+      database: options.database,
+      pool: poolOptions,
+      options: {
+        appName: `powersync/${POWERSYNC_VERSION}`,
+        encrypt: true, // required for Azure
+        trustServerCertificate: true // allows self-signed certs for local dev; disable in production
+      }
+    });
+  }
+
+  public get connectionTag() {
+    return this.options.tag;
+  }
+
+  public get connectionId() {
+    return this.options.id;
+  }
+
+  public get databaseName() {
+    return this.options.database;
+  }
+
+  public get schema() {
+    return this.options.schema ?? DEFAULT_SCHEMA;
+  }
+
+  private async ensureConnected(): Promise<void> {
+    await this.pool.connect();
+  }
+
+  async createTransaction(): Promise<sql.Transaction> {
+    await this.ensureConnected();
+    return this.pool.transaction();
+  }
+
+  async createRequest(): Promise<sql.Request> {
+    await this.ensureConnected();
+    return this.pool.request();
+  }
+
+  async query(query: string, parameters?: MSSQLParameter[]): Promise<sql.IResult<any>> {
+    await this.ensureConnected();
+    for (let tries = 2; ; tries--) {
+      try {
+        logger.debug(`Executing query: ${query}`);
+        let request = this.pool.request();
+        if (parameters) {
+          request = addParameters(request, parameters);
+        }
+        return await request.query(query);
+      } catch (e) {
+        if (tries == 1) {
+          throw e;
+        }
+        logger.warn('Query error, retrying..', e);
+      }
+    }
+  }
+
+  async execute(procedure: string, parameters?: MSSQLParameter[]): Promise<sql.IProcedureResult<any>> {
+    await this.ensureConnected();
+    let request = this.pool.request();
+    if (parameters) {
+      request = addParameters(request, parameters);
+    }
+    return request.execute(procedure);
+  }
+
+  async end(): Promise<void> {
+    if (this.pool.connected) {
+      try {
+        await this.pool.close();
+      } catch (error) {
+        // We don't particularly care if any errors are thrown when shutting down the pool
+        logger.warn('Error shutting down MSSQL connection pool', error);
+      }
+    }
+  }
+}

diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
new file mode 100644
index 000000000..23c3a2e26
--- /dev/null
+++ b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
@@ -0,0 +1,28 @@
+import { logger } from '@powersync/lib-services-framework';
+import { ResolvedConnectionConfig } from '../types/types.js';
+import { MSSQLConnectionManager } from './MSSQLConnectionManager.js';
+import sql from 'mssql';
+
+export class MSSQLConnectionManagerFactory {
+  private readonly connectionManagers: MSSQLConnectionManager[];
+  public readonly connectionConfig: ResolvedConnectionConfig;
+
+  constructor(connectionConfig: ResolvedConnectionConfig) {
+    this.connectionConfig = connectionConfig;
+    this.connectionManagers = [];
+  }
+
+  create(poolOptions: sql.PoolOpts) {
+    const manager = new MSSQLConnectionManager(this.connectionConfig, poolOptions);
+    this.connectionManagers.push(manager);
+    return manager;
+  }
+
+  async shutdown() {
+    logger.info('Shutting down MSSQL connection Managers...');
+    for (const manager of this.connectionManagers) {
+      await manager.end();
+    }
+    logger.info('MSSQL connection Managers shutdown completed.');
+  }
+}

diff --git a/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts b/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts
new file mode 100644
index 000000000..896e9f971
--- /dev/null
+++ b/modules/module-mssql/src/replication/MSSQLErrorRateLimiter.ts
@@ -0,0 +1,36 @@
+import { ErrorRateLimiter } from '@powersync/service-core';
+import { setTimeout } from 'timers/promises';
+
+export class MSSQLErrorRateLimiter implements ErrorRateLimiter {
+  nextAllowed: number = Date.now();
+
+  async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise<void> {
+    const delay = Math.max(0, this.nextAllowed - Date.now());
+    // Minimum delay between connections, even without errors
+    this.setDelay(500);
+    await setTimeout(delay, undefined, { signal: options?.signal });
+  }
+
+  mayPing(): boolean {
+    return Date.now() >= this.nextAllowed;
+  }
+
+  reportError(e: any): void {
+    const message = (e.message as string) ?? '';
+    if (message.includes('Login failed for user')) {
+      // SQL Server authentication failure
+      this.setDelay(900_000);
+    } else if (message.includes('ENOTFOUND')) {
+      // DNS lookup issue - incorrect URI or deleted instance
+      this.setDelay(120_000);
+    } else if (message.includes('ECONNREFUSED')) {
+      // Could be fail2ban or similar
+      this.setDelay(120_000);
+    } else {
+      this.setDelay(30_000);
+    }
+  }
+
+  private setDelay(delay: number) {
+    this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay);
+  }
+}

diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts
new file mode 100644
index 000000000..6a4d25a58
--- /dev/null
+++ b/modules/module-mssql/src/utils/mssql.ts
@@ -0,0 +1,340 @@
+import sql from 'mssql';
+import { SourceTable } from '@powersync/service-core';
+import { coerce, gte } from 'semver';
+import { logger } from '@powersync/lib-services-framework';
+import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js';
+import { LSN } from '../common/LSN.js';
+import { CaptureInstance, MSSQLSourceTable } from '../common/MSSQLSourceTable.js';
+import { MSSQLParameter } from '../types/mssql-data-types.js';
+
+export interface CreateStreamingQueryOptions {
+  query: string;
+  // Request to create the streaming query from
+  request: sql.Request;
+  // Cancel the iteration if this signal is aborted
+  signal?: AbortSignal;
+  // Maximum number of rows to buffer before pausing the request
+  maxQueueSize?: number;
+}
+
+export interface StreamingQuery {
+  columns: { [name: string]: sql.IColumn };
+  [Symbol.asyncIterator](): AsyncIterator<Record<string, any>>;
+}
+
+export async function createStreamingQuery(options: CreateStreamingQueryOptions): Promise<StreamingQuery> {
+  const { query, request, signal } = options;
+  const maxQueueSize = options.maxQueueSize ?? 1000;
+
+  // Stream rows via events instead of buffering the full resultset in memory
+  request.stream = true;
+
+  const rowQueue: Array<Record<string, any>> = [];
+  let resolveNext: (() => void) | null = null;
+  let streamingError: Error | null = null;
+  let isPaused = false;
+  let isDone = false;
+
+  const notify = () => {
+    if (resolveNext) {
+      resolveNext();
+      resolveNext = null;
+    }
+  };
+
+  // All listeners are registered before the query is started, so no events are missed
+  request.on('row', (row: Record<string, any>) => {
+    rowQueue.push(row);
+    if (rowQueue.length >= maxQueueSize) {
+      request.pause();
+      isPaused = true;
+    }
+    notify();
+  });
+
+  request.on('done', () => {
+    isDone = true;
+    notify();
+  });
+
+  request.on('error', (err) => {
+    streamingError = err;
+    isDone = true;
+    notify();
+  });
+
+  let columns: { [name: string]: sql.IColumn } = {};
+  if (signal?.aborted) {
+    // Don't start the query if we are already aborted
+    isDone = true;
+  } else {
+    // The column metadata is emitted once per resultset, before the first row.
+    // The query must be started before waiting for this event, otherwise it never fires.
+    const columnsPromise = new Promise<{ [name: string]: sql.IColumn }>((resolve, reject) => {
+      request.once('recordset', resolve);
+      request.once('error', reject);
+    });
+
+    // Start streaming
+    request.query(query);
+
+    // Handle aborts by cancelling the request
+    signal?.addEventListener(
+      'abort',
+      () => {
+        isDone = true;
+        request.cancel();
+        notify();
+      },
+      { once: true }
+    );
+
+    columns = await columnsPromise;
+  }
+
+  async function* rowGenerator(): AsyncGenerator<Record<string, any>> {
+    try {
+      // Loop until the stream is done and the queue has been drained
+      while (!isDone || rowQueue.length > 0) {
+        if (rowQueue.length > 0) {
+          yield rowQueue.shift() as Record<string, any>;
+          // Resume streaming once the queue has drained to half the maximum size
+          if (isPaused && rowQueue.length <= maxQueueSize / 2) {
+            request.resume();
+            isPaused = false;
+          }
+        } else if (!isDone) {
+          await new Promise<void>((resolve) => {
+            resolveNext = resolve;
+          });
+        }
+      }
+
+      if (streamingError) {
+        throw streamingError;
+      }
+    } finally {
+      if (!isDone) {
+        // The consumer stopped iterating early - cancel the rest of the stream
+        request.cancel();
+      }
+    }
+  }
+
+  return {
+    columns: columns,
+    [Symbol.asyncIterator]: rowGenerator
+  };
+}
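+
+/*
+ * Illustrative usage (not part of this patch), assuming an existing
+ * MSSQLConnectionManager instance:
+ *
+ *   const request = await connectionManager.createRequest();
+ *   const stream = await createStreamingQuery({ query: 'SELECT * FROM dbo.todos', request });
+ *   for await (const row of stream) {
+ *     console.log(stream.columns, row);
+ *   }
+ */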
+
+export const SUPPORTED_ENGINE_EDITIONS = new Map([
+  [2, 'Standard'],
+  [3, 'Enterprise - Enterprise, Developer, Evaluation'],
+  [5, 'SqlDatabase - Azure SQL Database'],
+  [8, 'SqlManagedInstance - Azure SQL Managed Instance']
+]);
+
+// SQL Server 2022 and newer
+export const MINIMUM_SUPPORTED_VERSION = '16.0';
+
+export async function checkSourceConfiguration(connectionManager: MSSQLConnectionManager): Promise<string[]> {
+  const errors: string[] = [];
+  // 1) Check the MSSQL version and edition
+  const { recordset: versionResult } = await connectionManager.query(`
+    SELECT
+      CAST(SERVERPROPERTY('EngineEdition') AS int) AS engine,
+      CAST(SERVERPROPERTY('Edition') AS nvarchar(128)) AS edition,
+      CAST(SERVERPROPERTY('ProductVersion') AS nvarchar(128)) AS version
+  `);
+
+  // If the edition is unsupported, return immediately
+  if (!SUPPORTED_ENGINE_EDITIONS.has(versionResult[0]?.engine)) {
+    errors.push(
+      `The SQL Server edition '${versionResult[0]?.edition}' is not supported. PowerSync requires a MSSQL edition that supports CDC: ${Array.from(
+        SUPPORTED_ENGINE_EDITIONS.values()
+      ).join(', ')}.`
+    );
+    return errors;
+  }
+
+  // Only applicable to the stand-alone SQL Server editions
+  if (versionResult[0]?.engine == 2 || versionResult[0]?.engine == 3) {
+    if (!isVersionAtLeast(versionResult[0]?.version, MINIMUM_SUPPORTED_VERSION)) {
+      errors.push(
+        `The SQL Server version '${versionResult[0]?.version}' is not supported. PowerSync requires MSSQL 2022 (v16) or newer.`
+      );
+    }
+  }
+
+  // 2) Check that CDC is enabled at the database level
+  const { recordset: cdcEnabledResult } = await connectionManager.query(`
+    SELECT name AS db_name, is_cdc_enabled FROM sys.databases WHERE name = DB_NAME();
+  `);
+  const cdcEnabled = cdcEnabledResult[0]?.is_cdc_enabled;
+
+  if (!cdcEnabled) {
+    errors.push(`CDC is not enabled for the database. Please enable it.`);
+  }
+
+  // 3) Check the CDC user permissions
+  const { recordset: cdcUserResult } = await connectionManager.query(`
+    SELECT
+      CASE
+        WHEN IS_SRVROLEMEMBER('sysadmin') = 1
+          OR IS_MEMBER('db_owner') = 1
+          OR IS_MEMBER('cdc_admin') = 1
+          OR IS_MEMBER('cdc_reader') = 1
+        THEN 1 ELSE 0
+      END AS has_cdc_access;
+  `);
+
+  if (!cdcUserResult[0]?.has_cdc_access) {
+    errors.push(`The current user does not have the 'cdc_reader' role. Please assign this role to the user.`);
+  }
+
+  return errors;
+}
+
+export interface IsTableEnabledForCDCOptions {
+  connectionManager: MSSQLConnectionManager;
+  table: string;
+  schema: string;
+}
+
+/**
+ * Check if the specified table is enabled for CDC.
+ * @param options
+ */
+export async function isTableEnabledForCDC(options: IsTableEnabledForCDCOptions): Promise<boolean> {
+  const { connectionManager, table, schema } = options;
+
+  const { recordset: checkResult } = await connectionManager.query(
+    `
+    SELECT 1 FROM cdc.change_tables ct
+    JOIN sys.tables AS tbl ON tbl.object_id = ct.source_object_id
+    JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id
+    WHERE sch.name = '${schema}'
+      AND tbl.name = '${table}'
+    `
+  );
+  return checkResult.length > 0;
+}
+
+/**
+ * Check if the supplied version is newer than or equal to the minimum version.
+ * @param version
+ * @param minimumVersion
+ */
+export function isVersionAtLeast(version: string, minimumVersion: string): boolean {
+  const coercedVersion = coerce(version);
+  const coercedMinimumVersion = coerce(minimumVersion);
+
+  return gte(coercedVersion!, coercedMinimumVersion!, { loose: true });
+}
+
+export interface IsWithinRetentionThresholdOptions {
+  checkpointLSN: LSN;
+  tables: MSSQLSourceTable[];
+  connectionManager: MSSQLConnectionManager;
+}
+
+/**
+ * Checks that the specified checkpoint LSN is within the CDC retention threshold for all of the specified tables.
+ * CDC periodically cleans up old data up to the retention threshold. If replication has been stopped for too long, it is
+ * possible for the checkpoint LSN to be older than the minimum LSN in the CDC tables. In such a case we need to perform a new snapshot.
+ * @param options
+ */
+export async function isWithinRetentionThreshold(options: IsWithinRetentionThresholdOptions): Promise<boolean> {
+  const { checkpointLSN, tables, connectionManager } = options;
+  for (const table of tables) {
+    const { recordset: result } = await connectionManager.query(
+      'SELECT sys.fn_cdc_get_min_lsn(@capture_instance) AS min_lsn',
+      [
+        {
+          name: 'capture_instance',
+          type: sql.NVarChar,
+          value: table.captureInstance
+        }
+      ]
+    );
+
+    const rawMinLSN: Buffer = result[0].min_lsn;
+    const minLSN = LSN.fromBinary(rawMinLSN);
+    if (minLSN.compare(checkpointLSN) > 0) {
+      logger.warn(
+        `The checkpoint LSN:[${checkpointLSN}] is older than the minimum LSN:[${minLSN}] for table ${table.sourceTable.qualifiedName}. This indicates that the checkpoint LSN is outside of the retention window.`
+      );
+      return false;
+    }
+  }
+  return true;
+}
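+
+/*
+ * For reference: the CDC cleanup retention window is configured on the SQL Server side,
+ * e.g. (value illustrative, in minutes):
+ *
+ *   EXEC sys.sp_cdc_change_job @job_type = N'cleanup', @retention = 4320;
+ */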
+
+export async function getCaptureInstance(
+  connectionManager: MSSQLConnectionManager,
+  table: SourceTable
+): Promise<CaptureInstance | null> {
+  const { recordset: result } = await connectionManager.query(
+    `
+    SELECT
+      ct.capture_instance,
+      OBJECT_SCHEMA_NAME(ct.[object_id]) AS cdc_schema
+    FROM
+      sys.tables tbl
+      INNER JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id
+      INNER JOIN cdc.change_tables ct ON ct.source_object_id = tbl.object_id
+    WHERE sch.name = '${table.schema}'
+      AND tbl.name = '${table.name}'
+      AND ct.end_lsn IS NULL;
+    `
+  );
+
+  if (result.length === 0) {
+    return null;
+  }
+
+  return {
+    name: result[0].capture_instance,
+    schema: result[0].cdc_schema
+  };
+}
+
+/**
+ * Return the maximum LSN in the CDC tables. This is the LSN that corresponds to the latest update available.
+ * @param connectionManager
+ */
+export async function getLatestLSN(connectionManager: MSSQLConnectionManager): Promise<LSN> {
+  const { recordset: result } = await connectionManager.query('SELECT sys.fn_cdc_get_max_lsn() AS max_lsn;');
+  // The LSN is a binary(10) value, returned as a Buffer
+  const rawLSN: Buffer = result[0].max_lsn;
+  return LSN.fromBinary(rawLSN);
+}
+
+/**
+ * Escapes an identifier for use in MSSQL queries.
+ * @param identifier
+ */
+export function escapeIdentifier(identifier: string): string {
+  return `[${identifier}]`;
+}
+
+export function isIColumnMetadata(obj: any): obj is sql.IColumnMetadata {
+  if (obj === null || typeof obj !== 'object' || Array.isArray(obj)) {
+    return false;
+  }
+
+  for (const value of Object.values(obj)) {
+    const property = value as any;
+    const matchesShape =
+      typeof property.index === 'number' &&
+      typeof property.name === 'string' &&
+      typeof property.length === 'number' &&
+      (typeof property.type === 'function' || typeof property.type === 'object') &&
+      typeof property.nullable === 'boolean' &&
+      typeof property.caseSensitive === 'boolean' &&
+      typeof property.identity === 'boolean' &&
+      typeof property.readOnly === 'boolean';
+    // Every entry must match the column metadata shape, not just the last one checked
+    if (!matchesShape) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+export function addParameters(request: sql.Request, parameters: MSSQLParameter[]): sql.Request {
+  for (const param of parameters) {
+    if (param.type) {
+      request.input(param.name, param.type, param.value);
+    } else {
+      request.input(param.name, param.value);
+    }
+  }
+  return request;
+}
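+
+/*
+ * Illustrative usage (not part of this patch): comparing the latest LSN on the server
+ * against a previously stored checkpoint to decide whether new changes are available:
+ *
+ *   const latestLSN = await getLatestLSN(connectionManager);
+ *   if (latestLSN.compare(LSN.fromString(storedCheckpoint)) > 0) {
+ *     // there are new CDC changes to replicate
+ *   }
+ */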

diff --git a/modules/module-mssql/src/utils/schema.ts b/modules/module-mssql/src/utils/schema.ts
new file mode 100644
index 000000000..ffb00b93a
--- /dev/null
+++ b/modules/module-mssql/src/utils/schema.ts
@@ -0,0 +1,172 @@
+import { SourceEntityDescriptor } from '@powersync/service-core';
+import { TablePattern } from '@powersync/service-sync-rules';
+import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js';
+import { MSSQLColumnDescriptor } from '../types/mssql-data-types.js';
+import { escapeIdentifier } from './mssql.js';
+
+export interface GetColumnsOptions {
+  connectionManager: MSSQLConnectionManager;
+  schema: string;
+  tableName: string;
+}
+
+async function getColumns(options: GetColumnsOptions): Promise<MSSQLColumnDescriptor[]> {
+  const { connectionManager, schema, tableName } = options;
+
+  const { recordset: columnResults } = await connectionManager.query(`
+    SELECT
+      col.name AS [name],
+      typ.name AS [type],
+      typ.system_type_id AS type_id,
+      typ.user_type_id AS user_type_id
+    FROM sys.columns AS col
+    JOIN sys.tables AS tbl ON tbl.object_id = col.object_id
+    JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id
+    JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id
+    WHERE sch.name = '${schema}'
+      AND tbl.name = '${tableName}'
+    ORDER BY col.column_id;
+  `);
+
+  return columnResults.map((row) => {
+    return {
+      name: row.name,
+      type: row.type,
+      typeId: row.type_id,
+      userTypeId: row.user_type_id
+    };
+  });
+}
+
+export interface GetReplicationIdentityColumnsOptions {
+  connectionManager: MSSQLConnectionManager;
+  schema: string;
+  tableName: string;
+}
+
+export interface ReplicationIdentityColumnsResult {
+  columns: MSSQLColumnDescriptor[];
+  identity: 'default' | 'nothing' | 'full' | 'index';
+}
+
+export async function getReplicationIdentityColumns(
+  options: GetReplicationIdentityColumnsOptions
+): Promise<ReplicationIdentityColumnsResult> {
+  const { connectionManager, schema, tableName } = options;
+  const { recordset: primaryKeyColumns } = await connectionManager.query(`
+    SELECT
+      col.name AS [name],
+      typ.name AS [type],
+      typ.system_type_id AS type_id,
+      typ.user_type_id AS user_type_id
+    FROM sys.tables AS tbl
+    JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id
+    JOIN sys.indexes AS idx ON idx.object_id = tbl.object_id AND idx.is_primary_key = 1
+    JOIN sys.index_columns AS idx_col ON idx_col.object_id = idx.object_id AND idx_col.index_id = idx.index_id
+    JOIN sys.columns AS col ON col.object_id = idx_col.object_id AND col.column_id = idx_col.column_id
+    JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id
+    WHERE sch.name = '${schema}'
+      AND tbl.name = '${tableName}'
+    ORDER BY idx_col.key_ordinal;
+  `);
+
+  if (primaryKeyColumns.length > 0) {
+    return {
+      columns: primaryKeyColumns.map((row) => ({
+        name: row.name,
+        type: row.type,
+        typeId: row.type_id,
+        userTypeId: row.user_type_id
+      })),
+      identity: 'default'
+    };
+  }
+
+  // No primary key, check if any of the columns have a unique constraint we can use
+  const { recordset: uniqueKeyColumns } = await connectionManager.query(`
+    SELECT
+      col.name AS [name],
+      typ.name AS [type],
+      typ.system_type_id AS type_id,
+      typ.user_type_id AS user_type_id
+    FROM sys.tables AS tbl
+    JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id
+    JOIN sys.indexes AS idx ON idx.object_id = tbl.object_id AND idx.is_unique_constraint = 1
+    JOIN sys.index_columns AS idx_col ON idx_col.object_id = idx.object_id AND idx_col.index_id = idx.index_id
+    JOIN sys.columns AS col ON col.object_id = idx_col.object_id AND col.column_id = idx_col.column_id
+    JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id
+    WHERE sch.name = '${schema}'
+      AND tbl.name = '${tableName}'
+    ORDER BY idx_col.key_ordinal;
+  `);
+
+  if (uniqueKeyColumns.length > 0) {
+    return {
+      columns: uniqueKeyColumns.map((row) => ({
+        name: row.name,
+        type: row.type,
+        typeId: row.type_id,
+        userTypeId: row.user_type_id
+      })),
+      identity: 'index'
+    };
+  }
+
+  const allColumns = await getColumns(options);
+
+  return {
+    columns: allColumns,
+    identity: 'full'
+  };
+}
+
+export type ResolvedTable = Omit<SourceEntityDescriptor, 'replicaIdColumns'>;
+
+export async function getTablesFromPattern(
+  connectionManager: MSSQLConnectionManager,
+  tablePattern: TablePattern
+): Promise<ResolvedTable[]> {
+  if (tablePattern.isWildcard) {
+    const { recordset: tableResults } = await connectionManager.query(`
+      SELECT
+        tbl.name AS [table],
+        sch.name AS [schema],
+        tbl.object_id AS object_id
+      FROM sys.tables tbl
+      JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id
+      WHERE sch.name = '${tablePattern.schema}'
+        AND tbl.name LIKE '${tablePattern.tablePattern}'
+    `);
+
+    return tableResults
+      .map((row) => {
+        return {
+          objectId: row.object_id,
+          schema: row.schema,
+          name: row.table
+        };
+      })
+      .filter((table: ResolvedTable) => table.name.startsWith(tablePattern.tablePrefix));
+  } else {
+    const { recordset: tableResults } = await connectionManager.query(
+      `
+      SELECT
+        tbl.name AS [table],
+        sch.name AS [schema],
+        tbl.object_id AS object_id
+      FROM sys.tables tbl
+      JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id
+      WHERE sch.name = '${tablePattern.schema}'
+        AND tbl.name = '${tablePattern.name}'
+      `
+    );
+
+    return tableResults.map((row) => {
+      return {
+        objectId: row.object_id,
+        schema: row.schema,
+        name: row.table
+      };
+    });
+  }
+}

From 80f49149e97094c2fc701ddffb4587f19b1c1618 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:09:48 +0200
Subject: [PATCH 07/42] Wip: Added mssql replication base classes for snapshot
 queries.

---
 .../src/replication/CDCReplicationJob.ts          |  99 +++
 .../src/replication/CDCReplicator.ts              |  67 ++
 modules/module-mssql/src/replication/CDCStream.ts | 818 ++++++++++++++++++
 .../src/replication/MSSQLSnapshotQuery.ts         | 225 +++++
 4 files changed, 1209 insertions(+)
 create mode 100644 modules/module-mssql/src/replication/CDCReplicationJob.ts
 create mode 100644 modules/module-mssql/src/replication/CDCReplicator.ts
 create mode 100644 modules/module-mssql/src/replication/CDCStream.ts
 create mode 100644 modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts

diff --git a/modules/module-mssql/src/replication/CDCReplicationJob.ts b/modules/module-mssql/src/replication/CDCReplicationJob.ts
new file mode 100644
index 000000000..d2a27f3f3
--- /dev/null
+++ b/modules/module-mssql/src/replication/CDCReplicationJob.ts
@@ -0,0 +1,99 @@
+import { replication } from '@powersync/service-core';
+import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js';
+import { container, logger as defaultLogger } from '@powersync/lib-services-framework';
+import { CDCDataExpiredError, CDCStream } from './CDCStream.js';
+
+export interface CDCReplicationJobOptions extends replication.AbstractReplicationJobOptions {
+  connectionFactory: MSSQLConnectionManagerFactory;
+}
+
+export class CDCReplicationJob extends replication.AbstractReplicationJob {
+  private connectionFactory: MSSQLConnectionManagerFactory;
+  private lastStream: CDCStream | null = null;
+
+  constructor(options: CDCReplicationJobOptions) {
+    super(options);
+    this.logger = defaultLogger.child({ prefix: `[powersync_${this.options.storage.group_id}] ` });
+    this.connectionFactory = options.connectionFactory;
+  }
+
+  async keepAlive() {
+    // Keepalives are handled by the CDC stream's polling mechanism
+  }
+
+  async replicate() {
+    try {
+      await this.replicateLoop();
+    } catch (e) {
+      // Fatal exception
+      container.reporter.captureException(e, {
+        metadata: {}
+      });
+      this.logger.error(`Replication failed`, e);
+
+      if (e instanceof CDCDataExpiredError) {
+        // This stops replication and restarts with a new instance
+        await this.options.storage.factory.restartReplication(this.storage.group_id);
+      }
+    } finally {
+      this.abortController.abort();
+    }
+  }
+
+  async replicateLoop() {
+    while (!this.isStopped) {
+      await this.replicateOnce();
+
+      if (!this.isStopped) {
+        await new Promise<void>((resolve) => setTimeout(resolve, 5000));
+      }
+    }
+  }
+
+  async replicateOnce() {
+    // New connections are created on every iteration (i.e. on every error with retry),
+    // otherwise we risk repeating errors related to the connection itself,
+    // such as stale cached schema state.
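+    // The rate limiter below enforces a backoff between attempts: every error reported
+    // via reportError() pushes the next allowed connection attempt further out.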
+    const connectionManager = this.connectionFactory.create({});
+    try {
+      await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
+      if (this.isStopped) {
+        return;
+      }
+      const stream = new CDCStream({
+        logger: this.logger,
+        abortSignal: this.abortController.signal,
+        storage: this.options.storage,
+        metrics: this.options.metrics,
+        connections: connectionManager
+      });
+      this.lastStream = stream;
+      await stream.replicate();
+    } catch (e) {
+      if (this.abortController.signal.aborted) {
+        return;
+      }
+      this.logger.error(`Replication error`, e);
+      if (e.cause != null) {
+        this.logger.error(`cause`, e.cause);
+      }
+
+      if (e instanceof CDCDataExpiredError) {
+        throw e;
+      } else {
+        // Report the error if relevant, before retrying
+        container.reporter.captureException(e, {
+          metadata: {}
+        });
+        // This sets the retry delay
+        this.rateLimiter?.reportError(e);
+      }
+    } finally {
+      await connectionManager.end();
+    }
+  }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    return this.lastStream?.getReplicationLagMillis();
+  }
+}

diff --git a/modules/module-mssql/src/replication/CDCReplicator.ts b/modules/module-mssql/src/replication/CDCReplicator.ts
new file mode 100644
index 000000000..385e37f6e
--- /dev/null
+++ b/modules/module-mssql/src/replication/CDCReplicator.ts
@@ -0,0 +1,67 @@
+import { replication, storage } from '@powersync/service-core';
+import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js';
+import { CDCReplicationJob } from './CDCReplicationJob.js';
+import { MSSQLModule } from '../module/MSSQLModule.js';
+
+export interface CDCReplicatorOptions extends replication.AbstractReplicatorOptions {
+  connectionFactory: MSSQLConnectionManagerFactory;
+}
+
+export class CDCReplicator extends replication.AbstractReplicator<CDCReplicationJob> {
+  private readonly connectionFactory: MSSQLConnectionManagerFactory;
+
+  constructor(options: CDCReplicatorOptions) {
+    super(options);
+    this.connectionFactory = options.connectionFactory;
+  }
+
+  createJob(options: replication.CreateJobOptions): CDCReplicationJob {
+    return new CDCReplicationJob({
+      id: this.createJobId(options.storage.group_id),
+      storage: options.storage,
+      metrics: this.metrics,
+      lock: options.lock,
+      connectionFactory: this.connectionFactory,
+      rateLimiter: this.rateLimiter
+    });
+  }
+
+  async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise<void> {
+    // The MSSQL module does not create anything on the SQL Server side which requires cleanup.
+  }
+
+  async stop(): Promise<void> {
+    await super.stop();
+    await this.connectionFactory.shutdown();
+  }
+
+  async testConnection() {
+    return await MSSQLModule.testConnection(this.connectionFactory.connectionConfig);
+  }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    // TODO: Get replication lag
+    const lag = await super.getReplicationLagMillis();
+    if (lag != null) {
+      return lag;
+    }
+
+    // Booting or in an error loop. Check last active replication status.
+    // This includes sync rules in an ERROR state.
+    const content = await this.storage.getActiveSyncRulesContent();
+    if (content == null) {
+      return undefined;
+    }
+    // Measure the lag from the last commit or keepalive timestamp.
+    // This is not 100% accurate since it is the commit time in the storage db rather than
+    // the source db, but it's the best we currently have for MSSQL.
+    const checkpointTs = content.last_checkpoint_ts?.getTime() ?? 0;
+    const keepaliveTs = content.last_keepalive_ts?.getTime() ??
0; + const latestTs = Math.max(checkpointTs, keepaliveTs); + if (latestTs != 0) { + return Date.now() - latestTs; + } + + return undefined; + } +} diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts new file mode 100644 index 000000000..1a801c902 --- /dev/null +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -0,0 +1,818 @@ +import { + container, + DatabaseConnectionError, + ErrorCode, + errors, + Logger, + logger as defaultLogger, + ReplicationAssertionError, + ReplicationAbortedError, + ServiceAssertionError +} from '@powersync/lib-services-framework'; +import { + ColumnDescriptor, + getUuidReplicaIdentityBson, + MetricsEngine, + SaveUpdate, + SourceEntityDescriptor, + storage +} from '@powersync/service-core'; + +import { + applyValueContext, + CompatibilityContext, + SqliteInputRow, + SqlSyncRules, + TablePattern +} from '@powersync/service-sync-rules'; + +import { ReplicationMetric } from '@powersync/service-types'; +import { + BatchedSnapshotQuery, + IdSnapshotQuery, + PrimaryKeyValue, + SimpleSnapshotQuery, + MSSQLSnapshotQuery +} from './MSSQLSnapshotQuery.js'; +import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; +import * as schema_utils from '../utils/schema.js'; +import { + checkSourceConfiguration, + getCaptureInstance, + getLatestLSN, + isIColumnMetadata, + isTableEnabledForCDC, + isWithinRetentionThreshold +} from '../utils/mssql.js'; +import { ResolvedTable } from '../utils/schema.js'; +import sql from 'mssql'; +import { toSqliteInputRow } from '../common/mssqls-to-sqlite.js'; +import { LSN } from '../common/LSN.js'; +import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; +import { MSSQLSourceTableCache } from '../common/MSSQLSourceTableCache.js'; + +export interface CDCStreamOptions { + connections: MSSQLConnectionManager; + storage: storage.SyncRulesBucketStorage; + metrics: MetricsEngine; + abortSignal: AbortSignal; + logger?: Logger; + /** + * Override snapshot batch size for testing. + * Defaults to 10_000. + * Note that queries are streamed, so we don't keep that much data in memory. + */ + snapshotBatchSize?: number; +} + +export enum SnapshotStatus { + IN_PROGRESS = 'in-progress', + DONE = 'done', + RESTART_REQUIRED = 'restart-required' +} + +interface WriteChangePayload { + type: storage.SaveOperationTag; + row: sql.IRecordSet; + previous_row?: sql.IRecordSet; + schema: string; + table: string; + sourceTable: storage.SourceTable; + columns: Map; +} + +export class CDCConfigurationError extends Error { + constructor(message: string) { + super(message); + } +} + +/** + * Thrown when required updates in the CDC instance tables are no longer available + * + * Possible reasons: + * * Older data has been cleaned up due to exceeding the retention period. + * This can happen if PowerSync was stopped for a long period of time. + */ +export class CDCDataExpiredError extends DatabaseConnectionError { + constructor(message: string, cause: any) { + super(ErrorCode.PSYNC_S1500, message, cause); + } +} + +export class CDCStream { + private readonly syncRules: SqlSyncRules; + private readonly storage: storage.SyncRulesBucketStorage; + private readonly connections: MSSQLConnectionManager; + private readonly abortSignal: AbortSignal; + private readonly logger: Logger; + + private tableCache = new MSSQLSourceTableCache(); + + private startedPolling = false; + + /** + * Time of the oldest uncommitted change, according to the source db. + * This is used to determine the replication lag. 
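+   *
+   * For example, if the oldest change in an uncommitted transaction was observed
+   * 5 seconds ago, the reported replication lag is roughly 5000ms.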
+ */ + private oldestUncommittedChange: Date | null = null; + /** + * Keep track of whether we have done a commit or keepalive yet. + * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present. + */ + private isStartingReplication = true; + + constructor(private options: CDCStreamOptions) { + this.logger = options.logger ?? defaultLogger; + this.storage = options.storage; + this.syncRules = options.storage.getParsedSyncRules({ defaultSchema: options.connections.schema }); + this.connections = options.connections; + this.abortSignal = options.abortSignal; + } + + private get metrics() { + return this.options.metrics; + } + + get stopped() { + return this.abortSignal.aborted; + } + + get defaultSchema() { + return this.connections.schema; + } + + get groupId() { + return this.options.storage.group_id; + } + + get connectionId() { + const { connectionId } = this.connections; + // Default to 1 if not set + if (!connectionId) { + return 1; + } + /** + * This is often `"default"` (string) which will parse to `NaN` + */ + const parsed = Number.parseInt(connectionId); + if (isNaN(parsed)) { + return 1; + } + return parsed; + } + + get connectionTag() { + return this.connections.connectionTag; + } + + get snapshotBatchSize() { + return this.options.snapshotBatchSize ?? 10_000; + } + + async getQualifiedTableNames( + batch: storage.BucketStorageBatch, + tablePattern: TablePattern + ): Promise { + if (tablePattern.connectionTag != this.connections.connectionTag) { + return []; + } + + const matchedTables: ResolvedTable[] = await schema_utils.getTablesFromPattern(this.connections, tablePattern); + + const tables: MSSQLSourceTable[] = []; + for (const matchedTable of matchedTables) { + const isEnabled = await isTableEnabledForCDC({ + connectionManager: this.connections, + table: matchedTable.name, + schema: matchedTable.schema + }); + + if (!isEnabled) { + this.logger.info(`Skipping ${matchedTable.schema}.${matchedTable.name} - table is not enabled for CDC.`); + continue; + } + + // TODO: Check RLS settings for table + + const replicaIdColumns = await schema_utils.getReplicationIdentityColumns({ + connectionManager: this.connections, + tableName: matchedTable.name, + schema: matchedTable.schema + }); + + const table = await this.processTable( + batch, + { + name: matchedTable.name, + schema: matchedTable.schema, + objectId: matchedTable.objectId, + replicaIdColumns: replicaIdColumns.columns + }, + false + ); + + tables.push(table); + } + return tables; + } + + async processTable( + batch: storage.BucketStorageBatch, + table: SourceEntityDescriptor, + snapshot: boolean + ): Promise { + if (!table.objectId && typeof table.objectId != 'number') { + throw new ReplicationAssertionError(`objectId expected, got ${typeof table.objectId}`); + } + const resolved = await this.storage.resolveTable({ + group_id: this.groupId, + connection_id: this.connectionId, + connection_tag: this.connectionTag, + entity_descriptor: table, + sync_rules: this.syncRules + }); + const captureInstance = await getCaptureInstance(this.connections, resolved.table); + if (!captureInstance) { + throw new ServiceAssertionError(`Missing capture instance for table ${resolved.table}`); + } + const resolvedTable = new MSSQLSourceTable({ + sourceTable: resolved.table, + captureInstance: captureInstance + }); + this.tableCache.set(resolvedTable); + + // Drop conflicting tables. This includes for example renamed tables. + await batch.drop(resolved.dropTables); + + // Snapshot if: + // 1. 
Snapshot is requested (false for initial snapshot, since that process handles it elsewhere) + // 2. Snapshot is not already done, AND: + // 3. The table is used in sync rules. + const shouldSnapshot = snapshot && !resolved.table.snapshotComplete && resolved.table.syncAny; + + if (shouldSnapshot) { + // Truncate this table in case a previous snapshot was interrupted. + await batch.truncate([resolved.table]); + + // Start the snapshot inside a transaction. + try { + await this.snapshotTableInTx(batch, resolvedTable); + } finally { + // TODO Cleanup? + } + } + + return resolvedTable; + } + + private async snapshotTableInTx( + batch: storage.BucketStorageBatch, + table: MSSQLSourceTable, + limited?: PrimaryKeyValue[] + ): Promise { + // Note: We use the "Read Committed" isolation level here, not snapshot isolation. + // The data may change during the transaction, but that is compensated for in the streaming + // replication afterward. + const transaction = await this.connections.createTransaction(); + await transaction.begin(sql.ISOLATION_LEVEL.READ_COMMITTED); + try { + await this.snapshotTable(batch, transaction, table, limited); + + // Get the current LSN. + // The data will only be consistent once incremental replication has passed that point. + // We have to get this LSN _after_ we have finished the table snapshot. + // + // There are basically two relevant LSNs here: + // A: The LSN before the snapshot starts. We don't explicitly record this on the PowerSync side, + // but it is implicitly recorded in the replication slot. + // B: The LSN after the table snapshot is complete, which is what we get here. + // When we do the snapshot queries, the data that we get back for each batch could match the state + // anywhere between A and B. To actually have a consistent state on our side, we need to: + // 1. Complete the snapshot. + // 2. Wait until logical replication has caught up with all the changes between A and B. + // Calling `markSnapshotDone(LSN B)` covers that. + const tableLsnNotBefore = await getLatestLSN(this.connections); + // Side note: A ROLLBACK would probably also be fine here, since we only read in this transaction. + await transaction.commit(); + const [updatedSourceTable] = await batch.markSnapshotDone([table.sourceTable], tableLsnNotBefore.toString()); + this.tableCache.updateSourceTable(updatedSourceTable); + } catch (e) { + await transaction.rollback(); + throw e; + } + } + + private async snapshotTable( + batch: storage.BucketStorageBatch, + transaction: sql.Transaction, + table: MSSQLSourceTable, + limited?: PrimaryKeyValue[] + ) { + let totalEstimatedCount = table.sourceTable.snapshotStatus?.totalEstimatedCount; + let replicatedCount = table.sourceTable.snapshotStatus?.replicatedCount ?? 0; + let lastCountTime = 0; + let query: MSSQLSnapshotQuery; + // We do streaming on two levels: + // 1. Coarse select from the entire table, stream rows 1 by one + // 2. Fine level: Stream batches of rows with each fetch call + if (limited) { + query = new IdSnapshotQuery(transaction, table, limited); + } else if (BatchedSnapshotQuery.supports(table)) { + // Single primary key - we can use the primary key for chunking + const orderByKey = table.sourceTable.replicaIdColumns[0]; + query = new BatchedSnapshotQuery( + transaction, + table, + this.snapshotBatchSize, + table.sourceTable.snapshotStatus?.lastKey ?? 
null + ); + if (table.sourceTable.snapshotStatus?.lastKey != null) { + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - resuming from ${orderByKey.name} > ${(query as BatchedSnapshotQuery).lastKey}` + ); + } else { + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - resumable` + ); + } + } else { + // Fallback case - query the entire table + this.logger.info( + `Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()} - not resumable` + ); + query = new SimpleSnapshotQuery(transaction, table); + replicatedCount = 0; + } + await query.initialize(); + + let columns: sql.IColumnMetadata | null = null; + let hasRemainingData = true; + while (hasRemainingData) { + // Fetch 10k at a time. + // The balance here is between latency overhead per FETCH call, + // and not spending too much time on each FETCH call. + // We aim for a couple of seconds on each FETCH call. + const cursor = query.next(); + hasRemainingData = false; + // MSSQL streams rows one by one + for await (const result of cursor) { + if (isIColumnMetadata(result)) { + columns = result; + continue; + } else { + if (!columns) { + throw new ReplicationAssertionError(`Missing column metadata`); + } + const row: SqliteInputRow = toSqliteInputRow(result, columns!); + + // This auto-flushes when the batch reaches its size limit + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table.sourceTable, + before: undefined, + beforeReplicaId: undefined, + after: row, + afterReplicaId: getUuidReplicaIdentityBson(row, table.sourceTable.replicaIdColumns) + }); + + replicatedCount++; + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + } + + this.touch(); + } + + // Important: flush before marking progress + await batch.flush(); + if (limited == null) { + let lastKey: Uint8Array | undefined; + if (query instanceof BatchedSnapshotQuery) { + lastKey = query.getLastKeySerialized(); + } + if (lastCountTime < performance.now() - 10 * 60 * 1000) { + // Even though we're doing the snapshot inside a transaction, the transaction uses + // the default "Read Committed" isolation level. This means we can get new data + // within the transaction, so we re-estimate the count every 10 minutes when replicating + // large tables. + totalEstimatedCount = await this.estimatedCountNumber(table, transaction); + lastCountTime = performance.now(); + } + const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { + lastKey: lastKey, + replicatedCount: replicatedCount, + totalEstimatedCount: totalEstimatedCount + }); + this.tableCache.updateSourceTable(updatedSourceTable); + + this.logger.info(`Replicating ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); + } else { + this.logger.info(`Replicating ${table.toQualifiedName()} ${replicatedCount}/${limited.length} for resnapshot`); + } + + if (this.abortSignal.aborted) { + // We only abort after flushing + throw new ReplicationAbortedError(`Initial replication interrupted`); + } + } + } + + /** + * Estimate the number of rows in a table. This query uses partition stats view to get a fast estimate of the row count. + * This requires that the MSSQL DB user has the VIEW DATABASE PERFORMANCE STATE permission. + * @param table + * @param transaction + */ + async estimatedCountNumber(table: MSSQLSourceTable, transaction?: sql.Transaction): Promise { + const request = transaction ? 
transaction.request() : await this.connections.createRequest(); + const { recordset: result } = await request.query( + `SELECT SUM(row_count) AS total_rows + FROM sys.dm_db_partition_stats + WHERE object_id = OBJECT_ID('${table.toQualifiedName()}') + AND index_id < 2;` + ); + // TODO Fallback query in case user does not have permission? + return result[0].total_rows ?? -1; + } + + /** + * Start initial replication. + * + * If (partial) replication was done before on this slot, this clears the state + * and starts again from scratch. + */ + async startInitialReplication(status: SnapshotStatus) { + if (status === SnapshotStatus.RESTART_REQUIRED) { + // This happens if the last replicated checkpoint LSN is no longer available in the CDC tables. + await this.storage.clear({ signal: this.abortSignal }); + } + + const sourceTables = this.syncRules.getSourceTables(); + await this.storage.startBatch( + { + logger: this.logger, + zeroLSN: LSN.ZERO, + defaultSchema: this.defaultSchema, + storeCurrentData: true, + skipExistingRows: true + }, + async (batch) => { + const tablesWithStatus: MSSQLSourceTable[] = []; + for (const tablePattern of sourceTables) { + const tables = await this.getQualifiedTableNames(batch, tablePattern); + // Pre-get counts + for (const table of tables) { + if (table.sourceTable.snapshotComplete) { + this.logger.info(`Skipping ${table.toQualifiedName()} - snapshot already done.`); + continue; + } + const count = await this.estimatedCountNumber(table); + const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { + totalEstimatedCount: count + }); + this.tableCache.updateSourceTable(updatedSourceTable); + tablesWithStatus.push(table); + + this.logger.info(`To replicate: ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); + } + } + + for (const table of tablesWithStatus) { + await this.snapshotTableInTx(batch, table); + this.touch(); + } + + // Always commit the initial snapshot at zero. + // This makes sure we don't skip any changes applied before starting this snapshot, + // in the case of snapshot retries. + await batch.commit(LSN.ZERO); + } + ); + } + + private getTable(tableId: number): MSSQLSourceTable { + const table = this.tableCache.get(tableId); + if (table == null) { + // We should always receive a replication message before the relation is used. + // If we can't find it, it's a bug. + throw new ReplicationAssertionError(`Table with ${tableId} not found in cache`); + } + return table; + } + + // async writeChange( + // batch: storage.BucketStorageBatch, + // payload: WriteChangePayload + // ): Promise { + // switch (payload.type) { + // case storage.SaveOperationTag.INSERT: + // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + // const record = toSqliteInputRow(payload.row, payload.columns); + // return await batch.save({ + // tag: storage.SaveOperationTag.INSERT, + // sourceTable: payload.sourceTable, + // before: undefined, + // beforeReplicaId: undefined, + // after: record, + // afterReplicaId: getUuidReplicaIdentityBson(record, payload.sourceTable.replicaIdColumns) + // }); + // case storage.SaveOperationTag.UPDATE: + // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + // // The previous row may be null if the replica id columns are unchanged. + // // It's fine to treat that the same as an insert. + // const beforeUpdated = payload.previous_row + // ? 
toSqliteInputRow(payload.previous_row, payload.columns) + // : undefined; + // const after = toSqliteInputRow(payload.row, payload.columns); + // + // return await batch.save({ + // tag: storage.SaveOperationTag.UPDATE, + // sourceTable: payload.sourceTable, + // before: beforeUpdated, + // beforeReplicaId: beforeUpdated + // ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns) + // : undefined, + // after: after, + // afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns) + // }); + // + // case storage.SaveOperationTag.DELETE: + // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + // const beforeDeleted = toSqliteInputRow(payload.row, payload.columns); + // + // return await batch.save({ + // tag: storage.SaveOperationTag.DELETE, + // sourceTable: payload.sourceTable, + // before: beforeDeleted, + // beforeReplicaId: getUuidReplicaIdentityBson(beforeDeleted, payload.sourceTable.replicaIdColumns), + // after: undefined, + // afterReplicaId: undefined + // }); + // default: + // return null; + // } + // } + + async replicate() { + try { + await this.initReplication(); + //await this.streamChanges(); + } catch (e) { + await this.storage.reportError(e); + throw e; + } + } + + async initReplication() { + const errors = await checkSourceConfiguration(this.connections); + if (errors.length > 0) { + throw new CDCConfigurationError(`CDC Configuration Errors: ${errors.join(', ')}`); + } + + const snapshotStatus = await this.checkSnapshotStatus(); + if (snapshotStatus !== SnapshotStatus.DONE) { + await this.startInitialReplication(snapshotStatus); + } + } + + /** + * Checks if the initial sync has already been completed and if updates from the last checkpoint are still available + * in the CDC instances. + */ + private async checkSnapshotStatus(): Promise { + const status = await this.storage.getStatus(); + const snapshotDone = status.snapshot_done && status.checkpoint_lsn != null; + if (snapshotDone) { + // Snapshot is done, but we still need to check that the last known checkpoint LSN is still + // within the threshold of the CDC tables + this.logger.info(`Initial replication already done`); + + const lastCheckpointLSN = LSN.fromString(status.checkpoint_lsn!); + // Check that the CDC tables still have valid data + const isAvailable = await isWithinRetentionThreshold({ + checkpointLSN: lastCheckpointLSN, + tables: this.tableCache.getAll(), + connectionManager: this.connections + }); + if (!isAvailable) { + this.logger.warn( + `Updates from the last checkpoint are no longer available in the CDC instance, starting initial replication again.` + ); + } + return isAvailable ? SnapshotStatus.DONE : SnapshotStatus.RESTART_REQUIRED; + } + + return SnapshotStatus.IN_PROGRESS; + } + + // async streamChanges() { + // // When changing any logic here, check /docs/wal-lsns.md. + // const { createEmptyCheckpoints } = await this.ensureStorageCompatibility(); + // + // const replicationOptions: Record = { + // proto_version: '1', + // publication_names: PUBLICATION_NAME + // }; + // + // /** + // * Viewing the contents of logical messages emitted with `pg_logical_emit_message` + // * is only supported on Postgres >= 14.0. + // * https://www.postgresql.org/docs/14/protocol-logical-replication.html + // */ + // const exposesLogicalMessages = await this.checkLogicalMessageSupport(); + // if (exposesLogicalMessages) { + // /** + // * Only add this option if the Postgres server supports it. 
+ // * Adding the option to a server that doesn't support it will throw an exception when starting logical replication. + // * Error: `unrecognized pgoutput option: messages` + // */ + // replicationOptions['messages'] = 'true'; + // } + // + // const replicationStream = replicationConnection.logicalReplication({ + // slot: this.slot_name, + // options: replicationOptions + // }); + // + // this.startedStreaming = true; + // + // let resnapshot: { table: storage.SourceTable; key: PrimaryKeyValue }[] = []; + // + // const markRecordUnavailable = (record: SaveUpdate) => { + // if (!IdSnapshotQuery.supports(record.sourceTable)) { + // // If it's not supported, it's also safe to ignore + // return; + // } + // let key: PrimaryKeyValue = {}; + // for (let column of record.sourceTable.replicaIdColumns) { + // const name = column.name; + // const value = record.after[name]; + // if (value == null) { + // // We don't expect this to actually happen. + // // The key should always be present in the "after" record. + // return; + // } + // // We just need a consistent representation of the primary key, and don't care about fixed quirks. + // key[name] = applyValueContext(value, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY); + // } + // resnapshot.push({ + // table: record.sourceTable, + // key: key + // }); + // }; + // + // await this.storage.startBatch( + // { + // logger: this.logger, + // zeroLSN: ZERO_LSN, + // defaultSchema: POSTGRES_DEFAULT_SCHEMA, + // storeCurrentData: true, + // skipExistingRows: false, + // markRecordUnavailable + // }, + // async (batch) => { + // // We don't handle any plain keepalive messages while we have transactions. + // // While we have transactions, we use that to advance the position. + // // Replication never starts in the middle of a transaction, so this starts as false. + // let skipKeepalive = false; + // let count = 0; + // + // for await (const chunk of replicationStream.pgoutputDecode()) { + // this.touch(); + // + // if (this.abortSignal.aborted) { + // break; + // } + // + // // chunkLastLsn may come from normal messages in the chunk, + // // or from a PrimaryKeepalive message. + // const { messages, lastLsn: chunkLastLsn } = chunk; + // + // /** + // * We can check if an explicit keepalive was sent if `exposesLogicalMessages == true`. + // * If we can't check the logical messages, we should assume a keepalive if we + // * receive an empty array of messages in a replication event. + // */ + // const assumeKeepAlive = !exposesLogicalMessages; + // let keepAliveDetected = false; + // const lastCommit = messages.findLast((msg) => msg.tag == 'commit'); + // + // for (const msg of messages) { + // if (msg.tag == 'relation') { + // await this.handleRelation(batch, getPgOutputRelation(msg), true); + // } else if (msg.tag == 'begin') { + // // This may span multiple transactions in the same chunk, or even across chunks. + // skipKeepalive = true; + // if (this.oldestUncommittedChange == null) { + // this.oldestUncommittedChange = new Date(Number(msg.commitTime / 1000n)); + // } + // } else if (msg.tag == 'commit') { + // this.metrics.getCounter(ReplicationMetric.TRANSACTIONS_REPLICATED).add(1); + // if (msg == lastCommit) { + // // Only commit if this is the last commit in the chunk. + // // This effectively lets us batch multiple transactions within the same chunk + // // into a single flush, increasing throughput for many small transactions. 
+ // skipKeepalive = false; + // // flush() must be before the resnapshot check - that is + // // typically what reports the resnapshot records. + // await batch.flush({ oldestUncommittedChange: this.oldestUncommittedChange }); + // // This _must_ be checked after the flush(), and before + // // commit() or ack(). We never persist the resnapshot list, + // // so we have to process it before marking our progress. + // if (resnapshot.length > 0) { + // await this.resnapshot(batch, resnapshot); + // resnapshot = []; + // } + // const didCommit = await batch.commit(msg.lsn!, { + // createEmptyCheckpoints, + // oldestUncommittedChange: this.oldestUncommittedChange + // }); + // await this.ack(msg.lsn!, replicationStream); + // if (didCommit) { + // this.oldestUncommittedChange = null; + // this.isStartingReplication = false; + // } + // } + // } else { + // if (count % 100 == 0) { + // this.logger.info(`Replicating op ${count} ${msg.lsn}`); + // } + // + // /** + // * If we can see the contents of logical messages, then we can check if a keepalive + // * message is present. We only perform a keepalive (below) if we explicitly detect a keepalive message. + // * If we can't see the contents of logical messages, then we should assume a keepalive is required + // * due to the default value of `assumeKeepalive`. + // */ + // if (exposesLogicalMessages && isKeepAliveMessage(msg)) { + // keepAliveDetected = true; + // } + // + // count += 1; + // const flushResult = await this.writeChange(batch, msg); + // if (flushResult != null && resnapshot.length > 0) { + // // If we have large transactions, we also need to flush the resnapshot list + // // periodically. + // // TODO: make sure this bit is actually triggered + // await this.resnapshot(batch, resnapshot); + // resnapshot = []; + // } + // } + // } + // + // if (!skipKeepalive) { + // if (assumeKeepAlive || keepAliveDetected) { + // // Reset the detection flag. + // keepAliveDetected = false; + // + // // In a transaction, we ack and commit according to the transaction progress. + // // Outside transactions, we use the PrimaryKeepalive messages to advance progress. + // // Big caveat: This _must not_ be used to skip individual messages, since this LSN + // // may be in the middle of the next transaction. + // // It must only be used to associate checkpoints with LSNs. + // const didCommit = await batch.keepalive(chunkLastLsn); + // if (didCommit) { + // this.oldestUncommittedChange = null; + // } + // + // this.isStartingReplication = false; + // } + // + // // We receive chunks with empty messages often (about each second). + // // Acknowledging here progresses the slot past these and frees up resources. + // await this.ack(chunkLastLsn, replicationStream); + // } + // + // this.metrics.getCounter(ReplicationMetric.CHUNKS_REPLICATED).add(1); + // } + // } + // ); + // } + + // async ack(lsn: string, replicationStream: pgwire.ReplicationStream) { + // if (lsn == ZERO_LSN) { + // return; + // } + // + // replicationStream.ack(lsn); + // } + + async getReplicationLagMillis(): Promise { + if (this.oldestUncommittedChange == null) { + if (this.isStartingReplication) { + // We don't have anything to compute replication lag with yet. + return undefined; + } else { + // We don't have any uncommitted changes, so replication is up-to-date. 
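+        // (We only reach this branch once a commit or keepalive has set
+        // isStartingReplication to false.)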
+ return 0; + } + } + return Date.now() - this.oldestUncommittedChange.getTime(); + } + + private touch() { + container.probes.touch().catch((e) => { + this.logger.error(`Error touching probe`, e); + }); + } +} diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts new file mode 100644 index 000000000..4c65d3874 --- /dev/null +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -0,0 +1,225 @@ +import { bson, ColumnDescriptor } from '@powersync/service-core'; +import { SqliteValue } from '@powersync/service-sync-rules'; +import { ServiceAssertionError } from '@powersync/lib-services-framework'; +import { MSSQLBaseType } from '../types/mssql-data-types.js'; +import sql from 'mssql'; +import { escapeIdentifier } from '../utils/mssql.js'; +import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; + +export interface MSSQLSnapshotQuery { + initialize(): Promise; + + /** + * Returns an async iterable iterator that yields the column metadata for the query followed by rows of data. + */ + next(): AsyncIterableIterator>; +} + +export type PrimaryKeyValue = Record; + +export interface MissingRow { + table: MSSQLSourceTable; + key: PrimaryKeyValue; +} + +/** + * Snapshot query using a plain SELECT * FROM table + * + * This supports all tables but does not efficiently resume the snapshot + * if the process is restarted. + */ +export class SimpleSnapshotQuery implements MSSQLSnapshotQuery { + public constructor( + private readonly transaction: sql.Transaction, + private readonly table: MSSQLSourceTable + ) {} + + public async initialize(): Promise {} + + public async *next(): AsyncIterableIterator> { + const request = this.transaction.request(); + request.stream = true; + const metadataPromise = new Promise((resolve) => { + request.on('recordset', resolve); + }); + const stream = request.toReadableStream(); + + request.query(`SELECT * FROM ${this.table.toQualifiedName()}`); + + const columnMetadata: sql.IColumnMetadata = await metadataPromise; + yield columnMetadata; + + // MSSQL only streams one row at a time + for await (const row of stream) { + yield row; + } + } +} + +/** + * Performs a table snapshot query, batching by ranges of primary key data. + * + * This may miss some rows if they are modified during the snapshot query. + * In that case, replication will pick up those rows afterward, + * possibly resulting in an IdSnapshotQuery. + * + * Currently, this only supports a table with a single primary key column, + * of a select few types. + */ +export class BatchedSnapshotQuery implements MSSQLSnapshotQuery { + /** + * Primary key types that we support for batched snapshots. + * + * Can expand this over time as we add more tests, + * and ensure there are no issues with type conversion. 
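+   *
+   * Keys are paged with a query of the shape (illustrative):
+   *   SELECT TOP(batchSize) * FROM [table] WHERE [key] > @lastKey ORDER BY [key]
+   * so a type needs a stable ORDER BY and parameter conversion to be listed here.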
+   */
+  static SUPPORTED_TYPES = [
+    MSSQLBaseType.TEXT,
+    MSSQLBaseType.NTEXT,
+    MSSQLBaseType.VARCHAR,
+    MSSQLBaseType.NVARCHAR,
+    MSSQLBaseType.CHAR,
+    MSSQLBaseType.NCHAR,
+    MSSQLBaseType.UNIQUEIDENTIFIER,
+    MSSQLBaseType.TINYINT,
+    MSSQLBaseType.SMALLINT,
+    MSSQLBaseType.INT,
+    MSSQLBaseType.BIGINT
+  ];
+
+  static supports(table: MSSQLSourceTable) {
+    if (table.sourceTable.replicaIdColumns.length != 1) {
+      return false;
+    }
+    const primaryKey = table.sourceTable.replicaIdColumns[0];
+
+    return primaryKey.typeId != null && BatchedSnapshotQuery.SUPPORTED_TYPES.includes(Number(primaryKey.typeId));
+  }
+
+  private readonly key: ColumnDescriptor;
+  lastKey: string | bigint | null = null;
+
+  public constructor(
+    private readonly transaction: sql.Transaction,
+    private readonly table: MSSQLSourceTable,
+    private readonly batchSize: number = 10_000,
+    lastKeySerialized: Uint8Array | null
+  ) {
+    this.key = table.sourceTable.replicaIdColumns[0];
+
+    if (lastKeySerialized != null) {
+      this.lastKey = this.deserializeKey(lastKeySerialized);
+    }
+  }
+
+  public async initialize(): Promise {
+    // No-op
+  }
+
+  public getLastKeySerialized(): Uint8Array {
+    return bson.serialize({ [this.key.name]: this.lastKey });
+  }
+
+  public async *next(): AsyncIterableIterator> {
+    const escapedKeyName = escapeIdentifier(this.key.name);
+    const metadataRequest = this.transaction.request();
+    metadataRequest.stream = true;
+    const metadataPromise = new Promise((resolve, reject) => {
+      metadataRequest.on('recordset', resolve);
+      metadataRequest.on('error', reject);
+    });
+    metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`);
+
+    const columnMetadata: sql.IColumnMetadata = await metadataPromise;
+
+    const foundPrimaryKey = columnMetadata[this.key.name];
+    if (!foundPrimaryKey) {
+      throw new Error(
+        `Cannot find primary key column ${this.key.name} in results. Keys: ${Object.keys(columnMetadata).join(', ')}`
+      );
+    }
+
+    yield columnMetadata;
+
+    const request = this.transaction.request();
+    // Stream mode must be enabled before the readable stream is consumed.
+    request.stream = true;
+    const stream = request.toReadableStream();
+    if (this.lastKey == null) {
+      request.query(`SELECT TOP(${this.batchSize}) * FROM ${this.table.toQualifiedName()} ORDER BY ${escapedKeyName}`);
+    } else {
+      if (this.key.typeId == null) {
+        throw new Error(`typeId required for primary key ${this.key.name}`);
+      }
+      request
+        .input('lastKey', this.lastKey)
+        .query(
+          `SELECT TOP(${this.batchSize}) * FROM ${this.table.toQualifiedName()} WHERE ${escapedKeyName} > @lastKey ORDER BY ${escapedKeyName}`
+        );
+    }
+
+    // MSSQL only streams one row at a time
+    for await (const row of stream) {
+      this.lastKey = row[this.key.name];
+      yield row;
+    }
+  }
+
+  private deserializeKey(key: Uint8Array) {
+    const decoded = bson.deserialize(key, { useBigInt64: true });
+    const keys = Object.keys(decoded);
+    if (keys.length != 1) {
+      throw new ServiceAssertionError(`Multiple keys found: ${keys.join(', ')}`);
+    }
+    if (keys[0] != this.key.name) {
+      throw new ServiceAssertionError(`Key name mismatch: expected ${this.key.name}, got ${keys[0]}`);
+    }
+    const value = decoded[this.key.name];
+    return value;
+  }
+}
+
+/**
+ * This performs a snapshot query using a list of primary keys.
+ *
+ * This is not used for general snapshots, but is used when we need to re-fetch specific rows
+ * during streaming replication.
+ */
+export class IdSnapshotQuery implements MSSQLSnapshotQuery {
+  static supports(table: MSSQLSourceTable) {
+    // We have the same requirements as BatchedSnapshotQuery.
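+    // (i.e. a single replica-id column whose type is in SUPPORTED_TYPES)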
// This is typically only used as a fallback when BatchedSnapshotQuery
+    // skipped some rows.
+    return BatchedSnapshotQuery.supports(table);
+  }
+
+  public constructor(
+    private readonly transaction: sql.Transaction,
+    private readonly table: MSSQLSourceTable,
+    private readonly keys: PrimaryKeyValue[]
+  ) {}
+
+  public async initialize(): Promise {
+    // No-op
+  }
+
+  public async *next(): AsyncIterableIterator> {
+    const request = this.transaction.request();
+    request.stream = true;
+    const metadataPromise = new Promise((resolve) => {
+      request.on('recordset', resolve);
+    });
+    const stream = request.toReadableStream();
+
+    const keyDefinition = this.table.sourceTable.replicaIdColumns[0];
+    const ids = this.keys.map((record) => record[keyDefinition.name]);
+
+    // The mssql driver does not expand an array parameter into a value list,
+    // so bind each key as its own named parameter and build an IN (...) clause.
+    const idParams = ids.map((id, index) => {
+      request.input(`id${index}`, id);
+      return `@id${index}`;
+    });
+    request.query(
+      `SELECT * FROM ${this.table.toQualifiedName()} WHERE ${escapeIdentifier(keyDefinition.name)} IN (${idParams.join(', ')})`
+    );
+    const columnMetadata: sql.IColumnMetadata = await metadataPromise;
+    yield columnMetadata;
+
+    for await (const row of stream) {
+      yield row;
+    }
+  }
+}

From b63ed48f763141e52b1cd4d41e7d7301c2b929bb Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Mon, 20 Oct 2025 17:10:52 +0200
Subject: [PATCH 08/42] Updated base tsconfig to include mssql module

More module template files added
---
 modules/module-mssql/LICENSE                 |  67 ++
 modules/module-mssql/README.md               |   3 +
 modules/module-mssql/dev/docker-compose.yaml |   2 +-
 modules/module-mssql/package.json            |  51 ++
 modules/module-mssql/tsconfig.json           |  26 +
 modules/module-mssql/vitest.config.ts        |  15 +
 pnpm-lock.yaml                               | 674 ++++++++++++++++++-
 tsconfig.json                                |   3 +
 8 files changed, 817 insertions(+), 24 deletions(-)
 create mode 100644 modules/module-mssql/LICENSE
 create mode 100644 modules/module-mssql/README.md
 create mode 100644 modules/module-mssql/package.json
 create mode 100644 modules/module-mssql/tsconfig.json
 create mode 100644 modules/module-mssql/vitest.config.ts

diff --git a/modules/module-mssql/LICENSE b/modules/module-mssql/LICENSE
new file mode 100644
index 000000000..3ff64c975
--- /dev/null
+++ b/modules/module-mssql/LICENSE
@@ -0,0 +1,67 @@
+# Functional Source License, Version 1.1, ALv2 Future License
+
+## Abbreviation
+
+FSL-1.1-ALv2
+
+## Notice
+
+Copyright 2023-2025 Journey Mobile, Inc.
+
+## Terms and Conditions
+
+### Licensor ("We")
+
+The party offering the Software under these Terms and Conditions.
+
+### The Software
+
+The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software.
+
+### License Grant
+
+Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below.
+
+### Permitted Purpose
+
+A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that:
+
+1. substitutes for the Software;
+2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or
+3. offers the same or substantially similar functionality as the Software.
+
+Permitted Purposes specifically include using the Software:
+
+1. for your internal use and access;
+2. for non-commercial education;
+3. for non-commercial research; and
+4.
in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/modules/module-mssql/README.md b/modules/module-mssql/README.md new file mode 100644 index 000000000..abbf9d41c --- /dev/null +++ b/modules/module-mssql/README.md @@ -0,0 +1,3 @@ +# PowerSync MSSQL Module + +MSSQL replication module for PowerSync diff --git a/modules/module-mssql/dev/docker-compose.yaml b/modules/module-mssql/dev/docker-compose.yaml index a7bb364ef..9c5a41d82 100644 --- a/modules/module-mssql/dev/docker-compose.yaml +++ b/modules/module-mssql/dev/docker-compose.yaml @@ -1,7 +1,7 @@ services: mssql: platform: linux/amd64 - image: mcr.microsoft.com/mssql/server:2025-latest + image: mcr.microsoft.com/mssql/server:2022-latest container_name: mssql ports: - "1433:1433" diff --git a/modules/module-mssql/package.json b/modules/module-mssql/package.json new file mode 100644 index 000000000..9c8f0a932 --- /dev/null +++ b/modules/module-mssql/package.json @@ -0,0 +1,51 @@ +{ + "name": "@powersync/service-module-mssql", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.0.0", + "license": "FSL-1.1-ALv2", + "main": "dist/index.js", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-errors": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "mssql": "^11.0.1", + "semver": "^7.7.2", + "ts-codec": "^1.3.0", + "uri-js": "^4.4.1", + "uuid": "^11.1.0" + }, + "devDependencies": { + "@powersync/service-core-tests": "workspace:*", + "@powersync/service-module-mongodb-storage": "workspace:*", + "@powersync/service-module-postgres-storage": "workspace:*", + "@types/mssql": "^9.1.7", + "@types/semver": "^7.7.1", + "@types/uuid": "^10.0.0" + } +} diff --git a/modules/module-mssql/tsconfig.json b/modules/module-mssql/tsconfig.json new file mode 100644 index 000000000..00738ba7e --- /dev/null +++ b/modules/module-mssql/tsconfig.json @@ -0,0 +1,26 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "typeRoots": ["./node_modules/@types"] + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/modules/module-mssql/vitest.config.ts b/modules/module-mssql/vitest.config.ts new file mode 100644 index 000000000..7a39c1f71 --- /dev/null +++ b/modules/module-mssql/vitest.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vitest/config'; +import tsconfigPaths from 'vite-tsconfig-paths'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 
fdf514281..d663557a9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -255,6 +255,61 @@ importers: specifier: workspace:* version: link:../../packages/service-core-tests + modules/module-mssql: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-errors': + specifier: workspace:* + version: link:../../packages/service-errors + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + mssql: + specifier: ^11.0.1 + version: 11.0.1 + semver: + specifier: ^7.7.2 + version: 7.7.2 + ts-codec: + specifier: ^1.3.0 + version: 1.3.0 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^11.1.0 + version: 11.1.0 + devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage + '@powersync/service-module-postgres-storage': + specifier: workspace:* + version: link:../module-postgres-storage + '@types/mssql': + specifier: ^9.1.7 + version: 9.1.7 + '@types/semver': + specifier: ^7.7.1 + version: 7.7.1 + '@types/uuid': + specifier: ^10.0.0 + version: 10.0.0 + modules/module-mysql: dependencies: '@powersync/lib-services-framework': @@ -310,8 +365,8 @@ importers: specifier: ^3.2.24 version: 3.2.24 '@types/semver': - specifier: ^7.5.4 - version: 7.5.8 + specifier: ^7.7.1 + version: 7.7.1 modules/module-postgres: dependencies: @@ -704,7 +759,7 @@ importers: version: 16.14.20 ts-node: specifier: ^10.9.1 - version: 10.9.2(@types/node@22.16.2)(typescript@5.7.3) + version: 10.9.2(@types/node@22.16.2)(typescript@5.9.2) test-client: dependencies: @@ -736,6 +791,74 @@ importers: packages: + '@azure-rest/core-client@2.5.0': + resolution: {integrity: sha512-KMVIPxG6ygcQ1M2hKHahF7eddKejYsWTjoLIfTWiqnaj42dBkYzj4+S8rK9xxmlOaEHKZHcMrRbm0NfN4kgwHw==} + engines: {node: '>=20.0.0'} + + '@azure/abort-controller@2.1.2': + resolution: {integrity: sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==} + engines: {node: '>=18.0.0'} + + '@azure/core-auth@1.10.0': + resolution: {integrity: sha512-88Djs5vBvGbHQHf5ZZcaoNHo6Y8BKZkt3cw2iuJIQzLEgH4Ox6Tm4hjFhbqOxyYsgIG/eJbFEHpxRIfEEWv5Ow==} + engines: {node: '>=20.0.0'} + + '@azure/core-client@1.10.0': + resolution: {integrity: sha512-O4aP3CLFNodg8eTHXECaH3B3CjicfzkxVtnrfLkOq0XNP7TIECGfHpK/C6vADZkWP75wzmdBnsIA8ksuJMk18g==} + engines: {node: '>=20.0.0'} + + '@azure/core-http-compat@2.3.0': + resolution: {integrity: sha512-qLQujmUypBBG0gxHd0j6/Jdmul6ttl24c8WGiLXIk7IHXdBlfoBqW27hyz3Xn6xbfdyVSarl1Ttbk0AwnZBYCw==} + engines: {node: '>=18.0.0'} + + '@azure/core-lro@2.7.2': + resolution: {integrity: sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw==} + engines: {node: '>=18.0.0'} + + '@azure/core-paging@1.6.2': + resolution: {integrity: sha512-YKWi9YuCU04B55h25cnOYZHxXYtEvQEbKST5vqRga7hWY9ydd3FZHdeQF8pyh+acWZvppw13M/LMGx0LABUVMA==} + engines: {node: '>=18.0.0'} + + '@azure/core-rest-pipeline@1.22.0': + resolution: {integrity: 
sha512-OKHmb3/Kpm06HypvB3g6Q3zJuvyXcpxDpCS1PnU8OV6AJgSFaee/covXBcPbWc6XDDxtEPlbi3EMQ6nUiPaQtw==} + engines: {node: '>=20.0.0'} + + '@azure/core-tracing@1.3.0': + resolution: {integrity: sha512-+XvmZLLWPe67WXNZo9Oc9CrPj/Tm8QnHR92fFAFdnbzwNdCH1h+7UdpaQgRSBsMY+oW1kHXNUZQLdZ1gHX3ROw==} + engines: {node: '>=20.0.0'} + + '@azure/core-util@1.13.0': + resolution: {integrity: sha512-o0psW8QWQ58fq3i24Q1K2XfS/jYTxr7O1HRcyUE9bV9NttLU+kYOH82Ixj8DGlMTOWgxm1Sss2QAfKK5UkSPxw==} + engines: {node: '>=20.0.0'} + + '@azure/identity@4.11.1': + resolution: {integrity: sha512-0ZdsLRaOyLxtCYgyuqyWqGU5XQ9gGnjxgfoNTt1pvELGkkUFrMATABZFIq8gusM7N1qbqpVtwLOhk0d/3kacLg==} + engines: {node: '>=20.0.0'} + + '@azure/keyvault-common@2.0.0': + resolution: {integrity: sha512-wRLVaroQtOqfg60cxkzUkGKrKMsCP6uYXAOomOIysSMyt1/YM0eUn9LqieAWM8DLcU4+07Fio2YGpPeqUbpP9w==} + engines: {node: '>=18.0.0'} + + '@azure/keyvault-keys@4.10.0': + resolution: {integrity: sha512-eDT7iXoBTRZ2n3fLiftuGJFD+yjkiB1GNqzU2KbY1TLYeXeSPVTVgn2eJ5vmRTZ11978jy2Kg2wI7xa9Tyr8ag==} + engines: {node: '>=18.0.0'} + + '@azure/logger@1.3.0': + resolution: {integrity: sha512-fCqPIfOcLE+CGqGPd66c8bZpwAji98tZ4JI9i/mlTNTlsIWslCfpg48s/ypyLxZTump5sypjrKn2/kY7q8oAbA==} + engines: {node: '>=20.0.0'} + + '@azure/msal-browser@4.21.1': + resolution: {integrity: sha512-qGtzX3HJfJsOVeDcVrFZAYZoxLRjrW2lXzXqijgiBA5EtM9ud7F/EYgKKQ9TJU/WtE46szuZtQZx5vD4pEiknA==} + engines: {node: '>=0.8.0'} + + '@azure/msal-common@15.12.0': + resolution: {integrity: sha512-4ucXbjVw8KJ5QBgnGJUeA07c8iznwlk5ioHIhI4ASXcXgcf2yRFhWzYOyWg/cI49LC9ekpFJeQtO3zjDTbl6TQ==} + engines: {node: '>=0.8.0'} + + '@azure/msal-node@3.7.3': + resolution: {integrity: sha512-MoJxkKM/YpChfq4g2o36tElyzNUMG8mfD6u8NbuaPAsqfGpaw249khAcJYNoIOigUzRw45OjXCOrexE6ImdUxg==} + engines: {node: '>=16'} + '@babel/code-frame@7.24.7': resolution: {integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==} engines: {node: '>=6.9.0'} @@ -1012,6 +1135,9 @@ packages: '@jridgewell/trace-mapping@0.3.9': resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + '@js-joda/core@5.6.5': + resolution: {integrity: sha512-3zwefSMwHpu8iVUW8YYz227sIv6UFqO31p1Bf1ZH/Vom7CmNyUsXjDBlnNzcuhmOL1XfxZ3nvND42kR23XlbcQ==} + '@js-sdsl/ordered-set@4.4.2': resolution: {integrity: sha512-ieYQ8WlBPKYzEo81H3q0DFbd8WtFRXXABb4+vRCF0AO3WWtJZFxYvRGdipUXGrd6tlSySmqhcPuO3J6SCodCxg==} @@ -1481,6 +1607,9 @@ packages: resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} engines: {node: '>=14.16'} + '@tediousjs/connection-string@0.5.0': + resolution: {integrity: sha512-7qSgZbincDDDFyRweCIEvZULFAw5iz/DeunhvuxpL31nfntX3P4Yd4HkHBRg9H8CdqY1e5WFN1PZIz/REL9MVQ==} + '@tootallnate/once@2.0.0': resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} engines: {node: '>= 10'} @@ -1523,6 +1652,9 @@ packages: '@types/lodash@4.17.6': resolution: {integrity: sha512-OpXEVoCKSS3lQqjx9GGGOapBeuW5eUboYHRlHP9urXPX25IKZ6AnP5ZRxtVf63iieUbsHxLn8NQ5Nlftc6yzAA==} + '@types/mssql@9.1.7': + resolution: {integrity: sha512-eIOEe78nuSW5KctDHImDhLZ9a+jV/z/Xs5RBhcG/jrk+YWqhdNmzBmHVWV7aWQ5fW+jbIGtX6Ph+bbVqfhzafg==} + '@types/mysql@2.15.22': resolution: {integrity: sha512-wK1pzsJVVAjYCSZWQoWHziQZbNggXFDUEIGf54g4ZM/ERuP86uGdWeKZWMYlqTPMZfHJJvLPyogXGvCOg87yLQ==} @@ -1550,12 +1682,18 @@ packages: '@types/pg@8.6.1': resolution: {integrity: 
sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==} + '@types/readable-stream@4.0.21': + resolution: {integrity: sha512-19eKVv9tugr03IgfXlA9UVUVRbW6IuqRO5B92Dl4a6pT7K8uaGrNS0GkxiZD0BOk6PLuXl5FhWl//eX/pzYdTQ==} + '@types/semver-utils@1.1.3': resolution: {integrity: sha512-T+YwkslhsM+CeuhYUxyAjWm7mJ5am/K10UX40RuA6k6Lc7eGtq8iY2xOzy7Vq0GOqhl/xZl5l2FwURZMTPTUww==} '@types/semver@7.5.8': resolution: {integrity: sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==} + '@types/semver@7.7.1': + resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==} + '@types/shimmer@1.2.0': resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} @@ -1568,6 +1706,9 @@ packages: '@types/triple-beam@1.3.5': resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==} + '@types/uuid@10.0.0': + resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + '@types/webidl-conversions@7.0.3': resolution: {integrity: sha512-CiJJvcRtIgzadHCYXw7dqEnMNRjhGZlYK05Mj9OyktqV8uVT8fD2BFOB7S1uwBE3Kj2Z+4UyPmFw/Ixgw/LAlA==} @@ -1577,6 +1718,10 @@ packages: '@types/ws@8.2.3': resolution: {integrity: sha512-ahRJZquUYCdOZf/rCsWg88S0/+cb9wazUBHv6HZEe3XdYaBe2zr/slM8J28X07Hn88Pnm4ezo7N8/ofnOgrPVQ==} + '@typespec/ts-http-runtime@0.3.0': + resolution: {integrity: sha512-sOx1PKSuFwnIl7z4RN0Ls7N9AQawmR9r66eI5rFCzLDIs8HTIYrIpH9QjYWoX0lkgGrkLxXhi4QnK7MizPRrIg==} + engines: {node: '>=20.0.0'} + '@vitest/expect@3.0.5': resolution: {integrity: sha512-nNIOqupgZ4v5jWuQx2DSlHLEs7Q4Oh/7AYwNyE+k0UQzG7tSmjPXShUikn1mpNGzYEN2jJbTvLejwShMitovBA==} @@ -1644,6 +1789,10 @@ packages: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + agentkeepalive@4.5.0: resolution: {integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} engines: {node: '>= 8.0.0'} @@ -1782,6 +1931,9 @@ packages: bl@4.1.0: resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + bl@6.1.2: + resolution: {integrity: sha512-6J3oG82fpJ71WF4l0W6XslkwAPMr+Zcp+AmdxJ0L8LsXNzFeO8GYesV2J9AzGArBjrsb2xR50Ocbn/CL1B44TA==} + boxen@7.1.1: resolution: {integrity: sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==} engines: {node: '>=14.16'} @@ -1800,6 +1952,9 @@ packages: resolution: {integrity: sha512-MTxGsqgYTwfshYWTRdmZRC+M7FnG1b4y7RO7p2k3X24Wq0yv1m77Wsj0BzlPzd/IowgESfsruQCUToa7vbOpPQ==} engines: {node: '>=16.20.1'} + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} @@ -1809,6 +1964,10 @@ packages: buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + bundle-name@4.1.0: + resolution: {integrity: 
sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -1936,6 +2095,10 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} + commander@11.1.0: + resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==} + engines: {node: '>=16'} + commander@12.1.0: resolution: {integrity: sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==} engines: {node: '>=18'} @@ -2028,6 +2191,15 @@ packages: supports-color: optional: true + debug@4.4.1: + resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + decompress-response@6.0.0: resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} engines: {node: '>=10'} @@ -2056,6 +2228,14 @@ packages: resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} engines: {node: '>=4.0.0'} + default-browser-id@5.0.0: + resolution: {integrity: sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==} + engines: {node: '>=18'} + + default-browser@5.2.1: + resolution: {integrity: sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==} + engines: {node: '>=18'} + defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} @@ -2063,6 +2243,10 @@ packages: resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} engines: {node: '>=10'} + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -2102,6 +2286,9 @@ packages: eastasianwidth@0.2.0: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + ecdsa-sig-formatter@1.0.11: + resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} @@ -2391,6 +2578,10 @@ packages: resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} engines: {node: '>= 6'} + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + http2-wrapper@2.2.1: resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==} engines: {node: '>=10.19.0'} @@ -2399,6 +2590,10 @@ packages: resolution: {integrity: 
sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + human-id@1.0.2: resolution: {integrity: sha512-UNopramDEhHJD+VR+ehk8rOslwSfByxPIZyJRfV739NDhN5LF1fa1MqnzKm2lGTQRjNrjK19Q5fhkgIfjlVUKw==} @@ -2497,6 +2692,11 @@ packages: resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} engines: {node: '>= 0.4'} + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -2509,6 +2709,11 @@ packages: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + is-installed-globally@0.4.0: resolution: {integrity: sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==} engines: {node: '>=10'} @@ -2558,6 +2763,10 @@ packages: resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} engines: {node: '>=0.10.0'} + is-wsl@3.1.0: + resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} + engines: {node: '>=16'} + is-yarn-global@0.4.1: resolution: {integrity: sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==} engines: {node: '>=12'} @@ -2583,6 +2792,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + js-md4@0.3.2: + resolution: {integrity: sha512-/GDnfQYsltsjRswQhN9fhv3EMw2sCpUdrdxyWDOUK7eyD++r3gRhzgiQgc/x4MAv2i1iuQ4lxO5mvqM3vj4bwA==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -2632,10 +2844,20 @@ packages: resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==} engines: {node: '>=0.10.0'} + jsonwebtoken@9.0.2: + resolution: {integrity: sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==} + engines: {node: '>=12', npm: '>=6'} + jsox@1.2.121: resolution: {integrity: sha512-9Ag50tKhpTwS6r5wh3MJSAvpSof0UBr39Pto8OnzFT32Z/pAbxAsKHzyvsyMEHVslELvHyO/4/jaQELHk8wDcw==} hasBin: true + jwa@1.4.2: + resolution: {integrity: sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==} + + jws@3.2.2: + resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} @@ -2665,6 +2887,27 @@ packages: resolution: {integrity: 
sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + lodash.startcase@4.4.0: resolution: {integrity: sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==} @@ -2863,6 +3106,11 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + mssql@11.0.1: + resolution: {integrity: sha512-KlGNsugoT90enKlR8/G36H0kTxPthDhmtNUCwEHvgRza5Cjpjoj+P2X6eMpFUDN7pFrJZsKadL4x990G8RBE1w==} + engines: {node: '>=18'} + hasBin: true + mute-stream@1.0.0: resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -2880,6 +3128,9 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true + native-duplexpair@1.0.0: + resolution: {integrity: sha512-E7QQoM+3jvNtlmyfqRZ0/U75VFgCls+fSkbml2MpgWkWyz3ox8Y58gNhfuziuQYGNNQAbFZJQck55LHCnCK6CA==} + nearley@2.20.1: resolution: {integrity: sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==} hasBin: true @@ -3006,6 +3257,10 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} + open@10.2.0: + resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} + engines: {node: '>=18'} + opentelemetry-instrumentation-fetch-node@1.2.3: resolution: {integrity: sha512-Qb11T7KvoCevMaSeuamcLsAD+pZnavkhDnlVL0kRozfhl42dKG5Q3anUklAFKJZjY3twLR+BnRa6DlwwkIE/+A==} engines: {node: '>18.0.0'} @@ -3425,6 +3680,10 @@ packages: rsocket-websocket-client@1.0.0-alpha.3: resolution: {integrity: sha512-CwTwTNMGa8BKvrWde/kM3q8IHuzO8RCIfzuj25BsVe9y8eehDQHt4fXk0g1i/wpsxTm+RY6DxE6Vr5snozKVOg==} + run-applescript@7.0.0: + resolution: {integrity: sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==} + engines: {node: '>=18'} + run-async@3.0.0: resolution: {integrity: sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==} engines: {node: '>=0.12.0'} @@ -3466,6 +3725,11 @@ packages: engines: {node: '>=10'} hasBin: true + semver@7.7.2: + resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} + engines: 
{node: '>=10'} + hasBin: true + seq-queue@0.0.5: resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} @@ -3679,6 +3943,14 @@ packages: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} + tarn@3.0.2: + resolution: {integrity: sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ==} + engines: {node: '>=8.0.0'} + + tedious@18.6.1: + resolution: {integrity: sha512-9AvErXXQTd6l7TDd5EmM+nxbOGyhnmdbp/8c3pw+tjaiSXW9usME90ET/CRG1LN1Y9tPMtz/p83z4Q97B4DDpw==} + engines: {node: '>=18'} + term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} engines: {node: '>=8'} @@ -3795,6 +4067,9 @@ packages: tslib@2.6.3: resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + tuf-js@1.1.7: resolution: {integrity: sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -3823,6 +4098,11 @@ packages: engines: {node: '>=14.17'} hasBin: true + typescript@5.9.2: + resolution: {integrity: sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==} + engines: {node: '>=14.17'} + hasBin: true + undefsafe@2.0.5: resolution: {integrity: sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==} @@ -3871,6 +4151,10 @@ packages: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true + uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + v8-compile-cache-lib@3.0.1: resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} @@ -4045,6 +4329,10 @@ packages: utf-8-validate: optional: true + wsl-utils@0.1.0: + resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} + engines: {node: '>=18'} + xdg-basedir@5.1.0: resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} engines: {node: '>=12'} @@ -4106,6 +4394,151 @@ packages: snapshots: + '@azure-rest/core-client@2.5.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-rest-pipeline': 1.22.0 + '@azure/core-tracing': 1.3.0 + '@typespec/ts-http-runtime': 0.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/abort-controller@2.1.2': + dependencies: + tslib: 2.8.1 + + '@azure/core-auth@1.10.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.0 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/core-client@1.10.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-rest-pipeline': 1.22.0 + '@azure/core-tracing': 1.3.0 + '@azure/core-util': 1.13.0 + '@azure/logger': 1.3.0 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/core-http-compat@2.3.0': + dependencies: + 
'@azure/abort-controller': 2.1.2 + '@azure/core-client': 1.10.0 + '@azure/core-rest-pipeline': 1.22.0 + transitivePeerDependencies: + - supports-color + + '@azure/core-lro@2.7.2': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.0 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-paging@1.6.2': + dependencies: + tslib: 2.8.1 + + '@azure/core-rest-pipeline@1.22.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-tracing': 1.3.0 + '@azure/core-util': 1.13.0 + '@azure/logger': 1.3.0 + '@typespec/ts-http-runtime': 0.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-tracing@1.3.0': + dependencies: + tslib: 2.8.1 + + '@azure/core-util@1.13.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@typespec/ts-http-runtime': 0.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/identity@4.11.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-client': 1.10.0 + '@azure/core-rest-pipeline': 1.22.0 + '@azure/core-tracing': 1.3.0 + '@azure/core-util': 1.13.0 + '@azure/logger': 1.3.0 + '@azure/msal-browser': 4.21.1 + '@azure/msal-node': 3.7.3 + open: 10.2.0 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-common@2.0.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-client': 1.10.0 + '@azure/core-rest-pipeline': 1.22.0 + '@azure/core-tracing': 1.3.0 + '@azure/core-util': 1.13.0 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-keys@4.10.0': + dependencies: + '@azure-rest/core-client': 2.5.0 + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.0 + '@azure/core-http-compat': 2.3.0 + '@azure/core-lro': 2.7.2 + '@azure/core-paging': 1.6.2 + '@azure/core-rest-pipeline': 1.22.0 + '@azure/core-tracing': 1.3.0 + '@azure/core-util': 1.13.0 + '@azure/keyvault-common': 2.0.0 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/logger@1.3.0': + dependencies: + '@typespec/ts-http-runtime': 0.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/msal-browser@4.21.1': + dependencies: + '@azure/msal-common': 15.12.0 + + '@azure/msal-common@15.12.0': {} + + '@azure/msal-node@3.7.3': + dependencies: + '@azure/msal-common': 15.12.0 + jsonwebtoken: 9.0.2 + uuid: 8.3.2 + '@babel/code-frame@7.24.7': dependencies: '@babel/highlight': 7.24.7 @@ -4138,7 +4571,7 @@ snapshots: outdent: 0.5.0 prettier: 2.8.8 resolve-from: 5.0.0 - semver: 7.6.2 + semver: 7.7.2 '@changesets/assemble-release-plan@6.0.4': dependencies: @@ -4147,7 +4580,7 @@ snapshots: '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 - semver: 7.6.2 + semver: 7.7.2 '@changesets/changelog-git@0.2.0': dependencies: @@ -4205,7 +4638,7 @@ snapshots: '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 picocolors: 1.1.0 - semver: 7.6.2 + semver: 7.7.2 '@changesets/get-release-plan@4.0.4': dependencies: @@ -4403,6 +4836,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 + '@js-joda/core@5.6.5': {} + '@js-sdsl/ordered-set@4.4.2': {} '@manypkg/find-root@1.1.0': @@ -4440,11 +4875,11 @@ snapshots: '@npmcli/fs@2.1.2': dependencies: '@gar/promisify': 1.1.3 - semver: 7.6.2 + semver: 7.7.2 '@npmcli/fs@3.1.1': dependencies: - semver: 
7.6.2 + semver: 7.7.2 '@npmcli/git@4.1.0': dependencies: @@ -4454,7 +4889,7 @@ snapshots: proc-log: 3.0.0 promise-inflight: 1.0.1 promise-retry: 2.0.1 - semver: 7.6.2 + semver: 7.7.2 which: 3.0.1 transitivePeerDependencies: - bluebird @@ -4576,7 +5011,7 @@ snapshots: '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.25.1 - semver: 7.6.2 + semver: 7.7.2 transitivePeerDependencies: - supports-color @@ -4668,7 +5103,7 @@ snapshots: '@types/shimmer': 1.2.0 import-in-the-middle: 1.7.1 require-in-the-middle: 7.3.0 - semver: 7.6.2 + semver: 7.7.2 shimmer: 1.2.1 transitivePeerDependencies: - supports-color @@ -4681,7 +5116,7 @@ snapshots: '@types/shimmer': 1.2.0 import-in-the-middle: 1.9.0 require-in-the-middle: 7.3.0 - semver: 7.6.2 + semver: 7.7.2 shimmer: 1.2.1 transitivePeerDependencies: - supports-color @@ -4949,6 +5384,8 @@ snapshots: dependencies: defer-to-connect: 2.0.1 + '@tediousjs/connection-string@0.5.0': {} + '@tootallnate/once@2.0.0': {} '@tsconfig/node10@1.0.11': {} @@ -4980,6 +5417,14 @@ snapshots: '@types/lodash@4.17.6': {} + '@types/mssql@9.1.7': + dependencies: + '@types/node': 22.16.2 + tarn: 3.0.2 + tedious: 18.6.1 + transitivePeerDependencies: + - supports-color + '@types/mysql@2.15.22': dependencies: '@types/node': 22.16.2 @@ -5008,10 +5453,16 @@ snapshots: pg-protocol: 1.6.1 pg-types: 2.2.0 + '@types/readable-stream@4.0.21': + dependencies: + '@types/node': 22.16.2 + '@types/semver-utils@1.1.3': {} '@types/semver@7.5.8': {} + '@types/semver@7.7.1': {} + '@types/shimmer@1.2.0': {} '@types/strip-bom@3.0.0': {} @@ -5020,6 +5471,8 @@ snapshots: '@types/triple-beam@1.3.5': {} + '@types/uuid@10.0.0': {} + '@types/webidl-conversions@7.0.3': {} '@types/whatwg-url@11.0.5': @@ -5030,6 +5483,14 @@ snapshots: dependencies: '@types/node': 22.16.2 + '@typespec/ts-http-runtime@0.3.0': + dependencies: + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + '@vitest/expect@3.0.5': dependencies: '@vitest/spy': 3.0.5 @@ -5106,6 +5567,8 @@ snapshots: transitivePeerDependencies: - supports-color + agent-base@7.1.4: {} + agentkeepalive@4.5.0: dependencies: humanize-ms: 1.2.1 @@ -5224,6 +5687,13 @@ snapshots: inherits: 2.0.4 readable-stream: 3.6.2 + bl@6.1.2: + dependencies: + '@types/readable-stream': 4.0.21 + buffer: 6.0.3 + inherits: 2.0.4 + readable-stream: 4.5.2 + boxen@7.1.1: dependencies: ansi-align: 3.0.1 @@ -5250,6 +5720,8 @@ snapshots: bson@6.10.3: {} + buffer-equal-constant-time@1.0.1: {} + buffer-from@1.1.2: {} buffer@5.7.1: @@ -5262,6 +5734,10 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + bundle-name@4.1.0: + dependencies: + run-applescript: 7.0.0 + cac@6.7.14: {} cacache@16.1.3: @@ -5422,6 +5898,8 @@ snapshots: commander@10.0.1: {} + commander@11.1.0: {} + commander@12.1.0: {} commander@2.20.3: {} @@ -5514,6 +5992,10 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.1: + dependencies: + ms: 2.1.3 + decompress-response@6.0.0: dependencies: mimic-response: 3.1.0 @@ -5526,12 +6008,21 @@ snapshots: deep-extend@0.6.0: {} + default-browser-id@5.0.0: {} + + default-browser@5.2.1: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.0 + defaults@1.0.4: dependencies: clone: 1.0.4 defer-to-connect@2.0.1: {} + define-lazy-prop@3.0.0: {} + delegates@1.0.0: {} denque@2.1.0: {} @@ -5560,6 +6051,10 @@ snapshots: eastasianwidth@0.2.0: {} + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + 
emoji-regex@8.0.0: {} emoji-regex@9.2.2: {} @@ -5898,6 +6393,13 @@ snapshots: transitivePeerDependencies: - supports-color + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + http2-wrapper@2.2.1: dependencies: quick-lru: 5.1.1 @@ -5910,6 +6412,13 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + human-id@1.0.2: {} humanize-ms@1.2.1: @@ -6008,6 +6517,8 @@ snapshots: dependencies: hasown: 2.0.2 + is-docker@3.0.0: {} + is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -6016,6 +6527,10 @@ snapshots: dependencies: is-extglob: 2.1.1 + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + is-installed-globally@0.4.0: dependencies: global-dirs: 3.0.1 @@ -6047,6 +6562,10 @@ snapshots: is-windows@1.0.2: {} + is-wsl@3.1.0: + dependencies: + is-inside-container: 1.0.0 + is-yarn-global@0.4.1: {} isarray@0.0.1: {} @@ -6070,6 +6589,8 @@ snapshots: jose@4.15.9: {} + js-md4@0.3.2: {} + js-tokens@4.0.0: {} js-yaml@3.14.1: @@ -6109,8 +6630,32 @@ snapshots: jsonpointer@5.0.1: {} + jsonwebtoken@9.0.2: + dependencies: + jws: 3.2.2 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.2 + jsox@1.2.121: {} + jwa@1.4.2: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@3.2.2: + dependencies: + jwa: 1.4.2 + safe-buffer: 5.2.1 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 @@ -6139,6 +6684,20 @@ snapshots: dependencies: p-locate: 5.0.0 + lodash.includes@4.3.0: {} + + lodash.isboolean@3.0.3: {} + + lodash.isinteger@4.0.4: {} + + lodash.isnumber@3.0.3: {} + + lodash.isplainobject@4.0.6: {} + + lodash.isstring@4.0.1: {} + + lodash.once@4.1.1: {} + lodash.startcase@4.4.0: {} lodash@4.17.21: {} @@ -6340,6 +6899,17 @@ snapshots: ms@2.1.3: {} + mssql@11.0.1: + dependencies: + '@tediousjs/connection-string': 0.5.0 + commander: 11.1.0 + debug: 4.4.1 + rfdc: 1.4.1 + tarn: 3.0.2 + tedious: 18.6.1 + transitivePeerDependencies: + - supports-color + mute-stream@1.0.0: {} mysql2@3.11.3: @@ -6360,6 +6930,8 @@ snapshots: nanoid@3.3.8: {} + native-duplexpair@1.0.0: {} + nearley@2.20.1: dependencies: commander: 2.20.3 @@ -6391,7 +6963,7 @@ snapshots: nopt: 6.0.0 npmlog: 6.0.2 rimraf: 3.0.2 - semver: 7.6.2 + semver: 7.7.2 tar: 6.2.1 which: 2.0.2 transitivePeerDependencies: @@ -6414,7 +6986,7 @@ snapshots: ignore-by-default: 1.0.1 minimatch: 3.1.2 pstree.remy: 1.1.8 - semver: 7.6.2 + semver: 7.7.2 simple-update-notifier: 2.0.0 supports-color: 5.5.0 touch: 3.1.1 @@ -6433,7 +7005,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 is-core-module: 2.14.0 - semver: 7.6.2 + semver: 7.7.2 validate-npm-package-license: 3.0.4 normalize-path@3.0.0: {} @@ -6471,7 +7043,7 @@ snapshots: rc-config-loader: 4.1.3 remote-git-tags: 3.0.0 rimraf: 5.0.9 - semver: 7.6.2 + semver: 7.7.2 semver-utils: 1.1.4 source-map-support: 0.5.21 spawn-please: 2.0.2 @@ -6487,7 +7059,7 @@ snapshots: npm-install-checks@6.3.0: dependencies: - semver: 7.6.2 + semver: 7.7.2 npm-normalize-package-bin@3.0.1: {} @@ -6495,7 +7067,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 proc-log: 3.0.0 - semver: 7.6.2 + semver: 7.7.2 validate-npm-package-name: 5.0.1 npm-packlist@7.0.4: @@ -6507,7 +7079,7 @@ snapshots: npm-install-checks: 6.3.0 
npm-normalize-package-bin: 3.0.1 npm-package-arg: 10.1.0 - semver: 7.6.2 + semver: 7.7.2 npm-registry-fetch@14.0.5: dependencies: @@ -6546,6 +7118,13 @@ snapshots: dependencies: mimic-fn: 2.1.0 + open@10.2.0: + dependencies: + default-browser: 5.2.1 + define-lazy-prop: 3.0.0 + is-inside-container: 1.0.0 + wsl-utils: 0.1.0 + opentelemetry-instrumentation-fetch-node@1.2.3(@opentelemetry/api@1.9.0): dependencies: '@opentelemetry/api': 1.9.0 @@ -6610,7 +7189,7 @@ snapshots: got: 12.6.1 registry-auth-token: 5.0.2 registry-url: 6.0.1 - semver: 7.6.2 + semver: 7.7.2 package-manager-detector@0.2.0: {} @@ -7007,6 +7586,8 @@ snapshots: dependencies: rsocket-core: 1.0.0-alpha.3 + run-applescript@7.0.0: {} + run-async@3.0.0: {} run-parallel@1.2.0: @@ -7033,12 +7614,14 @@ snapshots: semver-diff@4.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.2 semver-utils@1.1.4: {} semver@7.6.2: {} + semver@7.7.2: {} + seq-queue@0.0.5: {} set-blocking@2.0.0: {} @@ -7083,7 +7666,7 @@ snapshots: simple-update-notifier@2.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.2 sisteransi@1.0.5: {} @@ -7243,6 +7826,23 @@ snapshots: mkdirp: 1.0.4 yallist: 4.0.0 + tarn@3.0.2: {} + + tedious@18.6.1: + dependencies: + '@azure/core-auth': 1.10.0 + '@azure/identity': 4.11.1 + '@azure/keyvault-keys': 4.10.0 + '@js-joda/core': 5.6.5 + '@types/node': 22.16.2 + bl: 6.1.2 + iconv-lite: 0.6.3 + js-md4: 0.3.2 + native-duplexpair: 1.0.0 + sprintf-js: 1.1.3 + transitivePeerDependencies: + - supports-color + term-size@2.2.1: {} text-hex@1.0.0: {} @@ -7328,6 +7928,24 @@ snapshots: v8-compile-cache-lib: 3.0.1 yn: 3.1.1 + ts-node@10.9.2(@types/node@22.16.2)(typescript@5.9.2): + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.11 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 22.16.2 + acorn: 8.12.1 + acorn-walk: 8.3.3 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.9.2 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + tsc-watch@6.2.0(typescript@5.7.3): dependencies: cross-spawn: 7.0.3 @@ -7349,6 +7967,8 @@ snapshots: tslib@2.6.3: {} + tslib@2.8.1: {} + tuf-js@1.1.7: dependencies: '@tufjs/models': 1.0.4 @@ -7371,6 +7991,8 @@ snapshots: typescript@5.7.3: {} + typescript@5.9.2: {} + undefsafe@2.0.5: {} undici-types@6.21.0: {} @@ -7412,7 +8034,7 @@ snapshots: is-yarn-global: 0.4.1 latest-version: 7.0.0 pupa: 3.1.0 - semver: 7.6.2 + semver: 7.7.2 semver-diff: 4.0.0 xdg-basedir: 5.1.0 @@ -7424,6 +8046,8 @@ snapshots: uuid@11.1.0: {} + uuid@8.3.2: {} + v8-compile-cache-lib@3.0.1: {} validate-npm-package-license@3.0.4: @@ -7671,6 +8295,10 @@ snapshots: ws@8.18.0: {} + wsl-utils@0.1.0: + dependencies: + is-wsl: 3.1.0 + xdg-basedir@5.1.0: {} xtend@4.0.2: {} diff --git a/tsconfig.json b/tsconfig.json index e9d0017c5..78586205e 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -25,6 +25,9 @@ { "path": "./modules/module-postgres-storage" }, + { + "path": "./modules/module-mssql" + }, { "path": "./modules/module-mysql" }, From 0162d4f0e2742284232442de76ebf98cde7ff00a Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Mon, 20 Oct 2025 17:11:30 +0200 Subject: [PATCH 09/42] Added mssql test setup, helper classes and first snapshot test --- .../module-mssql/test/src/CDCStream.test.ts | 74 +++++++ .../test/src/CDCStreamTestContext.ts | 202 ++++++++++++++++++ modules/module-mssql/test/src/env.ts | 10 + modules/module-mssql/test/src/setup.ts | 12 ++ modules/module-mssql/test/src/util.ts | 127 +++++++++++ modules/module-mssql/test/tsconfig.json 
| 28 +++ 6 files changed, 453 insertions(+) create mode 100644 modules/module-mssql/test/src/CDCStream.test.ts create mode 100644 modules/module-mssql/test/src/CDCStreamTestContext.ts create mode 100644 modules/module-mssql/test/src/env.ts create mode 100644 modules/module-mssql/test/src/setup.ts create mode 100644 modules/module-mssql/test/src/util.ts create mode 100644 modules/module-mssql/test/tsconfig.json diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts new file mode 100644 index 000000000..197bc4ef5 --- /dev/null +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -0,0 +1,74 @@ +import { describe, expect, test } from 'vitest'; +import { METRICS_HELPER, putOp } from '@powersync/service-core-tests'; +import { ReplicationMetric } from '@powersync/service-types'; +import { v4 as uuid } from 'uuid'; +import { describeWithStorage, enableCDCForTable, INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; +import { storage } from '@powersync/service-core'; +import { CDCStreamTestContext } from './CDCStreamTestContext.js'; + +const BASIC_SYNC_RULES = ` +bucket_definitions: + global: + data: + - SELECT id, description FROM "test_data" +`; + +// describe('CDCStream tests', () => { +// describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); +// }); + +defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); + +function defineCDCStreamTests(factory: storage.TestStorageFactory) { + test('Initial snapshot sync', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await connectionManager.query(`CREATE TABLE test_data (id UNIQUEIDENTIFIER PRIMARY KEY, description VARCHAR(MAX))`); + await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data' }); + const testId = uuid(); + await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId}','test1')`); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + + await context.replicateSnapshot(); + + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]); + expect(endRowCount - startRowCount).toEqual(1); + }); + + // test('Replicate basic values', async () => { + // await using context = await CDCStreamTestContext.open(factory); + // const { connectionManager } = context; + // await context.updateSyncRules(` + // bucket_definitions: + // global: + // data: + // - SELECT id, description, num FROM "test_data"`); + // + // await connectionManager.query( + // `CREATE TABLE test_data (id UNIQUEIDENTIFIER PRIMARY KEY, description VARCHAR(MAX), num BIGINT)` + // ); + // + // await context.replicateSnapshot(); + // + // const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + // const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 
0; + // + // await context.startStreaming(); + // const testId = uuid(); + // await connectionManager.query( + // `INSERT INTO test_data(id, description, num) VALUES('${testId}', 'test1', 1152921504606846976)` + // ); + // const data = await context.getBucketData('global[]'); + // + // expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]); + // const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + // const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + // expect(endRowCount - startRowCount).toEqual(1); + // expect(endTxCount - startTxCount).toEqual(1); + // }); +} diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts new file mode 100644 index 000000000..31c5a1f9e --- /dev/null +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -0,0 +1,202 @@ +import { + BucketStorageFactory, + createCoreReplicationMetrics, + initializeCoreReplicationMetrics, + InternalOpId, + OplogEntry, + storage, + SyncRulesBucketStorage +} from '@powersync/service-core'; +import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; +import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; +import { CDCStream, CDCStreamOptions } from '@module/replication/CDCStream.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; + +/** + * Tests operating on the change data capture need to configure the stream and manage asynchronous + * replication, which gets a little tricky. + * + * This wraps all the context required for testing, and tears it down afterward + * by using `await using`. 
+ */
+export class CDCStreamTestContext implements AsyncDisposable {
+  private _cdcStream?: CDCStream;
+  private abortController = new AbortController();
+  private streamPromise?: Promise<void>;
+  public storage?: SyncRulesBucketStorage;
+  private snapshotPromise?: Promise<void>;
+  private replicationDone = false;
+
+  static async open(
+    factory: (options: storage.TestStorageOptions) => Promise<BucketStorageFactory>,
+    options?: { doNotClear?: boolean; cdcStreamOptions?: Partial<CDCStreamOptions> }
+  ) {
+    const f = await factory({ doNotClear: options?.doNotClear });
+    const connectionManager = new MSSQLConnectionManager(TEST_CONNECTION_OPTIONS, {});
+
+    if (!options?.doNotClear) {
+      await clearTestDb(connectionManager);
+    }
+
+    return new CDCStreamTestContext(f, connectionManager, options?.cdcStreamOptions);
+  }
+
+  constructor(
+    public factory: BucketStorageFactory,
+    public connectionManager: MSSQLConnectionManager,
+    private cdcStreamOptions?: Partial<CDCStreamOptions>
+  ) {
+    createCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
+    initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
+  }
+
+  async [Symbol.asyncDispose]() {
+    await this.dispose();
+  }
+
+  async dispose() {
+    this.abortController.abort();
+    await this.snapshotPromise;
+    await this.streamPromise;
+    await this.connectionManager.end();
+    await this.factory?.[Symbol.asyncDispose]();
+  }
+
+  get connectionTag() {
+    return this.connectionManager.connectionTag;
+  }
+
+  async updateSyncRules(content: string) {
+    const syncRules = await this.factory.updateSyncRules({ content: content, validate: true });
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
+  async loadNextSyncRules() {
+    const syncRules = await this.factory.getNextSyncRulesContent();
+    if (syncRules == null) {
+      throw new Error(`Next sync rules not available`);
+    }
+
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
+  async loadActiveSyncRules() {
+    const syncRules = await this.factory.getActiveSyncRulesContent();
+    if (syncRules == null) {
+      throw new Error(`Active sync rules not available`);
+    }
+
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
+  get cdcStream() {
+    if (this.storage == null) {
+      throw new Error('Call updateSyncRules() first');
+    }
+    if (this._cdcStream) {
+      return this._cdcStream;
+    }
+    const options: CDCStreamOptions = {
+      storage: this.storage,
+      metrics: METRICS_HELPER.metricsEngine,
+      connections: this.connectionManager,
+      abortSignal: this.abortController.signal,
+      ...this.cdcStreamOptions
+    };
+    this._cdcStream = new CDCStream(options);
+    return this._cdcStream!;
+  }
+
+  /**
+   * Replicate a snapshot, start streaming, and wait for a consistent checkpoint.
+   */
+  async initializeReplication() {
+    await this.replicateSnapshot();
+    // TODO: re-enable this.startStreaming();
+    // Make sure we're up to date
+    await this.getCheckpoint();
+  }
+
+  async replicateSnapshot() {
+    await this.cdcStream.initReplication();
+    this.replicationDone = true;
+  }
+
+  // TODO: Enable once streaming is implemented
+  // startStreaming() {
+  //   if (!this.replicationDone) {
+  //     throw new Error('Call replicateSnapshot() before startStreaming()');
+  //   }
+  //   this.streamPromise = this.cdcStream.streamChanges();
+  // Wait for the replication to start before returning.
+  // This avoids a bunch of unpredictable race conditions that appear in testing
+  //return new Promise<void>(async (resolve) => {
+  //while (this.binlogStream.isStartingReplication) {
+  //await timers.setTimeout(50);
+  //}
+
+  //resolve();
+  //});
+  // }
+
+  async getCheckpoint(options?: { timeout?: number }) {
+    let checkpoint = await Promise.race([
+      getClientCheckpoint(this.connectionManager, this.factory, { timeout: options?.timeout ?? 15_000 })
+      //this.streamPromise
+    ]);
+    if (checkpoint == null) {
+      // This indicates an issue with the test setup - streamingPromise completed instead
+      // of getClientCheckpoint()
+      throw new Error('Test failure - streamingPromise completed');
+    }
+    return checkpoint;
+  }
+
+  async getBucketsDataBatch(buckets: Record<string, InternalOpId>, options?: { timeout?: number }) {
+    let checkpoint = await this.getCheckpoint(options);
+    const map = new Map(Object.entries(buckets));
+    return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
+  }
+
+  /**
+   * This waits for a client checkpoint.
+   */
+  async getBucketData(bucket: string, start?: InternalOpId | string | undefined, options?: { timeout?: number }) {
+    start ??= 0n;
+    if (typeof start == 'string') {
+      start = BigInt(start);
+    }
+    const checkpoint = await this.getCheckpoint(options);
+    const map = new Map([[bucket, start]]);
+    let data: OplogEntry[] = [];
+    while (true) {
+      const batch = this.storage!.getBucketDataBatch(checkpoint, map);
+
+      const batches = await test_utils.fromAsync(batch);
+      data = data.concat(batches[0]?.chunkData.data ?? []);
+      if (batches.length == 0 || !batches[0]!.chunkData.has_more) {
+        break;
+      }
+      map.set(bucket, BigInt(batches[0]!.chunkData.next_after));
+    }
+    return data;
+  }
+
+  /**
+   * This does not wait for a client checkpoint.
+   */
+  async getCurrentBucketData(bucket: string, start?: InternalOpId | string | undefined) {
+    start ??= 0n;
+    if (typeof start == 'string') {
+      start = BigInt(start);
+    }
+    const { checkpoint } = await this.storage!.getCheckpoint();
+    const map = new Map([[bucket, start]]);
+    const batch = this.storage!.getBucketDataBatch(checkpoint, map);
+    const batches = await test_utils.fromAsync(batch);
+    return batches[0]?.chunkData.data ??
[]; + } +} diff --git a/modules/module-mssql/test/src/env.ts b/modules/module-mssql/test/src/env.ts new file mode 100644 index 000000000..ac05d7d71 --- /dev/null +++ b/modules/module-mssql/test/src/env.ts @@ -0,0 +1,10 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + MSSQL_TEST_URI: utils.type.string.default(`mssql://sa:321strong_ROOT_password@localhost:1433/powersync`), + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), + CI: utils.type.boolean.default('false'), + PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5431/powersync_storage_test'), + TEST_MONGO_STORAGE: utils.type.boolean.default('true'), + TEST_POSTGRES_STORAGE: utils.type.boolean.default('true') +}); diff --git a/modules/module-mssql/test/src/setup.ts b/modules/module-mssql/test/src/setup.ts new file mode 100644 index 000000000..8d0b885e6 --- /dev/null +++ b/modules/module-mssql/test/src/setup.ts @@ -0,0 +1,12 @@ +import { container } from '@powersync/lib-services-framework'; +import { METRICS_HELPER } from '@powersync/service-core-tests'; +import { beforeAll, beforeEach } from 'vitest'; + +beforeAll(async () => { + // Executes for every test file + container.registerDefaults(); +}); + +beforeEach(async () => { + METRICS_HELPER.resetMetrics(); +}); diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts new file mode 100644 index 000000000..c5344f579 --- /dev/null +++ b/modules/module-mssql/test/src/util.ts @@ -0,0 +1,127 @@ +import * as types from '@module/types/types.js'; +import { logger } from '@powersync/lib-services-framework'; +import { BucketStorageFactory, InternalOpId, ReplicationCheckpoint, TestStorageFactory } from '@powersync/service-core'; + +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; +import * as postgres_storage from '@powersync/service-module-postgres-storage'; + +import { describe, TestOptions } from 'vitest'; +import { env } from './env.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import { getLatestLSN } from '@module/utils/mssql.js'; +import sql from 'mssql'; + +export const TEST_URI = env.MSSQL_TEST_URI; + +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI +}); + +export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.PostgresTestStorageFactoryGenerator({ + url: env.PG_STORAGE_TEST_URL +}); + +export function describeWithStorage(options: TestOptions, fn: (factory: TestStorageFactory) => void) { + describe.skipIf(!env.TEST_MONGO_STORAGE)(`mongodb storage`, options, function () { + fn(INITIALIZED_MONGO_STORAGE_FACTORY); + }); + + describe.skipIf(!env.TEST_POSTGRES_STORAGE)(`postgres storage`, options, function () { + fn(INITIALIZED_POSTGRES_STORAGE_FACTORY); + }); +} + +export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ + type: 'mssql', + uri: TEST_URI +}); + +/** + * Clears all test tables (those prefixed with 'test_') from the database. Also removes CDC instances for those tables. 
+ * @param connectionManager
+ */
+export async function clearTestDb(connectionManager: MSSQLConnectionManager) {
+  const { recordset: tables } = await connectionManager.query(`
+      SELECT TABLE_SCHEMA, TABLE_NAME
+      FROM INFORMATION_SCHEMA.TABLES
+      WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_NAME LIKE 'test_%'
+  `);
+  for (const row of tables) {
+    // Disable CDC for the table if enabled
+    await connectionManager.execute('sys.sp_cdc_disable_table', [
+      { name: 'source_schema', value: row.TABLE_SCHEMA },
+      { name: 'source_name', value: row.TABLE_NAME },
+      { name: 'capture_instance', value: 'all' }
+    ]);
+    // Drop the table, qualified with its schema
+    await connectionManager.query(`DROP TABLE [${row.TABLE_SCHEMA}].[${row.TABLE_NAME}]`);
+  }
+}
+
+/**
+ * Creates a new database for testing and enables CDC on it.
+ * @param connectionManager
+ * @param dbName
+ */
+export async function createTestDb(connectionManager: MSSQLConnectionManager, dbName: string) {
+  await connectionManager.query(`DROP DATABASE IF EXISTS ${dbName}`);
+  await connectionManager.query(`CREATE DATABASE ${dbName}`);
+  // GO is a client-side batch separator, not T-SQL, so it can't be sent through the driver.
+  // Switch to the new database and enable CDC in a single batch instead.
+  await connectionManager.query(`
+    USE ${dbName};
+    EXEC sys.sp_cdc_enable_db;
+  `);
+}
+
+export interface EnableCDCForTableOptions {
+  connectionManager: MSSQLConnectionManager;
+  schema: string;
+  table: string;
+}
+
+export async function enableCDCForTable(options: EnableCDCForTableOptions): Promise<void> {
+  const { connectionManager, schema, table } = options;
+
+  await connectionManager.execute('sys.sp_cdc_enable_table', [
+    { name: 'source_schema', value: schema },
+    { name: 'source_name', value: table },
+    // Pass SQL NULL (not the string 'NULL') so reading the change data is not gated by a role
+    { name: 'role_name', value: null },
+    { name: 'supports_net_changes', value: 1 }
+  ]);
+}
+
+export async function getClientCheckpoint(
+  connectionManager: MSSQLConnectionManager,
+  storageFactory: BucketStorageFactory,
+  options?: { timeout?: number }
+): Promise<InternalOpId> {
+  const start = Date.now();
+
+  const lsn = await getLatestLSN(connectionManager);
+
+  // This old API needs a persisted checkpoint id. We can't look one up by LSN directly,
+  // so the only way to get it is to wait for the storage checkpoint to pass the current LSN.
+
+  const timeout = options?.timeout ?? 50_000;
+  let lastCp: ReplicationCheckpoint | null = null;
+
+  logger.info(`Waiting for LSN checkpoint: ${lsn}`);
+  while (Date.now() - start < timeout) {
+    const storage = await storageFactory.getActiveStorage();
+    const cp = await storage?.getCheckpoint();
+    if (cp == null) {
+      throw new Error('No sync rules available');
+    }
+    lastCp = cp;
+    if (cp.lsn != null && cp.lsn >= lsn.toString()) {
+      logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`);
+      return cp.checkpoint;
+    }
+
+    await new Promise((resolve) => setTimeout(resolve, 30));
+  }
+
+  throw new Error(`Timeout while waiting for checkpoint ${lsn}. 
Last checkpoint: ${lastCp?.lsn}`); +} diff --git a/modules/module-mssql/test/tsconfig.json b/modules/module-mssql/test/tsconfig.json new file mode 100644 index 000000000..18898c4ee --- /dev/null +++ b/modules/module-mssql/test/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + } + ] +} From 84689594579e27691239e3d981c3fade4d5e68a5 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Mon, 20 Oct 2025 17:11:50 +0200 Subject: [PATCH 10/42] Added mssql specific error code --- packages/service-errors/src/codes.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/service-errors/src/codes.ts b/packages/service-errors/src/codes.ts index 74f92a73f..f7d26e68d 100644 --- a/packages/service-errors/src/codes.ts +++ b/packages/service-errors/src/codes.ts @@ -297,6 +297,14 @@ export enum ErrorCode { // ## PSYNC_S2xxx: Service API + /** + * Required updates in the Change Data Capture (CDC) are no longer available. + * + * Possible causes: + * * Older data has been cleaned up due to exceeding the retention period. + */ + PSYNC_S1500 = 'PSYNC_S1500', + /** * Generic internal server error (HTTP 500). * From 296944c48cdb74230609aa85e764ef8774ac80ca Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Mon, 20 Oct 2025 17:12:34 +0200 Subject: [PATCH 11/42] Small binlog stream comment fix --- modules/module-mysql/package.json | 2 +- modules/module-mysql/src/replication/BinLogStream.ts | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/module-mysql/package.json b/modules/module-mysql/package.json index 8fbcf281e..8d668f436 100644 --- a/modules/module-mysql/package.json +++ b/modules/module-mysql/package.json @@ -47,6 +47,6 @@ "@powersync/service-module-mongodb-storage": "workspace:*", "@powersync/service-module-postgres-storage": "workspace:*", "@types/async": "^3.2.24", - "@types/semver": "^7.5.4" + "@types/semver": "^7.7.1" } } diff --git a/modules/module-mysql/src/replication/BinLogStream.ts b/modules/module-mysql/src/replication/BinLogStream.ts index 3d4c87563..efad23889 100644 --- a/modules/module-mysql/src/replication/BinLogStream.ts +++ b/modules/module-mysql/src/replication/BinLogStream.ts @@ -146,7 +146,7 @@ export class BinLogStream { const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny; if (shouldSnapshot) { - // Truncate this table, in case a previous snapshot was interrupted. + // Truncate this table in case a previous snapshot was interrupted. 
await batch.truncate([result.table]); let gtid: common.ReplicatedGTID; @@ -188,7 +188,7 @@ export class BinLogStream { const matchedTables: string[] = await common.getTablesFromPattern(connection, tablePattern); connection.release(); - let tables: storage.SourceTable[] = []; + const tables: storage.SourceTable[] = []; for (const matchedTable of matchedTables) { const replicaIdColumns = await this.getReplicaIdColumns(matchedTable, tablePattern.schema); From ccb37e6ecd0274aefcbb718a6ea2f91c3c480f9b Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Mon, 20 Oct 2025 18:18:30 +0200 Subject: [PATCH 12/42] Updated lockfile --- pnpm-lock.yaml | 691 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 657 insertions(+), 34 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 327052e44..3fa8f7c16 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -255,6 +255,61 @@ importers: specifier: workspace:* version: link:../../packages/service-core-tests + modules/module-mssql: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-errors': + specifier: workspace:* + version: link:../../packages/service-errors + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + mssql: + specifier: ^11.0.1 + version: 11.0.1 + semver: + specifier: ^7.7.2 + version: 7.7.3 + ts-codec: + specifier: ^1.3.0 + version: 1.3.0 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^11.1.0 + version: 11.1.0 + devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage + '@powersync/service-module-postgres-storage': + specifier: workspace:* + version: link:../module-postgres-storage + '@types/mssql': + specifier: ^9.1.7 + version: 9.1.8 + '@types/semver': + specifier: ^7.7.1 + version: 7.7.1 + '@types/uuid': + specifier: ^10.0.0 + version: 10.0.0 + modules/module-mysql: dependencies: '@powersync/lib-services-framework': @@ -310,8 +365,8 @@ importers: specifier: ^3.2.24 version: 3.2.24 '@types/semver': - specifier: ^7.5.4 - version: 7.5.8 + specifier: ^7.7.1 + version: 7.7.1 modules/module-postgres: dependencies: @@ -743,6 +798,74 @@ importers: packages: + '@azure-rest/core-client@2.5.1': + resolution: {integrity: sha512-EHaOXW0RYDKS5CFffnixdyRPak5ytiCtU7uXDcP/uiY+A6jFRwNGzzJBiznkCzvi5EYpY+YWinieqHb0oY916A==} + engines: {node: '>=20.0.0'} + + '@azure/abort-controller@2.1.2': + resolution: {integrity: sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==} + engines: {node: '>=18.0.0'} + + '@azure/core-auth@1.10.1': + resolution: {integrity: sha512-ykRMW8PjVAn+RS6ww5cmK9U2CyH9p4Q88YJwvUslfuMmN98w/2rdGRLPqJYObapBCdzBVeDgYWdJnFPFb7qzpg==} + engines: {node: '>=20.0.0'} + + '@azure/core-client@1.10.1': + resolution: {integrity: sha512-Nh5PhEOeY6PrnxNPsEHRr9eimxLwgLlpmguQaHKBinFYA/RU9+kOYVOQqOrTsCL+KSxrLLl1gD8Dk5BFW/7l/w==} + engines: {node: '>=20.0.0'} + + '@azure/core-http-compat@2.3.1': + resolution: {integrity: 
sha512-az9BkXND3/d5VgdRRQVkiJb2gOmDU8Qcq4GvjtBmDICNiQ9udFmDk4ZpSB5Qq1OmtDJGlQAfBaS4palFsazQ5g==} + engines: {node: '>=20.0.0'} + + '@azure/core-lro@2.7.2': + resolution: {integrity: sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw==} + engines: {node: '>=18.0.0'} + + '@azure/core-paging@1.6.2': + resolution: {integrity: sha512-YKWi9YuCU04B55h25cnOYZHxXYtEvQEbKST5vqRga7hWY9ydd3FZHdeQF8pyh+acWZvppw13M/LMGx0LABUVMA==} + engines: {node: '>=18.0.0'} + + '@azure/core-rest-pipeline@1.22.1': + resolution: {integrity: sha512-UVZlVLfLyz6g3Hy7GNDpooMQonUygH7ghdiSASOOHy97fKj/mPLqgDX7aidOijn+sCMU+WU8NjlPlNTgnvbcGA==} + engines: {node: '>=20.0.0'} + + '@azure/core-tracing@1.3.1': + resolution: {integrity: sha512-9MWKevR7Hz8kNzzPLfX4EAtGM2b8mr50HPDBvio96bURP/9C+HjdH3sBlLSNNrvRAr5/k/svoH457gB5IKpmwQ==} + engines: {node: '>=20.0.0'} + + '@azure/core-util@1.13.1': + resolution: {integrity: sha512-XPArKLzsvl0Hf0CaGyKHUyVgF7oDnhKoP85Xv6M4StF/1AhfORhZudHtOyf2s+FcbuQ9dPRAjB8J2KvRRMUK2A==} + engines: {node: '>=20.0.0'} + + '@azure/identity@4.13.0': + resolution: {integrity: sha512-uWC0fssc+hs1TGGVkkghiaFkkS7NkTxfnCH+Hdg+yTehTpMcehpok4PgUKKdyCH+9ldu6FhiHRv84Ntqj1vVcw==} + engines: {node: '>=20.0.0'} + + '@azure/keyvault-common@2.0.0': + resolution: {integrity: sha512-wRLVaroQtOqfg60cxkzUkGKrKMsCP6uYXAOomOIysSMyt1/YM0eUn9LqieAWM8DLcU4+07Fio2YGpPeqUbpP9w==} + engines: {node: '>=18.0.0'} + + '@azure/keyvault-keys@4.10.0': + resolution: {integrity: sha512-eDT7iXoBTRZ2n3fLiftuGJFD+yjkiB1GNqzU2KbY1TLYeXeSPVTVgn2eJ5vmRTZ11978jy2Kg2wI7xa9Tyr8ag==} + engines: {node: '>=18.0.0'} + + '@azure/logger@1.3.0': + resolution: {integrity: sha512-fCqPIfOcLE+CGqGPd66c8bZpwAji98tZ4JI9i/mlTNTlsIWslCfpg48s/ypyLxZTump5sypjrKn2/kY7q8oAbA==} + engines: {node: '>=20.0.0'} + + '@azure/msal-browser@4.25.1': + resolution: {integrity: sha512-kAdOSNjvMbeBmEyd5WnddGmIpKCbAAGj4Gg/1iURtF+nHmIfS0+QUBBO3uaHl7CBB2R1SEAbpOgxycEwrHOkFA==} + engines: {node: '>=0.8.0'} + + '@azure/msal-common@15.13.0': + resolution: {integrity: sha512-8oF6nj02qX7eE/6+wFT5NluXRHc05AgdCC3fJnkjiJooq8u7BcLmxaYYSwc2AfEkWRMRi6Eyvvbeqk4U4412Ag==} + engines: {node: '>=0.8.0'} + + '@azure/msal-node@3.8.0': + resolution: {integrity: sha512-23BXm82Mp5XnRhrcd4mrHa0xuUNRp96ivu3nRatrfdAqjoeWAGyD0eEAafxAOHAEWWmdlyFK4ELFcdziXyw2sA==} + engines: {node: '>=16'} + '@babel/code-frame@7.24.7': resolution: {integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==} engines: {node: '>=6.9.0'} @@ -1034,6 +1157,9 @@ packages: '@jridgewell/trace-mapping@0.3.9': resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + '@js-joda/core@5.6.5': + resolution: {integrity: sha512-3zwefSMwHpu8iVUW8YYz227sIv6UFqO31p1Bf1ZH/Vom7CmNyUsXjDBlnNzcuhmOL1XfxZ3nvND42kR23XlbcQ==} + '@js-sdsl/ordered-set@4.4.2': resolution: {integrity: sha512-ieYQ8WlBPKYzEo81H3q0DFbd8WtFRXXABb4+vRCF0AO3WWtJZFxYvRGdipUXGrd6tlSySmqhcPuO3J6SCodCxg==} @@ -1565,6 +1691,9 @@ packages: resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} engines: {node: '>=14.16'} + '@tediousjs/connection-string@0.5.0': + resolution: {integrity: sha512-7qSgZbincDDDFyRweCIEvZULFAw5iz/DeunhvuxpL31nfntX3P4Yd4HkHBRg9H8CdqY1e5WFN1PZIz/REL9MVQ==} + '@tootallnate/once@2.0.0': resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} engines: {node: '>= 10'} @@ -1613,6 +1742,9 @@ 
packages: '@types/lodash@4.17.6': resolution: {integrity: sha512-OpXEVoCKSS3lQqjx9GGGOapBeuW5eUboYHRlHP9urXPX25IKZ6AnP5ZRxtVf63iieUbsHxLn8NQ5Nlftc6yzAA==} + '@types/mssql@9.1.8': + resolution: {integrity: sha512-mt9h5jWj+DYE5jxnKaWSV/GqDf9FV52XYVk6T3XZF69noEe+JJV6MKirii48l81+cjmAkSq+qeKX+k61fHkYrQ==} + '@types/mysql@2.15.27': resolution: {integrity: sha512-YfWiV16IY0OeBfBCk8+hXKmdTKrKlwKN1MNKAPBu5JYxLwBEZl7QzeEpGnlZb3VMGJrrGmB84gXiH+ofs/TezA==} @@ -1640,12 +1772,18 @@ packages: '@types/pg@8.15.4': resolution: {integrity: sha512-I6UNVBAoYbvuWkkU3oosC8yxqH21f4/Jc4DK71JLG3dT2mdlGe1z+ep/LQGXaKaOgcvUrsQoPRqfgtMcvZiJhg==} + '@types/readable-stream@4.0.21': + resolution: {integrity: sha512-19eKVv9tugr03IgfXlA9UVUVRbW6IuqRO5B92Dl4a6pT7K8uaGrNS0GkxiZD0BOk6PLuXl5FhWl//eX/pzYdTQ==} + '@types/semver-utils@1.1.3': resolution: {integrity: sha512-T+YwkslhsM+CeuhYUxyAjWm7mJ5am/K10UX40RuA6k6Lc7eGtq8iY2xOzy7Vq0GOqhl/xZl5l2FwURZMTPTUww==} '@types/semver@7.5.8': resolution: {integrity: sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==} + '@types/semver@7.7.1': + resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==} + '@types/shimmer@1.2.0': resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} @@ -1661,6 +1799,9 @@ packages: '@types/triple-beam@1.3.5': resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==} + '@types/uuid@10.0.0': + resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + '@types/webidl-conversions@7.0.3': resolution: {integrity: sha512-CiJJvcRtIgzadHCYXw7dqEnMNRjhGZlYK05Mj9OyktqV8uVT8fD2BFOB7S1uwBE3Kj2Z+4UyPmFw/Ixgw/LAlA==} @@ -1670,6 +1811,10 @@ packages: '@types/ws@8.2.3': resolution: {integrity: sha512-ahRJZquUYCdOZf/rCsWg88S0/+cb9wazUBHv6HZEe3XdYaBe2zr/slM8J28X07Hn88Pnm4ezo7N8/ofnOgrPVQ==} + '@typespec/ts-http-runtime@0.3.1': + resolution: {integrity: sha512-SnbaqayTVFEA6/tYumdF0UmybY0KHyKwGPBXnyckFlrrKdhWFrL3a2HIPXHjht5ZOElKGcXfD2D63P36btb+ww==} + engines: {node: '>=20.0.0'} + '@vitest/expect@3.2.4': resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} @@ -1706,6 +1851,10 @@ packages: abbrev@1.1.1: resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==} + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + abstract-logging@2.0.1: resolution: {integrity: sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA==} @@ -1732,6 +1881,10 @@ packages: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + agentkeepalive@4.5.0: resolution: {integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} engines: {node: '>= 8.0.0'} @@ -1862,6 +2015,9 @@ packages: bl@4.1.0: resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + 
bl@6.1.4: + resolution: {integrity: sha512-ZV/9asSuknOExbM/zPPA8z00lc1ihPKWaStHkkQrxHNeYx+yY+TmF+v80dpv2G0mv3HVXBu7ryoAsxbFFhf4eg==} + boxen@7.1.1: resolution: {integrity: sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==} engines: {node: '>=14.16'} @@ -1880,12 +2036,22 @@ packages: resolution: {integrity: sha512-WIsKqkSC0ABoBJuT1LEX+2HEvNmNKKgnTAyd0fL8qzK4SH2i9NXg+t08YtdZp/V9IZ33cxe3iV4yM0qg8lMQng==} engines: {node: '>=16.20.1'} + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -2013,6 +2179,10 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} + commander@11.1.0: + resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==} + engines: {node: '>=16'} + commander@12.1.0: resolution: {integrity: sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==} engines: {node: '>=18'} @@ -2096,15 +2266,6 @@ packages: supports-color: optional: true - debug@4.4.0: - resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - debug@4.4.1: resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} engines: {node: '>=6.0'} @@ -2142,6 +2303,14 @@ packages: resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} engines: {node: '>=4.0.0'} + default-browser-id@5.0.0: + resolution: {integrity: sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==} + engines: {node: '>=18'} + + default-browser@5.2.1: + resolution: {integrity: sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==} + engines: {node: '>=18'} + defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} @@ -2149,6 +2318,10 @@ packages: resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} engines: {node: '>=10'} + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} 
@@ -2192,6 +2365,9 @@ packages: eastasianwidth@0.2.0: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + ecdsa-sig-formatter@1.0.11: + resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} @@ -2246,6 +2422,14 @@ packages: event-stream@3.3.4: resolution: {integrity: sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==} + event-target-shim@5.0.1: + resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + events@3.3.0: + resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + expect-type@1.2.2: resolution: {integrity: sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==} engines: {node: '>=12.0.0'} @@ -2475,6 +2659,10 @@ packages: resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} engines: {node: '>= 6'} + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + http2-wrapper@2.2.1: resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==} engines: {node: '>=10.19.0'} @@ -2483,6 +2671,10 @@ packages: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + human-id@1.0.2: resolution: {integrity: sha512-UNopramDEhHJD+VR+ehk8rOslwSfByxPIZyJRfV739NDhN5LF1fa1MqnzKm2lGTQRjNrjK19Q5fhkgIfjlVUKw==} @@ -2574,6 +2766,11 @@ packages: resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} engines: {node: '>= 0.4'} + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -2586,6 +2783,11 @@ packages: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + is-installed-globally@0.4.0: resolution: {integrity: sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==} engines: {node: '>=10'} @@ -2635,6 +2837,10 @@ packages: resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} engines: {node: '>=0.10.0'} + is-wsl@3.1.0: + resolution: {integrity: 
sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} + engines: {node: '>=16'} + is-yarn-global@0.4.1: resolution: {integrity: sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==} engines: {node: '>=12'} @@ -2660,6 +2866,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + js-md4@0.3.2: + resolution: {integrity: sha512-/GDnfQYsltsjRswQhN9fhv3EMw2sCpUdrdxyWDOUK7eyD++r3gRhzgiQgc/x4MAv2i1iuQ4lxO5mvqM3vj4bwA==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -2712,10 +2921,20 @@ packages: resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==} engines: {node: '>=0.10.0'} + jsonwebtoken@9.0.2: + resolution: {integrity: sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==} + engines: {node: '>=12', npm: '>=6'} + jsox@1.2.121: resolution: {integrity: sha512-9Ag50tKhpTwS6r5wh3MJSAvpSof0UBr39Pto8OnzFT32Z/pAbxAsKHzyvsyMEHVslELvHyO/4/jaQELHk8wDcw==} hasBin: true + jwa@1.4.2: + resolution: {integrity: sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==} + + jws@3.2.2: + resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} @@ -2745,6 +2964,27 @@ packages: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + lodash.startcase@4.4.0: resolution: {integrity: sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==} @@ -2937,6 +3177,11 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + mssql@11.0.1: + resolution: {integrity: sha512-KlGNsugoT90enKlR8/G36H0kTxPthDhmtNUCwEHvgRza5Cjpjoj+P2X6eMpFUDN7pFrJZsKadL4x990G8RBE1w==} + engines: {node: '>=18'} + hasBin: true + mute-stream@1.0.0: resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==} engines: {node: ^14.17.0 || 
^16.13.0 || >=18.0.0} @@ -2954,6 +3199,9 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true + native-duplexpair@1.0.0: + resolution: {integrity: sha512-E7QQoM+3jvNtlmyfqRZ0/U75VFgCls+fSkbml2MpgWkWyz3ox8Y58gNhfuziuQYGNNQAbFZJQck55LHCnCK6CA==} + nearley@2.20.1: resolution: {integrity: sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==} hasBin: true @@ -3077,6 +3325,10 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} + open@10.2.0: + resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} + engines: {node: '>=18'} + ora@5.4.1: resolution: {integrity: sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==} engines: {node: '>=10'} @@ -3287,6 +3539,10 @@ packages: process-warning@5.0.0: resolution: {integrity: sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA==} + process@0.11.10: + resolution: {integrity: sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==} + engines: {node: '>= 0.6.0'} + progress@2.0.3: resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==} engines: {node: '>=0.4.0'} @@ -3383,6 +3639,10 @@ packages: resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} engines: {node: '>= 6'} + readable-stream@4.7.0: + resolution: {integrity: sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + readdirp@3.6.0: resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} engines: {node: '>=8.10.0'} @@ -3482,6 +3742,10 @@ packages: rsocket-websocket-client@1.0.0-alpha.3: resolution: {integrity: sha512-CwTwTNMGa8BKvrWde/kM3q8IHuzO8RCIfzuj25BsVe9y8eehDQHt4fXk0g1i/wpsxTm+RY6DxE6Vr5snozKVOg==} + run-applescript@7.1.0: + resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + run-async@3.0.0: resolution: {integrity: sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==} engines: {node: '>=0.12.0'} @@ -3523,6 +3787,11 @@ packages: engines: {node: '>=10'} hasBin: true + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + seq-queue@0.0.5: resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} @@ -3739,6 +4008,14 @@ packages: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} + tarn@3.0.2: + resolution: {integrity: sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ==} + engines: {node: '>=8.0.0'} + + tedious@18.6.1: + resolution: {integrity: sha512-9AvErXXQTd6l7TDd5EmM+nxbOGyhnmdbp/8c3pw+tjaiSXW9usME90ET/CRG1LN1Y9tPMtz/p83z4Q97B4DDpw==} + engines: {node: '>=18'} + term-size@2.2.1: resolution: {integrity: 
sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} engines: {node: '>=8'} @@ -3859,6 +4136,9 @@ packages: tslib@2.6.3: resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + tuf-js@1.1.7: resolution: {integrity: sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -3939,6 +4219,10 @@ packages: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true + uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + v8-compile-cache-lib@3.0.1: resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} @@ -4113,6 +4397,10 @@ packages: utf-8-validate: optional: true + wsl-utils@0.1.0: + resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} + engines: {node: '>=18'} + xdg-basedir@5.1.0: resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} engines: {node: '>=12'} @@ -4174,6 +4462,151 @@ packages: snapshots: + '@azure-rest/core-client@2.5.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/abort-controller@2.1.2': + dependencies: + tslib: 2.6.3 + + '@azure/core-auth@1.10.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.1 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/core-client@1.10.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/core-http-compat@2.3.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-lro@2.7.2': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/core-paging@1.6.2': + dependencies: + tslib: 2.8.1 + + '@azure/core-rest-pipeline@1.22.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/core-tracing@1.3.1': + dependencies: + tslib: 2.6.3 + + '@azure/core-util@1.13.1': + dependencies: + '@azure/abort-controller': 2.1.2 + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/identity@4.13.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + 
'@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + '@azure/msal-browser': 4.25.1 + '@azure/msal-node': 3.8.0 + open: 10.2.0 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-common@2.0.0': + dependencies: + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-client': 1.10.1 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/keyvault-keys@4.10.0': + dependencies: + '@azure-rest/core-client': 2.5.1 + '@azure/abort-controller': 2.1.2 + '@azure/core-auth': 1.10.1 + '@azure/core-http-compat': 2.3.1 + '@azure/core-lro': 2.7.2 + '@azure/core-paging': 1.6.2 + '@azure/core-rest-pipeline': 1.22.1 + '@azure/core-tracing': 1.3.1 + '@azure/core-util': 1.13.1 + '@azure/keyvault-common': 2.0.0 + '@azure/logger': 1.3.0 + tslib: 2.8.1 + transitivePeerDependencies: + - supports-color + + '@azure/logger@1.3.0': + dependencies: + '@typespec/ts-http-runtime': 0.3.1 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + + '@azure/msal-browser@4.25.1': + dependencies: + '@azure/msal-common': 15.13.0 + + '@azure/msal-common@15.13.0': {} + + '@azure/msal-node@3.8.0': + dependencies: + '@azure/msal-common': 15.13.0 + jsonwebtoken: 9.0.2 + uuid: 8.3.2 + '@babel/code-frame@7.24.7': dependencies: '@babel/highlight': 7.24.7 @@ -4206,7 +4639,7 @@ snapshots: outdent: 0.5.0 prettier: 2.8.8 resolve-from: 5.0.0 - semver: 7.6.2 + semver: 7.7.3 '@changesets/assemble-release-plan@6.0.4': dependencies: @@ -4215,7 +4648,7 @@ snapshots: '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 - semver: 7.6.2 + semver: 7.7.3 '@changesets/changelog-git@0.2.0': dependencies: @@ -4273,7 +4706,7 @@ snapshots: '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 picocolors: 1.1.0 - semver: 7.6.2 + semver: 7.7.3 '@changesets/get-release-plan@4.0.4': dependencies: @@ -4483,6 +4916,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 + '@js-joda/core@5.6.5': {} + '@js-sdsl/ordered-set@4.4.2': {} '@manypkg/find-root@1.1.0': @@ -4520,11 +4955,11 @@ snapshots: '@npmcli/fs@2.1.2': dependencies: '@gar/promisify': 1.1.3 - semver: 7.6.2 + semver: 7.7.3 '@npmcli/fs@3.1.1': dependencies: - semver: 7.6.2 + semver: 7.7.3 '@npmcli/git@4.1.0': dependencies: @@ -4534,7 +4969,7 @@ snapshots: proc-log: 3.0.0 promise-inflight: 1.0.1 promise-retry: 2.0.1 - semver: 7.6.2 + semver: 7.7.3 which: 3.0.1 transitivePeerDependencies: - bluebird @@ -4812,7 +5247,7 @@ snapshots: '@types/shimmer': 1.2.0 import-in-the-middle: 1.14.2 require-in-the-middle: 7.3.0 - semver: 7.6.2 + semver: 7.7.3 shimmer: 1.2.1 transitivePeerDependencies: - supports-color @@ -5096,6 +5531,8 @@ snapshots: dependencies: defer-to-connect: 2.0.1 + '@tediousjs/connection-string@0.5.0': {} + '@tootallnate/once@2.0.0': {} '@tsconfig/node10@1.0.11': {} @@ -5133,6 +5570,14 @@ snapshots: '@types/lodash@4.17.6': {} + '@types/mssql@9.1.8': + dependencies: + '@types/node': 22.16.2 + tarn: 3.0.2 + tedious: 18.6.1 + transitivePeerDependencies: + - supports-color + '@types/mysql@2.15.27': dependencies: '@types/node': 22.16.2 @@ -5161,10 +5606,16 @@ snapshots: pg-protocol: 1.6.1 pg-types: 2.2.0 + '@types/readable-stream@4.0.21': + dependencies: + '@types/node': 22.16.2 + '@types/semver-utils@1.1.3': {} '@types/semver@7.5.8': {} + '@types/semver@7.7.1': {} + 
'@types/shimmer@1.2.0': {} '@types/strip-bom@3.0.0': {} @@ -5177,6 +5628,8 @@ snapshots: '@types/triple-beam@1.3.5': {} + '@types/uuid@10.0.0': {} + '@types/webidl-conversions@7.0.3': {} '@types/whatwg-url@11.0.5': @@ -5187,6 +5640,14 @@ snapshots: dependencies: '@types/node': 22.16.2 + '@typespec/ts-http-runtime@0.3.1': + dependencies: + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + tslib: 2.6.3 + transitivePeerDependencies: + - supports-color + '@vitest/expect@3.2.4': dependencies: '@types/chai': 5.2.2 @@ -5238,6 +5699,10 @@ snapshots: abbrev@1.1.1: {} + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + abstract-logging@2.0.1: {} acorn-import-attributes@1.9.5(acorn@8.15.0): @@ -5258,6 +5723,8 @@ snapshots: transitivePeerDependencies: - supports-color + agent-base@7.1.4: {} + agentkeepalive@4.5.0: dependencies: humanize-ms: 1.2.1 @@ -5372,6 +5839,13 @@ snapshots: inherits: 2.0.4 readable-stream: 3.6.2 + bl@6.1.4: + dependencies: + '@types/readable-stream': 4.0.21 + buffer: 6.0.3 + inherits: 2.0.4 + readable-stream: 4.7.0 + boxen@7.1.1: dependencies: ansi-align: 3.0.1 @@ -5398,6 +5872,8 @@ snapshots: bson@6.10.4: {} + buffer-equal-constant-time@1.0.1: {} + buffer-from@1.1.2: {} buffer@5.7.1: @@ -5405,6 +5881,15 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + cac@6.7.14: {} cacache@16.1.3: @@ -5565,6 +6050,8 @@ snapshots: commander@10.0.1: {} + commander@11.1.0: {} + commander@12.1.0: {} commander@2.20.3: {} @@ -5653,10 +6140,6 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.0: - dependencies: - ms: 2.1.3 - debug@4.4.1: dependencies: ms: 2.1.3 @@ -5673,12 +6156,21 @@ snapshots: deep-extend@0.6.0: {} + default-browser-id@5.0.0: {} + + default-browser@5.2.1: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.0 + defaults@1.0.4: dependencies: clone: 1.0.4 defer-to-connect@2.0.1: {} + define-lazy-prop@3.0.0: {} + delegates@1.0.0: {} denque@2.1.0: {} @@ -5709,6 +6201,10 @@ snapshots: eastasianwidth@0.2.0: {} + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + emoji-regex@8.0.0: {} emoji-regex@9.2.2: {} @@ -5782,6 +6278,10 @@ snapshots: stream-combiner: 0.0.4 through: 2.3.8 + event-target-shim@5.0.1: {} + + events@3.3.0: {} + expect-type@1.2.2: {} exponential-backoff@3.1.1: {} @@ -6042,6 +6542,13 @@ snapshots: transitivePeerDependencies: - supports-color + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + http2-wrapper@2.2.1: dependencies: quick-lru: 5.1.1 @@ -6054,6 +6561,13 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + human-id@1.0.2: {} humanize-ms@1.2.1: @@ -6142,6 +6656,8 @@ snapshots: dependencies: hasown: 2.0.2 + is-docker@3.0.0: {} + is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -6150,6 +6666,10 @@ snapshots: dependencies: is-extglob: 2.1.1 + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + is-installed-globally@0.4.0: dependencies: global-dirs: 3.0.1 @@ -6181,6 +6701,10 @@ snapshots: is-windows@1.0.2: {} + is-wsl@3.1.0: + dependencies: + is-inside-container: 1.0.0 + is-yarn-global@0.4.1: {} isarray@0.0.1: {} @@ -6204,6 +6728,8 @@ snapshots: jose@4.15.9: {} + js-md4@0.3.2: {} + js-tokens@4.0.0: {} js-tokens@9.0.1: {} @@ -6245,8 +6771,32 @@ snapshots: 
jsonpointer@5.0.1: {} + jsonwebtoken@9.0.2: + dependencies: + jws: 3.2.2 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.3 + jsox@1.2.121: {} + jwa@1.4.2: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@3.2.2: + dependencies: + jwa: 1.4.2 + safe-buffer: 5.2.1 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 @@ -6275,6 +6825,20 @@ snapshots: dependencies: p-locate: 5.0.0 + lodash.includes@4.3.0: {} + + lodash.isboolean@3.0.3: {} + + lodash.isinteger@4.0.4: {} + + lodash.isnumber@3.0.3: {} + + lodash.isplainobject@4.0.6: {} + + lodash.isstring@4.0.1: {} + + lodash.once@4.1.1: {} + lodash.startcase@4.4.0: {} lodash@4.17.21: {} @@ -6468,6 +7032,17 @@ snapshots: ms@2.1.3: {} + mssql@11.0.1: + dependencies: + '@tediousjs/connection-string': 0.5.0 + commander: 11.1.0 + debug: 4.4.1 + rfdc: 1.4.1 + tarn: 3.0.2 + tedious: 18.6.1 + transitivePeerDependencies: + - supports-color + mute-stream@1.0.0: {} mysql2@3.11.3: @@ -6488,6 +7063,8 @@ snapshots: nanoid@3.3.11: {} + native-duplexpair@1.0.0: {} + nearley@2.20.1: dependencies: commander: 2.20.3 @@ -6519,7 +7096,7 @@ snapshots: nopt: 6.0.0 npmlog: 6.0.2 rimraf: 3.0.2 - semver: 7.6.2 + semver: 7.7.3 tar: 6.2.1 which: 2.0.2 transitivePeerDependencies: @@ -6542,7 +7119,7 @@ snapshots: ignore-by-default: 1.0.1 minimatch: 3.1.2 pstree.remy: 1.1.8 - semver: 7.6.2 + semver: 7.7.3 simple-update-notifier: 2.0.0 supports-color: 5.5.0 touch: 3.1.1 @@ -6561,7 +7138,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 is-core-module: 2.14.0 - semver: 7.6.2 + semver: 7.7.3 validate-npm-package-license: 3.0.4 normalize-path@3.0.0: {} @@ -6599,7 +7176,7 @@ snapshots: rc-config-loader: 4.1.3 remote-git-tags: 3.0.0 rimraf: 5.0.9 - semver: 7.6.2 + semver: 7.7.3 semver-utils: 1.1.4 source-map-support: 0.5.21 spawn-please: 2.0.2 @@ -6615,7 +7192,7 @@ snapshots: npm-install-checks@6.3.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 npm-normalize-package-bin@3.0.1: {} @@ -6623,7 +7200,7 @@ snapshots: dependencies: hosted-git-info: 6.1.1 proc-log: 3.0.0 - semver: 7.6.2 + semver: 7.7.3 validate-npm-package-name: 5.0.1 npm-packlist@7.0.4: @@ -6635,7 +7212,7 @@ snapshots: npm-install-checks: 6.3.0 npm-normalize-package-bin: 3.0.1 npm-package-arg: 10.1.0 - semver: 7.6.2 + semver: 7.7.3 npm-registry-fetch@14.0.5: dependencies: @@ -6672,6 +7249,13 @@ snapshots: dependencies: mimic-fn: 2.1.0 + open@10.2.0: + dependencies: + default-browser: 5.2.1 + define-lazy-prop: 3.0.0 + is-inside-container: 1.0.0 + wsl-utils: 0.1.0 + ora@5.4.1: dependencies: bl: 4.1.0 @@ -6727,7 +7311,7 @@ snapshots: got: 12.6.1 registry-auth-token: 5.0.2 registry-url: 6.0.1 - semver: 7.6.2 + semver: 7.7.3 package-manager-detector@0.2.0: {} @@ -6882,6 +7466,8 @@ snapshots: process-warning@5.0.0: {} + process@0.11.10: {} + progress@2.0.3: {} promise-inflight@1.0.1: {} @@ -7008,6 +7594,14 @@ snapshots: string_decoder: 1.3.0 util-deprecate: 1.0.2 + readable-stream@4.7.0: + dependencies: + abort-controller: 3.0.0 + buffer: 6.0.3 + events: 3.3.0 + process: 0.11.10 + string_decoder: 1.3.0 + readdirp@3.6.0: dependencies: picomatch: 2.3.1 @@ -7032,7 +7626,7 @@ snapshots: require-in-the-middle@7.3.0: dependencies: - debug: 4.4.0 + debug: 4.4.1 module-details-from-path: 1.0.3 resolve: 1.22.8 transitivePeerDependencies: @@ -7112,6 +7706,8 @@ snapshots: dependencies: rsocket-core: 1.0.0-alpha.3 + 
run-applescript@7.1.0: {} + run-async@3.0.0: {} run-parallel@1.2.0: @@ -7138,12 +7734,14 @@ snapshots: semver-diff@4.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 semver-utils@1.1.4: {} semver@7.6.2: {} + semver@7.7.3: {} + seq-queue@0.0.5: {} set-blocking@2.0.0: {} @@ -7188,7 +7786,7 @@ snapshots: simple-update-notifier@2.0.0: dependencies: - semver: 7.6.2 + semver: 7.7.3 sisteransi@1.0.5: {} @@ -7352,6 +7950,23 @@ snapshots: mkdirp: 1.0.4 yallist: 4.0.0 + tarn@3.0.2: {} + + tedious@18.6.1: + dependencies: + '@azure/core-auth': 1.10.1 + '@azure/identity': 4.13.0 + '@azure/keyvault-keys': 4.10.0 + '@js-joda/core': 5.6.5 + '@types/node': 22.16.2 + bl: 6.1.4 + iconv-lite: 0.6.3 + js-md4: 0.3.2 + native-duplexpair: 1.0.0 + sprintf-js: 1.1.3 + transitivePeerDependencies: + - supports-color + term-size@2.2.1: {} text-hex@1.0.0: {} @@ -7463,6 +8078,8 @@ snapshots: tslib@2.6.3: {} + tslib@2.8.1: {} + tuf-js@1.1.7: dependencies: '@tufjs/models': 1.0.4 @@ -7528,7 +8145,7 @@ snapshots: is-yarn-global: 0.4.1 latest-version: 7.0.0 pupa: 3.1.0 - semver: 7.6.2 + semver: 7.7.3 semver-diff: 4.0.0 xdg-basedir: 5.1.0 @@ -7540,6 +8157,8 @@ snapshots: uuid@11.1.0: {} + uuid@8.3.2: {} + v8-compile-cache-lib@3.0.1: {} validate-npm-package-license@3.0.4: @@ -7799,6 +8418,10 @@ snapshots: ws@8.18.0: {} + wsl-utils@0.1.0: + dependencies: + is-wsl: 3.1.0 + xdg-basedir@5.1.0: {} xtend@4.0.2: {} From 33aa308e9d6e79634c5e9dd6da9264d3ee9193d7 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 09:06:19 +0200 Subject: [PATCH 13/42] Exposed no_checkpoint_before_lsn in the bucket storage batch. --- .../src/storage/implementation/MongoBucketBatch.ts | 4 ++++ .../src/storage/batch/PostgresBucketBatch.ts | 4 ++++ packages/service-core/src/storage/BucketStorageBatch.ts | 2 ++ 3 files changed, 10 insertions(+) diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts index 003f50134..50a9fbd76 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts @@ -146,6 +146,10 @@ export class MongoBucketBatch return this.last_checkpoint_lsn; } + get noCheckpointBeforeLsn() { + return this.no_checkpoint_before_lsn; + } + async flush(options?: storage.BatchBucketFlushOptions): Promise { let result: storage.FlushedResult | null = null; // One flush may be split over multiple transactions. 
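The getter added to these storage batches surfaces the batch's no-checkpoint-before position, which the MSSQL streaming replication later in this series reads to decide how far it must replicate before a checkpoint is consistent. A minimal sketch of that idea (hypothetical consumer code, not part of this patch; it assumes LSNs are encoded as lexicographically comparable strings, which is how PowerSync LSNs are ordered):

    // Sketch only: a checkpoint is only safe once replication has passed noCheckpointBeforeLsn.
    function canCreateConsistentCheckpoint(batch: { noCheckpointBeforeLsn: string }, replicatedLsn: string): boolean {
      return replicatedLsn >= batch.noCheckpointBeforeLsn;
    }

Patch 16 below logs exactly this comparison after a snapshot completes ("Need to replicate from ... to ... to be consistent").
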
diff --git a/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts b/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts
index 62e7f118e..ebae078ca 100644
--- a/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts
+++ b/modules/module-postgres-storage/src/storage/batch/PostgresBucketBatch.ts
@@ -100,6 +100,10 @@ export class PostgresBucketBatch
     return this.last_checkpoint_lsn;
   }
 
+  get noCheckpointBeforeLsn() {
+    return this.no_checkpoint_before_lsn;
+  }
+
   async [Symbol.asyncDispose]() {
     super.clearListeners();
   }
diff --git a/packages/service-core/src/storage/BucketStorageBatch.ts b/packages/service-core/src/storage/BucketStorageBatch.ts
index 62db7dd43..f71226191 100644
--- a/packages/service-core/src/storage/BucketStorageBatch.ts
+++ b/packages/service-core/src/storage/BucketStorageBatch.ts
@@ -83,6 +83,8 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageListener> {
+  get noCheckpointBeforeLsn(): string;
+
   updateTableProgress(table: SourceTable, progress: Partial<TableSnapshotStatus>): Promise<SourceTable>;

From 92637299871c9955fad4a6534c49ae0070990fd9 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Thu, 13 Nov 2025 09:07:42 +0200
Subject: [PATCH 14/42] Introduced mechanism to poll the CDC tables for changes via a CDCPoller

---
 .../src/common/mssqls-to-sqlite.ts            |   2 +-
 .../module-mssql/src/replication/CDCPoller.ts | 238 ++++++++++++++++++
 2 files changed, 239 insertions(+), 1 deletion(-)
 create mode 100644 modules/module-mssql/src/replication/CDCPoller.ts

diff --git a/modules/module-mssql/src/common/mssqls-to-sqlite.ts b/modules/module-mssql/src/common/mssqls-to-sqlite.ts
index fd5a9eb8c..cbb5fcec3 100644
--- a/modules/module-mssql/src/common/mssqls-to-sqlite.ts
+++ b/modules/module-mssql/src/common/mssqls-to-sqlite.ts
@@ -1,7 +1,7 @@
 import sql from 'mssql';
 import { DatabaseInputRow, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules';
 
-export function toSqliteInputRow(row: sql.IRecordSet<any>, columns: sql.IColumnMetadata): SqliteInputRow {
+export function toSqliteInputRow(row: any, columns: sql.IColumnMetadata): SqliteInputRow {
   let result: DatabaseInputRow = {};
   for (const key in row) {
     // We are very much expecting the column to be there
diff --git a/modules/module-mssql/src/replication/CDCPoller.ts b/modules/module-mssql/src/replication/CDCPoller.ts
new file mode 100644
index 000000000..31616551c
--- /dev/null
+++ b/modules/module-mssql/src/replication/CDCPoller.ts
@@ -0,0 +1,238 @@
+import { Logger, logger as defaultLogger, ReplicationAssertionError } from '@powersync/lib-services-framework';
+import timers from 'timers/promises';
+import { MSSQLConnectionManager } from './MSSQLConnectionManager.js';
+import { MSSQLSourceTable } from '../common/MSSQLSourceTable.js';
+import { LSN } from '../common/LSN.js';
+import sql from 'mssql';
+import { getMinLSN, incrementLSN } from '../utils/mssql.js';
+
+enum Operation {
+  DELETE = 1,
+  INSERT = 2,
+  UPDATE_BEFORE = 3,
+  UPDATE_AFTER = 4
+}
+/**
+ * Schema changes that are detectable by inspecting query events.
+ * Create table statements are not included here, since new tables are automatically detected when row events
+ * are received for them.
+ */
+export enum SchemaChangeType {
+  RENAME_TABLE = 'Rename Table',
+  DROP_TABLE = 'Drop Table',
+  TRUNCATE_TABLE = 'Truncate Table',
+  ALTER_TABLE_COLUMN = 'Alter Table Column',
+  REPLICATION_IDENTITY = 'Alter Replication Identity'
+}
+
+export interface SchemaChange {
+  type: SchemaChangeType;
+  /**
+   * The table that the schema change applies to.
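+   * For rename events this is the table's previous name; the new name is carried in newTable.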
+   */
+  table: string;
+  schema: string;
+  /**
+   * Populated for table renames if the newTable was matched by the DatabaseFilter
+   */
+  newTable?: string;
+}
+
+export interface CDCEventHandler {
+  onInsert: (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => Promise<void>;
+  onUpdate: (rowAfter: any, rowBefore: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => Promise<void>;
+  onDelete: (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => Promise<void>;
+  onCommit: (lsn: string, transactionCount: number) => Promise<void>;
+  onSchemaChange: (change: SchemaChange) => Promise<void>;
+}
+
+export interface CDCPollerOptions {
+  connectionManager: MSSQLConnectionManager;
+  eventHandler: CDCEventHandler;
+  sourceTables: MSSQLSourceTable[];
+  startLSN: LSN;
+  pollingBatchSize?: number;
+  pollingIntervalMs?: number;
+  logger?: Logger;
+}
+
+/**
+ * Periodically polls the CDC change tables of the configured source tables and forwards the
+ * detected changes to a CDCEventHandler.
+ */
+export class CDCPoller {
+  private connectionManager: MSSQLConnectionManager;
+  private eventHandler: CDCEventHandler;
+  private currentLSN: LSN;
+  private logger: Logger;
+  private listenerError: Error | null;
+
+  private isStopped: boolean = false;
+  private isStopping: boolean = false;
+  private isPolling: boolean = false;
+
+  constructor(public options: CDCPollerOptions) {
+    this.logger = options.logger ?? defaultLogger;
+    this.connectionManager = options.connectionManager;
+    this.eventHandler = options.eventHandler;
+    this.currentLSN = options.startLSN;
+    this.listenerError = null;
+  }
+
+  private get pollingBatchSize(): number {
+    return this.options.pollingBatchSize ?? 10;
+  }
+
+  private get pollingIntervalMs(): number {
+    return this.options.pollingIntervalMs ?? 1000;
+  }
+
+  private get sourceTables(): MSSQLSourceTable[] {
+    return this.options.sourceTables;
+  }
+
+  public async stop(): Promise<void> {
+    if (!(this.isStopped || this.isStopping)) {
+      this.isStopping = true;
+      this.isStopped = true;
+    }
+  }
+
+  public async replicateUntilStopped(): Promise<void> {
+    this.logger.info(`CDC polling started...`);
+    while (!this.isStopped) {
+      // Skip this cycle if a poll is already in flight (concurrency guard)
+      if (this.isPolling) {
+        await timers.setTimeout(this.pollingIntervalMs);
+        continue;
+      }
+
+      try {
+        const hasChanges = await this.poll();
+        if (!hasChanges) {
+          // No changes found, wait before the next poll
+          await timers.setTimeout(this.pollingIntervalMs);
+        }
+        // If changes were found, poll again immediately (no wait)
+      } catch (error) {
+        if (!(this.isStopped || this.isStopping)) {
+          this.listenerError = error as Error;
+          this.logger.error('Error during CDC polling:', error);
+          await this.stop();
+        }
+        break;
+      }
+    }
+
+    if (this.listenerError) {
+      this.logger.error('CDC polling was stopped due to an error:', this.listenerError);
+      throw this.listenerError;
+    }
+
+    this.logger.info(`CDC polling stopped...`);
+  }
+
+  private async poll(): Promise<boolean> {
+    // Set the polling flag to prevent concurrent polling cycles
+    this.isPolling = true;
+
+    try {
+      // Calculate the polling LSN bounds for this batch
+      const startLSN = await incrementLSN(this.currentLSN, this.connectionManager);
+
+      const { recordset: results } = await this.connectionManager.query(
+        `SELECT TOP (${this.pollingBatchSize}) start_lsn
+        FROM cdc.lsn_time_mapping
+        WHERE start_lsn >= @startLSN
+        ORDER BY start_lsn ASC
+        `,
+        [{ name: 'startLSN', type: sql.VarBinary, value: startLSN.toBinary() }]
+      );
+
+      // Handle the case where no results are returned (no new changes available)
+      if (results.length === 0) {
+        return false;
+      }
+
+      const endLSN = LSN.fromBinary(results[results.length - 1].start_lsn);
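+
+      // Note: cdc.lsn_time_mapping records one row per committed transaction that touched a CDC-enabled
+      // table, so endLSN is the commit LSN of the newest transaction in this polling batch.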
+
+      // If startLSN is greater than or equal to endLSN, no new changes are available
+      if (startLSN.compare(endLSN) >= 0) {
+        return false;
+      }
+
+      this.logger.info(`Polling bounds are ${startLSN} -> ${endLSN}. Total potential transactions: ${results.length}`);
+
+      // Poll each source table for changes within the bounds
+      let transactionCount = 0;
+      for (const table of this.sourceTables) {
+        const tableTransactionCount = await this.pollTable(table, { startLSN, endLSN });
+        // We poll for up to pollingBatchSize transactions, but these include transactions that are not
+        // applicable to our source tables. Each source table may or may not have transactions that apply
+        // to it, so just keep track of the highest number of transactions processed for any source table.
+        if (tableTransactionCount > transactionCount) {
+          transactionCount = tableTransactionCount;
+        }
+      }
+
+      // Commit once at endLSN after processing all tables
+      await this.eventHandler.onCommit(endLSN.toString(), transactionCount);
+
+      // Advance the poller to endLSN
+      this.currentLSN = endLSN;
+      this.logger.info(`Source Table transactions processed: ${transactionCount}.`);
+
+      return true;
+    } finally {
+      // Always clear the polling flag, even on error
+      this.isPolling = false;
+    }
+  }
+
+  private async pollTable(table: MSSQLSourceTable, bounds: { startLSN: LSN; endLSN: LSN }): Promise<number> {
+    // Check that the capture instance's minimum retained LSN is within the bounds
+    const minLSN = await getMinLSN(this.connectionManager, table);
+    if (minLSN.compare(bounds.endLSN) > 0) {
+      return 0;
+    } else if (minLSN.compare(bounds.startLSN) >= 0) {
+      bounds.startLSN = minLSN;
+    }
+    const { recordset: results } = await this.connectionManager.query(
+      `
+      SELECT * FROM ${table.allChangesFunction}(@from_lsn, @to_lsn, 'all update old') ORDER BY __$start_lsn, __$seqval
+      `,
+      [
+        { name: 'from_lsn', type: sql.VarBinary, value: bounds.startLSN.toBinary() },
+        { name: 'to_lsn', type: sql.VarBinary, value: bounds.endLSN.toBinary() }
+      ]
+    );
+
+    // An UPDATE is emitted as an UPDATE_BEFORE row immediately followed by an UPDATE_AFTER row,
+    // so the before image must be buffered across loop iterations.
+    let updateBefore: any = null;
+    for (const row of results) {
+      const transactionLSN = LSN.fromBinary(row.__$start_lsn);
+      switch (row.__$operation) {
+        case Operation.DELETE:
+          await this.eventHandler.onDelete(row, table, results.columns);
+          this.logger.info(`Processed DELETE row: ${transactionLSN}`);
+          break;
+        case Operation.INSERT:
+          await this.eventHandler.onInsert(row, table, results.columns);
+          this.logger.info(`Processed INSERT row: ${transactionLSN}`);
+          break;
+        case Operation.UPDATE_BEFORE:
+          updateBefore = row;
+          this.logger.info(`Buffered UPDATE before image: ${transactionLSN}`);
+          break;
+        case Operation.UPDATE_AFTER:
+          if (!updateBefore) {
+            throw new ReplicationAssertionError('Missing before image for update event.');
+          }
+          await this.eventHandler.onUpdate(row, updateBefore, table, results.columns);
+          updateBefore = null;
+          this.logger.info(`Processed UPDATE, after row: ${transactionLSN}`);
+          break;
+        default:
+          this.logger.warn(`Unknown operation type [${row.__$operation}] encountered in CDC changes.`);
+      }
+    }
+
+    // The number of change rows serves as an upper bound for the transactions seen by this table
+    return results.length;
+  }
+}

From 18c1560a1c5196014213c1a7700e7dcbb0a48929 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Thu, 13 Nov 2025 09:08:27 +0200
Subject: [PATCH 15/42] Support both SourceTable and MSSQLSourceTable in Snapshot queries

---
 .../src/replication/MSSQLSnapshotQuery.ts     | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts
index 4c65d3874..6b04754ba 100644
--- 
a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -1,4 +1,4 @@ -import { bson, ColumnDescriptor } from '@powersync/service-core'; +import { bson, ColumnDescriptor, SourceTable } from '@powersync/service-core'; import { SqliteValue } from '@powersync/service-sync-rules'; import { ServiceAssertionError } from '@powersync/lib-services-framework'; import { MSSQLBaseType } from '../types/mssql-data-types.js'; @@ -87,11 +87,12 @@ export class BatchedSnapshotQuery implements MSSQLSnapshotQuery { MSSQLBaseType.BIGINT ]; - static supports(table: MSSQLSourceTable) { - if (table.sourceTable.replicaIdColumns.length != 1) { + static supports(table: SourceTable | MSSQLSourceTable): boolean { + const sourceTable = table instanceof MSSQLSourceTable ? table.sourceTable : table; + if (sourceTable.replicaIdColumns.length != 1) { return false; } - const primaryKey = table.sourceTable.replicaIdColumns[0]; + const primaryKey = sourceTable.replicaIdColumns[0]; return primaryKey.typeId != null && BatchedSnapshotQuery.SUPPORTED_TYPES.includes(Number(primaryKey.typeId)); } @@ -184,7 +185,7 @@ export class BatchedSnapshotQuery implements MSSQLSnapshotQuery { * during streaming replication. */ export class IdSnapshotQuery implements MSSQLSnapshotQuery { - static supports(table: MSSQLSourceTable) { + static supports(table: SourceTable | MSSQLSourceTable) { // We have the same requirements as BatchedSnapshotQuery. // This is typically only used as a fallback when ChunkedSnapshotQuery // skipped some rows. From 5a7e2234de7756e081d746e0f6d9274925e5799f Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 09:09:26 +0200 Subject: [PATCH 16/42] Implemented CDC replication for mssql using the CDCPoller --- .../module-mssql/src/replication/CDCStream.ts | 543 ++++++++---------- modules/module-mssql/src/utils/mssql.ts | 237 ++++---- 2 files changed, 354 insertions(+), 426 deletions(-) diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts index 1a801c902..f9dd69ba8 100644 --- a/modules/module-mssql/src/replication/CDCStream.ts +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -2,11 +2,10 @@ import { container, DatabaseConnectionError, ErrorCode, - errors, Logger, logger as defaultLogger, - ReplicationAssertionError, ReplicationAbortedError, + ReplicationAssertionError, ServiceAssertionError } from '@powersync/lib-services-framework'; import { @@ -21,7 +20,9 @@ import { import { applyValueContext, CompatibilityContext, + DatabaseInputRow, SqliteInputRow, + SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; @@ -30,26 +31,29 @@ import { ReplicationMetric } from '@powersync/service-types'; import { BatchedSnapshotQuery, IdSnapshotQuery, + MSSQLSnapshotQuery, PrimaryKeyValue, - SimpleSnapshotQuery, - MSSQLSnapshotQuery + SimpleSnapshotQuery } from './MSSQLSnapshotQuery.js'; import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; import * as schema_utils from '../utils/schema.js'; +import { ResolvedTable } from '../utils/schema.js'; import { checkSourceConfiguration, + createCheckpoint, getCaptureInstance, getLatestLSN, + getLatestReplicatedLSN, isIColumnMetadata, isTableEnabledForCDC, isWithinRetentionThreshold } from '../utils/mssql.js'; -import { ResolvedTable } from '../utils/schema.js'; import sql from 'mssql'; import { toSqliteInputRow } from '../common/mssqls-to-sqlite.js'; import { LSN } from '../common/LSN.js'; import { 
MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; import { MSSQLSourceTableCache } from '../common/MSSQLSourceTableCache.js'; +import { CDCEventHandler, CDCPoller } from './CDCPoller.js'; export interface CDCStreamOptions { connections: MSSQLConnectionManager; @@ -71,14 +75,9 @@ export enum SnapshotStatus { RESTART_REQUIRED = 'restart-required' } -interface WriteChangePayload { - type: storage.SaveOperationTag; - row: sql.IRecordSet; - previous_row?: sql.IRecordSet; - schema: string; - table: string; - sourceTable: storage.SourceTable; - columns: Map; +export interface SnapshotStatusResult { + status: SnapshotStatus; + snapshotLSN: string | null; } export class CDCConfigurationError extends Error { @@ -120,7 +119,7 @@ export class CDCStream { * Keep track of whether we have done a commit or keepalive yet. * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present. */ - private isStartingReplication = true; + public isStartingReplication = true; constructor(private options: CDCStreamOptions) { this.logger = options.logger ?? defaultLogger; @@ -170,6 +169,36 @@ export class CDCStream { return this.options.snapshotBatchSize ?? 10_000; } + async replicate() { + try { + await this.initReplication(); + await this.streamChanges(); + } catch (e) { + await this.storage.reportError(e); + throw e; + } + } + + async populateTableCache() { + const sourceTables = this.syncRules.getSourceTables(); + await this.storage.startBatch( + { + logger: this.logger, + zeroLSN: LSN.ZERO, + defaultSchema: this.defaultSchema, + storeCurrentData: true + }, + async (batch) => { + for (let tablePattern of sourceTables) { + const tables = await this.getQualifiedTableNames(batch, tablePattern); + for (const table of tables) { + this.tableCache.set(table); + } + } + } + ); + } + async getQualifiedTableNames( batch: storage.BucketStorageBatch, tablePattern: TablePattern @@ -240,7 +269,6 @@ export class CDCStream { sourceTable: resolved.table, captureInstance: captureInstance }); - this.tableCache.set(resolvedTable); // Drop conflicting tables. This includes for example renamed tables. await batch.drop(resolved.dropTables); @@ -284,18 +312,18 @@ export class CDCStream { // We have to get this LSN _after_ we have finished the table snapshot. // // There are basically two relevant LSNs here: - // A: The LSN before the snapshot starts. We don't explicitly record this on the PowerSync side, - // but it is implicitly recorded in the replication slot. - // B: The LSN after the table snapshot is complete, which is what we get here. + // A: PreSnapshot: The LSN before the snapshot starts. + // B: PostSnapshot: The LSN after the table snapshot is complete, which is what we get here. // When we do the snapshot queries, the data that we get back for each batch could match the state // anywhere between A and B. To actually have a consistent state on our side, we need to: // 1. Complete the snapshot. // 2. Wait until logical replication has caught up with all the changes between A and B. // Calling `markSnapshotDone(LSN B)` covers that. - const tableLsnNotBefore = await getLatestLSN(this.connections); + const postSnapshotLSN = await getLatestLSN(this.connections); + this.logger.info(`Post snapshot LSN: ${postSnapshotLSN.toString()}`); // Side note: A ROLLBACK would probably also be fine here, since we only read in this transaction. 
await transaction.commit(); - const [updatedSourceTable] = await batch.markSnapshotDone([table.sourceTable], tableLsnNotBefore.toString()); + const [updatedSourceTable] = await batch.markSnapshotDone([table.sourceTable], postSnapshotLSN.toString()); this.tableCache.updateSourceTable(updatedSourceTable); } catch (e) { await transaction.rollback(); @@ -353,19 +381,18 @@ export class CDCStream { // The balance here is between latency overhead per FETCH call, // and not spending too much time on each FETCH call. // We aim for a couple of seconds on each FETCH call. + let batchReplicatedCount = 0; const cursor = query.next(); - hasRemainingData = false; - // MSSQL streams rows one by one for await (const result of cursor) { - if (isIColumnMetadata(result)) { + if (columns == null && isIColumnMetadata(result)) { columns = result; continue; } else { if (!columns) { throw new ReplicationAssertionError(`Missing column metadata`); } - const row: SqliteInputRow = toSqliteInputRow(result, columns!); - + const inputRow: SqliteInputRow = toSqliteInputRow(result, columns); + const row = this.syncRules.applyRowContext(inputRow); // This auto-flushes when the batch reaches its size limit await batch.save({ tag: storage.SaveOperationTag.INSERT, @@ -377,6 +404,7 @@ export class CDCStream { }); replicatedCount++; + batchReplicatedCount++; this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); } @@ -414,6 +442,11 @@ export class CDCStream { // We only abort after flushing throw new ReplicationAbortedError(`Initial replication interrupted`); } + + // When the batch of rows is smaller than the requested batch size we know it is the final batch + if (batchReplicatedCount < this.snapshotBatchSize) { + hasRemainingData = false; + } } } @@ -441,51 +474,63 @@ export class CDCStream { * If (partial) replication was done before on this slot, this clears the state * and starts again from scratch. */ - async startInitialReplication(status: SnapshotStatus) { + async startInitialReplication(snapshotStatus: SnapshotStatusResult) { + let { status, snapshotLSN } = snapshotStatus; + if (status === SnapshotStatus.RESTART_REQUIRED) { + this.logger.info(`Snapshot restart required, clearing state.`); // This happens if the last replicated checkpoint LSN is no longer available in the CDC tables. 
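      // Clearing drops all previously replicated data, so the snapshot below starts from a clean slate.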
await this.storage.clear({ signal: this.abortSignal }); } - const sourceTables = this.syncRules.getSourceTables(); await this.storage.startBatch( { logger: this.logger, zeroLSN: LSN.ZERO, defaultSchema: this.defaultSchema, - storeCurrentData: true, + storeCurrentData: false, skipExistingRows: true }, async (batch) => { - const tablesWithStatus: MSSQLSourceTable[] = []; - for (const tablePattern of sourceTables) { - const tables = await this.getQualifiedTableNames(batch, tablePattern); - // Pre-get counts - for (const table of tables) { - if (table.sourceTable.snapshotComplete) { - this.logger.info(`Skipping ${table.toQualifiedName()} - snapshot already done.`); - continue; - } - const count = await this.estimatedCountNumber(table); - const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { - totalEstimatedCount: count - }); - this.tableCache.updateSourceTable(updatedSourceTable); - tablesWithStatus.push(table); - - this.logger.info(`To replicate: ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); + if (snapshotLSN == null) { + // First replication attempt - set the snapshot LSN to the current LSN before starting + snapshotLSN = (await getLatestReplicatedLSN(this.connections)).toString(); + await batch.setResumeLsn(snapshotLSN); + const latestLSN = (await getLatestLSN(this.connections)).toString(); + this.logger.info(`Marking snapshot at ${snapshotLSN}, Latest DB LSN ${latestLSN}.`); + } else { + this.logger.info(`Resuming snapshot at ${snapshotLSN}.`); + } + + const tablesToSnapshot: MSSQLSourceTable[] = []; + for (const table of this.tableCache.getAll()) { + if (table.sourceTable.snapshotComplete) { + this.logger.info(`Skipping table [${table.toQualifiedName()}] - snapshot already done.`); + continue; } + + const count = await this.estimatedCountNumber(table); + const updatedSourceTable = await batch.updateTableProgress(table.sourceTable, { + totalEstimatedCount: count + }); + this.tableCache.updateSourceTable(updatedSourceTable); + tablesToSnapshot.push(table); + + this.logger.info(`To replicate: ${table.toQualifiedName()} ${table.sourceTable.formatSnapshotProgress()}`); } - for (const table of tablesWithStatus) { + for (const table of tablesToSnapshot) { await this.snapshotTableInTx(batch, table); this.touch(); } - // Always commit the initial snapshot at zero. - // This makes sure we don't skip any changes applied before starting this snapshot, - // in the case of snapshot retries. - await batch.commit(LSN.ZERO); + // This will not create a consistent checkpoint yet, but will persist the op. + // Actual checkpoint will be created when streaming replication caught up. + await batch.commit(snapshotLSN); + + this.logger.info( + `Snapshot done. 
Need to replicate from ${snapshotLSN} to ${batch.noCheckpointBeforeLsn} to be consistent` + ); } ); } @@ -500,77 +545,15 @@ export class CDCStream { return table; } - // async writeChange( - // batch: storage.BucketStorageBatch, - // payload: WriteChangePayload - // ): Promise { - // switch (payload.type) { - // case storage.SaveOperationTag.INSERT: - // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); - // const record = toSqliteInputRow(payload.row, payload.columns); - // return await batch.save({ - // tag: storage.SaveOperationTag.INSERT, - // sourceTable: payload.sourceTable, - // before: undefined, - // beforeReplicaId: undefined, - // after: record, - // afterReplicaId: getUuidReplicaIdentityBson(record, payload.sourceTable.replicaIdColumns) - // }); - // case storage.SaveOperationTag.UPDATE: - // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); - // // The previous row may be null if the replica id columns are unchanged. - // // It's fine to treat that the same as an insert. - // const beforeUpdated = payload.previous_row - // ? toSqliteInputRow(payload.previous_row, payload.columns) - // : undefined; - // const after = toSqliteInputRow(payload.row, payload.columns); - // - // return await batch.save({ - // tag: storage.SaveOperationTag.UPDATE, - // sourceTable: payload.sourceTable, - // before: beforeUpdated, - // beforeReplicaId: beforeUpdated - // ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns) - // : undefined, - // after: after, - // afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns) - // }); - // - // case storage.SaveOperationTag.DELETE: - // this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); - // const beforeDeleted = toSqliteInputRow(payload.row, payload.columns); - // - // return await batch.save({ - // tag: storage.SaveOperationTag.DELETE, - // sourceTable: payload.sourceTable, - // before: beforeDeleted, - // beforeReplicaId: getUuidReplicaIdentityBson(beforeDeleted, payload.sourceTable.replicaIdColumns), - // after: undefined, - // afterReplicaId: undefined - // }); - // default: - // return null; - // } - // } - - async replicate() { - try { - await this.initReplication(); - //await this.streamChanges(); - } catch (e) { - await this.storage.reportError(e); - throw e; - } - } - async initReplication() { const errors = await checkSourceConfiguration(this.connections); if (errors.length > 0) { throw new CDCConfigurationError(`CDC Configuration Errors: ${errors.join(', ')}`); } + await this.populateTableCache(); const snapshotStatus = await this.checkSnapshotStatus(); - if (snapshotStatus !== SnapshotStatus.DONE) { + if (snapshotStatus.status !== SnapshotStatus.DONE) { await this.startInitialReplication(snapshotStatus); } } @@ -579,15 +562,14 @@ export class CDCStream { * Checks if the initial sync has already been completed and if updates from the last checkpoint are still available * in the CDC instances. 
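   * When the snapshot is still in progress, the persisted snapshot LSN is returned so the snapshot can resume where it left off.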
   */
-  private async checkSnapshotStatus(): Promise<SnapshotStatus> {
+  private async checkSnapshotStatus(): Promise<SnapshotStatusResult> {
     const status = await this.storage.getStatus();
 
-    const snapshotDone = status.snapshot_done && status.checkpoint_lsn != null;
-    if (snapshotDone) {
+    if (status.snapshot_done && status.checkpoint_lsn) {
       // Snapshot is done, but we still need to check that the last known checkpoint LSN is still
       // within the threshold of the CDC tables
       this.logger.info(`Initial replication already done`);
 
-      const lastCheckpointLSN = LSN.fromString(status.checkpoint_lsn!);
+      const lastCheckpointLSN = LSN.fromString(status.checkpoint_lsn);
       // Check that the CDC tables still have valid data
       const isAvailable = await isWithinRetentionThreshold({
         checkpointLSN: lastCheckpointLSN,
         connectionManager: this.connections
       });
       if (!isAvailable) {
         this.logger.warn(
           `Updates from the last checkpoint are no longer available in the CDC instance, starting initial replication again.`
         );
       }
-      return isAvailable ? SnapshotStatus.DONE : SnapshotStatus.RESTART_REQUIRED;
+      return { status: isAvailable ? SnapshotStatus.DONE : SnapshotStatus.RESTART_REQUIRED, snapshotLSN: null };
+    } else {
+      return { status: SnapshotStatus.IN_PROGRESS, snapshotLSN: status.snapshot_lsn };
     }
+  }
+
+  async streamChanges() {
+    const reSnapshot: { table: storage.SourceTable; key: PrimaryKeyValue }[] = [];
+    // TODO Handle re-snapshot
+
+    const markRecordUnavailable = (record: SaveUpdate) => {
+      if (!IdSnapshotQuery.supports(record.sourceTable)) {
+        // If it's not supported, it's also safe to ignore
+        return;
+      }
+      let key: PrimaryKeyValue = {};
+      for (const column of record.sourceTable.replicaIdColumns) {
+        const name = column.name;
+        const value = record.after[name];
+        if (value == null) {
+          // We don't expect this to actually happen.
+          // The key should always be present in the "after" record.
+          return;
+        }
+        // We just need a consistent representation of the primary key, and don't care about fixed quirks.
+        key[name] = applyValueContext(value, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY);
+      }
+      reSnapshot.push({
+        table: record.sourceTable,
+        key: key
+      });
+    };
+
+    await this.storage.startBatch(
+      {
+        logger: this.logger,
+        zeroLSN: LSN.ZERO,
+        defaultSchema: this.defaultSchema,
+        storeCurrentData: false,
+        skipExistingRows: false,
+        markRecordUnavailable
+      },
+      async (batch) => {
+        if (batch.resumeFromLsn == null) {
+          throw new ReplicationAssertionError(`No LSN found to resume replication from.`);
+        }
+        const startLSN = LSN.fromString(batch.resumeFromLsn);
+        const sourceTables: MSSQLSourceTable[] = this.tableCache.getAll();
+        const eventHandler = this.createEventHandler(batch);
+
+        const poller = new CDCPoller({
+          connectionManager: this.connections,
+          eventHandler,
+          sourceTables,
+          startLSN,
+          logger: this.logger
+        });
+
+        this.abortSignal.addEventListener(
+          'abort',
+          async () => {
+            await poller.stop();
+          },
+          { once: true }
+        );
+
+        await createCheckpoint(this.connections);
 
-    return SnapshotStatus.IN_PROGRESS;
+        this.logger.info(`Streaming changes from: ${startLSN}`);
+        await poller.replicateUntilStopped();
+      }
+    );
   }
-
-  // async streamChanges() {
-  //   // When changing any logic here, check /docs/wal-lsns.md.
-  //   const { createEmptyCheckpoints } = await this.ensureStorageCompatibility();
-  //
-  //   const replicationOptions: Record<string, string> = {
-  //     proto_version: '1',
-  //     publication_names: PUBLICATION_NAME
-  //   };
-  //
-  //   /**
-  //    * Viewing the contents of logical messages emitted with `pg_logical_emit_message`
-  //    * is only supported on Postgres >= 14.0.
- // * https://www.postgresql.org/docs/14/protocol-logical-replication.html - // */ - // const exposesLogicalMessages = await this.checkLogicalMessageSupport(); - // if (exposesLogicalMessages) { - // /** - // * Only add this option if the Postgres server supports it. - // * Adding the option to a server that doesn't support it will throw an exception when starting logical replication. - // * Error: `unrecognized pgoutput option: messages` - // */ - // replicationOptions['messages'] = 'true'; - // } - // - // const replicationStream = replicationConnection.logicalReplication({ - // slot: this.slot_name, - // options: replicationOptions - // }); - // - // this.startedStreaming = true; - // - // let resnapshot: { table: storage.SourceTable; key: PrimaryKeyValue }[] = []; - // - // const markRecordUnavailable = (record: SaveUpdate) => { - // if (!IdSnapshotQuery.supports(record.sourceTable)) { - // // If it's not supported, it's also safe to ignore - // return; - // } - // let key: PrimaryKeyValue = {}; - // for (let column of record.sourceTable.replicaIdColumns) { - // const name = column.name; - // const value = record.after[name]; - // if (value == null) { - // // We don't expect this to actually happen. - // // The key should always be present in the "after" record. - // return; - // } - // // We just need a consistent representation of the primary key, and don't care about fixed quirks. - // key[name] = applyValueContext(value, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY); - // } - // resnapshot.push({ - // table: record.sourceTable, - // key: key - // }); - // }; - // - // await this.storage.startBatch( - // { - // logger: this.logger, - // zeroLSN: ZERO_LSN, - // defaultSchema: POSTGRES_DEFAULT_SCHEMA, - // storeCurrentData: true, - // skipExistingRows: false, - // markRecordUnavailable - // }, - // async (batch) => { - // // We don't handle any plain keepalive messages while we have transactions. - // // While we have transactions, we use that to advance the position. - // // Replication never starts in the middle of a transaction, so this starts as false. - // let skipKeepalive = false; - // let count = 0; - // - // for await (const chunk of replicationStream.pgoutputDecode()) { - // this.touch(); - // - // if (this.abortSignal.aborted) { - // break; - // } - // - // // chunkLastLsn may come from normal messages in the chunk, - // // or from a PrimaryKeepalive message. - // const { messages, lastLsn: chunkLastLsn } = chunk; - // - // /** - // * We can check if an explicit keepalive was sent if `exposesLogicalMessages == true`. - // * If we can't check the logical messages, we should assume a keepalive if we - // * receive an empty array of messages in a replication event. - // */ - // const assumeKeepAlive = !exposesLogicalMessages; - // let keepAliveDetected = false; - // const lastCommit = messages.findLast((msg) => msg.tag == 'commit'); - // - // for (const msg of messages) { - // if (msg.tag == 'relation') { - // await this.handleRelation(batch, getPgOutputRelation(msg), true); - // } else if (msg.tag == 'begin') { - // // This may span multiple transactions in the same chunk, or even across chunks. - // skipKeepalive = true; - // if (this.oldestUncommittedChange == null) { - // this.oldestUncommittedChange = new Date(Number(msg.commitTime / 1000n)); - // } - // } else if (msg.tag == 'commit') { - // this.metrics.getCounter(ReplicationMetric.TRANSACTIONS_REPLICATED).add(1); - // if (msg == lastCommit) { - // // Only commit if this is the last commit in the chunk. 
- // // This effectively lets us batch multiple transactions within the same chunk - // // into a single flush, increasing throughput for many small transactions. - // skipKeepalive = false; - // // flush() must be before the resnapshot check - that is - // // typically what reports the resnapshot records. - // await batch.flush({ oldestUncommittedChange: this.oldestUncommittedChange }); - // // This _must_ be checked after the flush(), and before - // // commit() or ack(). We never persist the resnapshot list, - // // so we have to process it before marking our progress. - // if (resnapshot.length > 0) { - // await this.resnapshot(batch, resnapshot); - // resnapshot = []; - // } - // const didCommit = await batch.commit(msg.lsn!, { - // createEmptyCheckpoints, - // oldestUncommittedChange: this.oldestUncommittedChange - // }); - // await this.ack(msg.lsn!, replicationStream); - // if (didCommit) { - // this.oldestUncommittedChange = null; - // this.isStartingReplication = false; - // } - // } - // } else { - // if (count % 100 == 0) { - // this.logger.info(`Replicating op ${count} ${msg.lsn}`); - // } - // - // /** - // * If we can see the contents of logical messages, then we can check if a keepalive - // * message is present. We only perform a keepalive (below) if we explicitly detect a keepalive message. - // * If we can't see the contents of logical messages, then we should assume a keepalive is required - // * due to the default value of `assumeKeepalive`. - // */ - // if (exposesLogicalMessages && isKeepAliveMessage(msg)) { - // keepAliveDetected = true; - // } - // - // count += 1; - // const flushResult = await this.writeChange(batch, msg); - // if (flushResult != null && resnapshot.length > 0) { - // // If we have large transactions, we also need to flush the resnapshot list - // // periodically. - // // TODO: make sure this bit is actually triggered - // await this.resnapshot(batch, resnapshot); - // resnapshot = []; - // } - // } - // } - // - // if (!skipKeepalive) { - // if (assumeKeepAlive || keepAliveDetected) { - // // Reset the detection flag. - // keepAliveDetected = false; - // - // // In a transaction, we ack and commit according to the transaction progress. - // // Outside transactions, we use the PrimaryKeepalive messages to advance progress. - // // Big caveat: This _must not_ be used to skip individual messages, since this LSN - // // may be in the middle of the next transaction. - // // It must only be used to associate checkpoints with LSNs. - // const didCommit = await batch.keepalive(chunkLastLsn); - // if (didCommit) { - // this.oldestUncommittedChange = null; - // } - // - // this.isStartingReplication = false; - // } - // - // // We receive chunks with empty messages often (about each second). - // // Acknowledging here progresses the slot past these and frees up resources. 
- // await this.ack(chunkLastLsn, replicationStream); - // } - // - // this.metrics.getCounter(ReplicationMetric.CHUNKS_REPLICATED).add(1); - // } - // } - // ); - // } + private createEventHandler(batch: storage.BucketStorageBatch): CDCEventHandler { + return { + onInsert: async (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const afterRow = this.toSqliteRow(row, columns); + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table.sourceTable, + before: undefined, + beforeReplicaId: undefined, + after: afterRow, + afterReplicaId: getUuidReplicaIdentityBson(afterRow, table.sourceTable.replicaIdColumns) + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onUpdate: async (rowAfter: any, rowBefore: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const beforeRow = this.toSqliteRow(rowBefore, columns); + const afterRow = this.toSqliteRow(rowAfter, columns); + await batch.save({ + tag: storage.SaveOperationTag.UPDATE, + sourceTable: table.sourceTable, + before: beforeRow, + beforeReplicaId: getUuidReplicaIdentityBson(beforeRow, table.sourceTable.replicaIdColumns), + after: afterRow, + afterReplicaId: getUuidReplicaIdentityBson(afterRow, table.sourceTable.replicaIdColumns) + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onDelete: async (row: any, table: MSSQLSourceTable, columns: sql.IColumnMetadata) => { + const beforeRow = this.toSqliteRow(row, columns); + await batch.save({ + tag: storage.SaveOperationTag.DELETE, + sourceTable: table.sourceTable, + before: beforeRow, + beforeReplicaId: getUuidReplicaIdentityBson(beforeRow, table.sourceTable.replicaIdColumns), + after: undefined, + afterReplicaId: undefined + }); + this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1); + }, + onCommit: async (lsn: string, transactionCount: number) => { + await batch.commit(lsn); + this.metrics.getCounter(ReplicationMetric.TRANSACTIONS_REPLICATED).add(transactionCount); + this.isStartingReplication = false; + }, + onSchemaChange: async () => { + // Schema changes are handled separately + } + }; + } + + /** + * Convert CDC row data to SqliteRow format. + * CDC rows include table columns plus CDC metadata columns (__$operation, __$start_lsn, etc.). + * We filter out the CDC metadata columns. 
+ */ + private toSqliteRow(row: any, columns: sql.IColumnMetadata): SqliteRow { + // CDC metadata columns in the row that should be excluded + const cdcMetadataColumns = ['__$operation', '__$start_lsn', '__$end_lsn', '__$seqval', '__$update_mask']; + + const filteredRow: DatabaseInputRow = {}; + for (const key in row) { + // Skip CDC metadata columns + if (!cdcMetadataColumns.includes(key)) { + filteredRow[key] = row[key]; + } + } + + const inputRow: SqliteInputRow = toSqliteInputRow(filteredRow, columns); + return this.syncRules.applyRowContext(inputRow); + } // async ack(lsn: string, replicationStream: pgwire.ReplicationStream) { // if (lsn == ZERO_LSN) { diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts index 6a4d25a58..79f6c58e3 100644 --- a/modules/module-mssql/src/utils/mssql.ts +++ b/modules/module-mssql/src/utils/mssql.ts @@ -1,123 +1,13 @@ import sql from 'mssql'; import { SourceTable } from '@powersync/service-core'; import { coerce, gte } from 'semver'; -import { logger } from '@powersync/lib-services-framework'; +import { errors, logger } from '@powersync/lib-services-framework'; import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; import { LSN } from '../common/LSN.js'; import { CaptureInstance, MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; import { MSSQLParameter } from '../types/mssql-data-types.js'; -export interface CreateStreamingQueryOptions { - query: string; - // Request to create the streaming query from - request: sql.Request; - // Cancel the iteration if this signal is aborted - signal?: AbortSignal; - // Maximum number of rows to buffer before pausing the request - maxQueueSize?: number; -} - -export interface StreamingQuery { - columns: { [name: string]: sql.IColumn }; - [Symbol.asyncIterator](): AsyncIterator>; -} - -export async function createStreamingQuery(options: CreateStreamingQueryOptions): Promise { - const { query, request, signal } = options; - const maxQueueSize = options.maxQueueSize ?? 
1000; - - // Wait for the recordSet event before returning - let columns: { [name: string]: sql.IColumn } = await new Promise((resolve) => { - // Record Column metadata - request.on('recordSet', (recordSet: { [name: string]: sql.IColumn }) => { - columns = recordSet; - resolve(recordSet); - }); - }); - - async function* rowGenerator(): AsyncGenerator> { - const rowQueue: Array> = []; - let resolveNext: (() => void) | null = null; - let streamingError: Error | null = null; - let isPaused = false; - let isDone = false; - - try { - request.on('row', (row: Record) => { - rowQueue.push(row); - if (rowQueue.length >= maxQueueSize) { - request.pause(); - isPaused = true; - } - if (resolveNext) { - resolveNext(); - resolveNext = null; - } - }); - - request.on('done', () => { - isDone = true; - if (resolveNext) { - resolveNext(); - resolveNext = null; - } - }); - - request.on('error', (err) => { - streamingError = err; - isDone = true; - }); - - // Don't start the query if we are already aborted - if (signal && signal.aborted) { - isDone = true; - } else { - // Start streaming - request.query(query); - - // Handle aborts by cancelling the request - signal?.addEventListener( - 'abort', - () => { - isDone = true; - request.cancel(); - if (resolveNext) { - resolveNext(); - resolveNext = null; - } - }, - { once: true } - ); - } - - // Loop until the stream is done and the queue is empty - while (!isDone || rowQueue.length > 0) { - if (rowQueue.length > 0) { - yield rowQueue.shift() as Record; - // Resume streaming if we are below half the max queue size - if (isPaused && rowQueue.length <= maxQueueSize / 2) { - request.resume(); - } - } else if (!isDone) { - await new Promise((resolve) => { - resolveNext = resolve; - }); - } - } - - if (streamingError) { - throw streamingError; - } - } finally { - request.cancel(); - } - } - - return { - columns: columns, - [Symbol.asyncIterator]: rowGenerator - }; -} +export const POWERSYNC_CHECKPOINTS_TABLE = '_powersync_checkpoints'; export const SUPPORTED_ENGINE_EDITIONS = new Map([ [2, 'Standard'], @@ -184,9 +74,76 @@ export async function checkSourceConfiguration(connectionManager: MSSQLConnectio errors.push(`The current user does not have the 'cdc_reader' role. 
Please assign this role to the user.`);
   }
 
+  // 4) Check if the _powersync_checkpoints table is correctly configured
+  const checkpointTableErrors = await ensurePowerSyncCheckpointsTable(connectionManager);
+  errors.push(...checkpointTableErrors);
+
+  return errors;
+}
+
+export async function ensurePowerSyncCheckpointsTable(connectionManager: MSSQLConnectionManager): Promise<string[]> {
+  const errors: string[] = [];
+  try {
+    // Check if the _powersync_checkpoints table exists in the configured schema
+    const { recordset: checkpointsResult } = await connectionManager.query(`
+      SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '${connectionManager.schema}' AND TABLE_NAME = '${POWERSYNC_CHECKPOINTS_TABLE}';
+    `);
+    if (checkpointsResult.length > 0) {
+      // Table already exists, check if CDC is enabled
+      const isEnabled = await isTableEnabledForCDC({
+        connectionManager,
+        table: POWERSYNC_CHECKPOINTS_TABLE,
+        schema: connectionManager.schema
+      });
+      if (!isEnabled) {
+        // Enable CDC on the table
+        await enableCDCForTable({
+          connectionManager,
+          table: POWERSYNC_CHECKPOINTS_TABLE
+        });
+      }
+      return errors;
+    }
+  } catch (error) {
+    errors.push(`Failed to ensure ${POWERSYNC_CHECKPOINTS_TABLE} table is correctly configured: ${error}`);
+  }
+
+  // Try to create the table
+  try {
+    await connectionManager.query(`
+      CREATE TABLE ${connectionManager.schema}.${POWERSYNC_CHECKPOINTS_TABLE} (
+        id INT IDENTITY PRIMARY KEY,
+        last_updated DATETIME NOT NULL DEFAULT (GETDATE())
+      )`);
+  } catch (error) {
+    errors.push(`Failed to create ${POWERSYNC_CHECKPOINTS_TABLE} table: ${error}`);
+  }
+
+  try {
+    // Enable CDC on the table if not already enabled
+    await enableCDCForTable({
+      connectionManager,
+      table: POWERSYNC_CHECKPOINTS_TABLE
+    });
+  } catch (error) {
+    errors.push(`Failed to enable CDC on ${POWERSYNC_CHECKPOINTS_TABLE} table: ${error}`);
+  }
+
+  return errors;
+}
+
+export async function createCheckpoint(connectionManager: MSSQLConnectionManager): Promise<void> {
+  await connectionManager.query(`
+    MERGE ${connectionManager.schema}.${POWERSYNC_CHECKPOINTS_TABLE} AS target
+    USING (SELECT 1 AS id) AS source
+    ON target.id = source.id
+    WHEN MATCHED THEN
+      UPDATE SET last_updated = GETDATE()
+    WHEN NOT MATCHED THEN
+      INSERT (last_updated) VALUES (GETDATE());
+  `);
+}
+
 export interface IsTableEnabledForCDCOptions {
   connectionManager: MSSQLConnectionManager;
   table: string;
@@ -211,6 +168,22 @@ export async function isTableEnabledForCDC(options: IsTableEnabledForCDCOptions)
   return checkResult.length > 0;
 }
 
+export interface EnableCDCForTableOptions {
+  connectionManager: MSSQLConnectionManager;
+  table: string;
+}
+
+export async function enableCDCForTable(options: EnableCDCForTableOptions): Promise<void> {
+  const { connectionManager, table } = options;
+
+  await connectionManager.execute('sys.sp_cdc_enable_table', [
+    { name: 'source_schema', value: connectionManager.schema },
+    { name: 'source_name', value: table },
+    { name: 'role_name', value: 'NULL' },
+    { name: 'supports_net_changes', value: 1 }
+  ]);
+}
+
 /**
 * Check if the supplied version is newer or equal to the target version.
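 * For example, with the semver helpers imported above ('16.0.1000' and '16.0.0' being illustrative
 * values), coerce() normalizes the reported version string before gte() compares it against the
 * target, so '16.0.1000' satisfies a target of '16.0.0'.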
 * @param version
@@ -238,16 +211,7 @@ export interface IsWithinRetentionThresholdOptions {
 export async function isWithinRetentionThreshold(options: IsWithinRetentionThresholdOptions): Promise<boolean> {
   const { checkpointLSN, tables, connectionManager } = options;
   for (const table of tables) {
-    const { recordset: result } = await connectionManager.query('SELECT sys.fn_cdc_get_min_lsn(dbo_lists) AS min_lsn', [
-      {
-        name: 'capture_instance',
-        type: sql.NVarChar,
-        value: table.captureInstance
-      }
-    ]);
-
-    const rawMinLSN: Buffer = result[0].min_lsn;
-    const minLSN = LSN.fromBinary(rawMinLSN);
+    const minLSN = await getMinLSN(connectionManager, table);
     if (minLSN > checkpointLSN) {
       logger.warn(
         `The checkpoint LSN:[${checkpointLSN}] is older than the minimum LSN:[${minLSN}] for table ${table.sourceTable.qualifiedName}. This indicates that the checkpoint LSN is outside of the retention window.`
@@ -258,6 +222,22 @@ export async function isWithinRetentionThres
   return true;
 }
 
+export async function getMinLSN(connectionManager: MSSQLConnectionManager, table: MSSQLSourceTable): Promise<LSN> {
+  const { recordset: result } = await connectionManager.query(
+    `SELECT sys.fn_cdc_get_min_lsn('${table.captureInstance}') AS min_lsn`
+  );
+  const rawMinLSN: Buffer = result[0].min_lsn;
+  return LSN.fromBinary(rawMinLSN);
+}
+
+export async function incrementLSN(lsn: LSN, connectionManager: MSSQLConnectionManager): Promise<LSN> {
+  const { recordset: result } = await connectionManager.query(
+    `SELECT sys.fn_cdc_increment_lsn(@lsn) AS incremented_lsn`,
+    [{ name: 'lsn', type: sql.VarBinary, value: lsn.toBinary() }]
+  );
+  return LSN.fromBinary(result[0].incremented_lsn);
+}
+
 export async function getCaptureInstance(
   connectionManager: MSSQLConnectionManager,
   table: SourceTable
@@ -288,10 +268,21 @@
 }
 
 /**
- * Return the maximum LSN in the CDC tables. This is the LSN that corresponds to the latest update available.
+ * Return the LSN of the latest transaction recorded in the transaction log.
 * @param connectionManager
 */
 export async function getLatestLSN(connectionManager: MSSQLConnectionManager): Promise<LSN> {
+  const { recordset: result } = await connectionManager.query(
+    'SELECT log_end_lsn FROM sys.dm_db_log_stats(DB_ID()) AS log_end_lsn'
+  );
+  return LSN.fromString(result[0].log_end_lsn);
+}
+
+/**
+ * Return the LSN of the latest transaction replicated to the CDC tables.
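+ * Unlike getLatestLSN above, which reads the end of the transaction log from sys.dm_db_log_stats,
+ * this value comes from sys.fn_cdc_get_max_lsn() and only advances once the CDC capture job has
+ * processed the log, so it can lag behind the log end LSN while changes are still pending.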
+ * @param connectionManager + */ +export async function getLatestReplicatedLSN(connectionManager: MSSQLConnectionManager): Promise { const { recordset: result } = await connectionManager.query('SELECT sys.fn_cdc_get_max_lsn() AS max_lsn;'); // LSN is a binary(10) returned as a Buffer const rawLSN: Buffer = result[0].max_lsn; From c8eba472dca3dc03eeeaba30c09830ad23a7dd7b Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 09:09:57 +0200 Subject: [PATCH 17/42] Added unit tests and utils for CDCStream class --- .../module-mssql/test/src/CDCStream.test.ts | 164 +++++++++++++----- .../test/src/CDCStreamTestContext.ts | 41 +++-- .../src/CDCStream_resume_snapshot.test.ts | 151 ++++++++++++++++ modules/module-mssql/test/src/env.ts | 1 + modules/module-mssql/test/src/util.ts | 74 ++++++-- 5 files changed, 361 insertions(+), 70 deletions(-) create mode 100644 modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts index 197bc4ef5..9dcf4a919 100644 --- a/modules/module-mssql/test/src/CDCStream.test.ts +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -1,10 +1,16 @@ import { describe, expect, test } from 'vitest'; import { METRICS_HELPER, putOp } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; -import { v4 as uuid } from 'uuid'; -import { describeWithStorage, enableCDCForTable, INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; +import { + createTestTable, + describeWithStorage, + INITIALIZED_MONGO_STORAGE_FACTORY, + insertTestData, + waitForPendingCDCChanges +} from './util.js'; import { storage } from '@powersync/service-core'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; +import { enableCDCForTable, getLatestReplicatedLSN } from '@module/utils/mssql.js'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -25,50 +31,130 @@ function defineCDCStreamTests(factory: storage.TestStorageFactory) { const { connectionManager } = context; await context.updateSyncRules(BASIC_SYNC_RULES); - await connectionManager.query(`CREATE TABLE test_data (id UNIQUEIDENTIFIER PRIMARY KEY, description VARCHAR(MAX))`); - await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data' }); - const testId = uuid(); - await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId}','test1')`); - + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; await context.replicateSnapshot(); + await context.startStreaming(); const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 
0; const data = await context.getBucketData('global[]'); - expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]); + expect(data).toMatchObject([putOp('test_data', testData)]); + expect(endRowCount - startRowCount).toEqual(1); + }); + + test('Replicate basic values', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + await context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + const testData = await insertTestData(connectionManager, 'test_data'); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_data', testData)]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toEqual(1); }); - // test('Replicate basic values', async () => { - // await using context = await CDCStreamTestContext.open(factory); - // const { connectionManager } = context; - // await context.updateSyncRules(` - // bucket_definitions: - // global: - // data: - // - SELECT id, description, num FROM "test_data"`); - // - // await connectionManager.query( - // `CREATE TABLE test_data (id UNIQUEIDENTIFIER PRIMARY KEY, description VARCHAR(MAX), num BIGINT)` - // ); - // - // await context.replicateSnapshot(); - // - // const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; - // const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; - // - // await context.startStreaming(); - // const testId = uuid(); - // await connectionManager.query( - // `INSERT INTO test_data(id, description, num) VALUES('${testId}', 'test1', 1152921504606846976)` - // ); - // const data = await context.getBucketData('global[]'); - // - // expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]); - // const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; - // const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 
0; - // expect(endRowCount - startRowCount).toEqual(1); - // expect(endTxCount - startTxCount).toEqual(1); - // }); + test('Replicate matched wild card tables in sync rules', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description FROM "test_data_%"`); + + await createTestTable(connectionManager, 'test_data_1'); + await createTestTable(connectionManager, 'test_data_2'); + + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData11 = await insertTestData(connectionManager, 'test_data_1'); + const testData21 = await insertTestData(connectionManager, 'test_data_2'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + await context.replicateSnapshot(); + await context.startStreaming(); + + const testData12 = await insertTestData(connectionManager, 'test_data_1'); + const testData22 = await insertTestData(connectionManager, 'test_data_2'); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data_1', testData11), + putOp('test_data_2', testData21), + putOp('test_data_1', testData12), + putOp('test_data_2', testData22) + ]); + }); + + test('Replication for tables not in the sync rules are ignored', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_donotsync'); + + await context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + await insertTestData(connectionManager, 'test_donotsync'); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + // There was a transaction, but it is not counted since it is not for a table in the sync rules + expect(endRowCount - startRowCount).toEqual(0); + expect(endTxCount - startTxCount).toEqual(0); + }); + + test('Replicate case sensitive table', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description FROM "test_DATA" + `); + + await createTestTable(connectionManager, 'test_DATA'); + + await context.replicateSnapshot(); + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0; + const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + + await context.startStreaming(); + + const testData = await insertTestData(connectionManager, 'test_DATA'); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_DATA', testData)]); + const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 
0; + const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toBeGreaterThanOrEqual(1); + }); } diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts index 31c5a1f9e..614ab03c1 100644 --- a/modules/module-mssql/test/src/CDCStreamTestContext.ts +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -11,6 +11,7 @@ import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; import { CDCStream, CDCStreamOptions } from '@module/replication/CDCStream.js'; import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import timers from 'timers/promises'; /** * Tests operating on the change data capture need to configure the stream and manage asynchronous @@ -51,7 +52,11 @@ export class CDCStreamTestContext implements AsyncDisposable { } async [Symbol.asyncDispose]() { - await this.dispose(); + try { + await this.dispose(); + } catch (err) { + console.error('Error disposing CDCStreamTestContext', err); + } } async dispose() { @@ -126,26 +131,26 @@ export class CDCStreamTestContext implements AsyncDisposable { } // TODO: Enable once streaming is implemented - // startStreaming() { - // if (!this.replicationDone) { - // throw new Error('Call replicateSnapshot() before startStreaming()'); - // } - // this.streamPromise = this.cdcStream.streamChanges(); - // Wait for the replication to start before returning. - // This avoids a bunch of unpredictable race conditions that appear in testing - //return new Promise(async (resolve) => { - //while (this.binlogStream.isStartingReplication) { - //await timers.setTimeout(50); - //} - - //resolve(); - //}); - // } + startStreaming() { + if (!this.replicationDone) { + throw new Error('Call replicateSnapshot() before startStreaming()'); + } + this.streamPromise = this.cdcStream.streamChanges(); + // Wait for the replication to start before returning. + // This avoids a bunch of unpredictable race conditions that appear in testing + return new Promise(async (resolve) => { + while (this.cdcStream.isStartingReplication) { + await timers.setTimeout(50); + } + + resolve(); + }); + } async getCheckpoint(options?: { timeout?: number }) { let checkpoint = await Promise.race([ - getClientCheckpoint(this.connectionManager, this.factory, { timeout: options?.timeout ?? 15_000 }) - //this.streamPromise + getClientCheckpoint(this.connectionManager, this.factory, { timeout: options?.timeout ?? 
15_000 }), + this.streamPromise ]); if (checkpoint == null) { // This indicates an issue with the test setup - streamingPromise completed instead diff --git a/modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts new file mode 100644 index 000000000..eec3c4d0a --- /dev/null +++ b/modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts @@ -0,0 +1,151 @@ +import { describe, expect, test } from 'vitest'; +import { env } from './env.js'; +import { describeWithStorage } from './util.js'; +import { TestStorageFactory } from '@powersync/service-core'; +import { METRICS_HELPER } from '@powersync/service-core-tests'; +import { ReplicationMetric } from '@powersync/service-types'; +import * as timers from 'node:timers/promises'; +import { ReplicationAbortedError } from '@powersync/lib-services-framework'; +import { CDCStreamTestContext } from './CDCStreamTestContext.js'; +import { enableCDCForTable } from '@module/utils/mssql.js'; + +describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { + describeWithStorage({ timeout: 240_000 }, function (factory) { + test('resuming initial replication (1)', async () => { + // Stop early - likely to not include deleted row in first replication attempt. + await testResumingReplication(factory, 2000); + }); + test('resuming initial replication (2)', async () => { + // Stop late - likely to include deleted row in first replication attempt. + await testResumingReplication(factory, 8000); + }); + }); +}); + +async function testResumingReplication(factory: TestStorageFactory, stopAfter: number) { + // This tests interrupting and then resuming initial replication. + // We interrupt replication after test_data1 has fully replicated, and + // test_data2 has partially replicated. + // This test relies on interval behavior that is not 100% deterministic: + // 1. We attempt to abort initial replication once a certain number of + // rows have been replicated, but this is not exact. Our only requirement + // is that we have not fully replicated test_data2 yet. + // 2. Order of replication is not deterministic, so which specific rows + // have been / have not been replicated at that point is not deterministic. + // We do allow for some variation in the test results to account for this. + + await using context = await CDCStreamTestContext.open(factory, { cdcStreamOptions: { snapshotBatchSize: 1000 } }); + + await context.updateSyncRules(`bucket_definitions: + global: + data: + - SELECT * FROM test_data1 + - SELECT * FROM test_data2`); + const { connectionManager } = context; + + await connectionManager.query(`CREATE TABLE test_data1 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`); + await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data1' }); + await connectionManager.query(`CREATE TABLE test_data2 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`); + await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data2' }); + + await connectionManager.query( + `INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1); + INSERT INTO test_data2(description) SELECT 'value' FROM GENERATE_SERIES(1, 10000, 1);` + ); + + const p = context.replicateSnapshot(); + + let done = false; + + const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 
0; + try { + (async () => { + while (!done) { + const count = + ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount; + + if (count >= stopAfter) { + break; + } + await timers.setTimeout(1); + } + // This interrupts initial replication + await context.dispose(); + })(); + // This confirms that initial replication was interrupted + const error = await p.catch((e) => e); + expect(error).toBeInstanceOf(ReplicationAbortedError); + done = true; + } finally { + done = true; + } + + // Bypass the usual "clear db on factory open" step. + await using context2 = await CDCStreamTestContext.open(factory, { + doNotClear: true, + cdcStreamOptions: { snapshotBatchSize: 1000 } + }); + + // This delete should be using one of the ids already replicated + const { + recordset: [id1] + } = await context2.connectionManager.query(`DELETE TOP (1) FROM test_data2 OUTPUT DELETED.id`); + // This update should also be using one of the ids already replicated + const { + recordset: [id2] + } = await context2.connectionManager.query( + `UPDATE test_data2 SET description = 'update1' OUTPUT INSERTED.id WHERE id = (SELECT TOP 1 id FROM test_data2)` + ); + const { + recordset: [id3] + } = await context2.connectionManager.query( + `INSERT INTO test_data2(description) OUTPUT INSERTED.id VALUES ('insert1')` + ); + + await context2.loadNextSyncRules(); + await context2.replicateSnapshot(); + + await context2.startStreaming(); + const data = await context2.getBucketData('global[]', undefined, {}); + + const deletedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id1)); + const updatedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id2)); + const insertedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id3)); + + if (deletedRowOps.length != 0) { + // The deleted row was part of the first replication batch, + // so it is removed by streaming replication. + expect(deletedRowOps.length).toEqual(2); + expect(deletedRowOps[1].op).toEqual('REMOVE'); + } else { + // The deleted row was not part of the first replication batch, + // so it's not in the resulting ops at all. + } + + expect(updatedRowOps.length).toEqual(2); + // description for the first op could be 'foo' or 'update1'. + // We only test the final version. + expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1'); + + expect(insertedRowOps.length).toEqual(2); + expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1'); + expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1'); + + // 1000 of test_data1 during first replication attempt. + // N >= 1000 of test_data2 during first replication attempt. + // 10000 - N - 1 + 1 of test_data2 during second replication attempt. + // An additional update during streaming replication (2x total for this row). + // An additional insert during streaming replication (2x total for this row). + // If the deleted row was part of the first replication batch, it's removed by streaming replication. + // This adds 2 ops. + // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000. + // However, this is not deterministic. + const expectedCount = 11002 + deletedRowOps.length; + expect(data.length).toEqual(expectedCount); + + const replicatedCount = + ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 
0) - startRowCount; + + // With resumable replication, there should be no need to re-replicate anything. + expect(replicatedCount).toEqual(expectedCount); +} diff --git a/modules/module-mssql/test/src/env.ts b/modules/module-mssql/test/src/env.ts index ac05d7d71..f3cc7a6cc 100644 --- a/modules/module-mssql/test/src/env.ts +++ b/modules/module-mssql/test/src/env.ts @@ -4,6 +4,7 @@ export const env = utils.collectEnvironmentVariables({ MSSQL_TEST_URI: utils.type.string.default(`mssql://sa:321strong_ROOT_password@localhost:1433/powersync`), MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), CI: utils.type.boolean.default('false'), + SLOW_TESTS: utils.type.boolean.default('false'), PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5431/powersync_storage_test'), TEST_MONGO_STORAGE: utils.type.boolean.default('true'), TEST_POSTGRES_STORAGE: utils.type.boolean.default('true') diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts index c5344f579..0f1faf18f 100644 --- a/modules/module-mssql/test/src/util.ts +++ b/modules/module-mssql/test/src/util.ts @@ -8,8 +8,10 @@ import * as postgres_storage from '@powersync/service-module-postgres-storage'; import { describe, TestOptions } from 'vitest'; import { env } from './env.js'; import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; -import { getLatestLSN } from '@module/utils/mssql.js'; +import { createCheckpoint, enableCDCForTable, getLatestLSN, getLatestReplicatedLSN } from '@module/utils/mssql.js'; import sql from 'mssql'; +import { v4 as uuid } from 'uuid'; +import { LSN } from '@module/common/LSN.js'; export const TEST_URI = env.MSSQL_TEST_URI; @@ -75,21 +77,59 @@ export async function createTestDb(connectionManager: MSSQLConnectionManager, db GO`); } -export interface EnableCDCForTableOptions { - connectionManager: MSSQLConnectionManager; - schema: string; - table: string; +export async function createTestTable(connectionManager: MSSQLConnectionManager, tableName: string): Promise { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.${tableName} ( + id UNIQUEIDENTIFIER PRIMARY KEY, + description VARCHAR(MAX) + ) + `); + await enableCDCForTable({ connectionManager, table: tableName }); } -export async function enableCDCForTable(options: EnableCDCForTableOptions): Promise { - const { connectionManager, schema, table } = options; +export interface TestData { + id: string; + description: string; +} +export async function insertTestData(connectionManager: MSSQLConnectionManager, tableName: string): Promise { + const id = createUUID(); + const description = `description_${id}`; + await connectionManager.query( + ` + INSERT INTO ${connectionManager.schema}.${tableName} (id, description) VALUES (@id, @description) + `, + [ + { name: 'id', type: sql.UniqueIdentifier, value: id }, + { name: 'description', type: sql.NVarChar(sql.MAX), value: description } + ] + ); + + return { id, description }; +} - await connectionManager.execute('sys.sp_cdc_enable_table', [ - { name: 'source_schema', value: schema }, - { name: 'source_name', value: table }, - { name: 'role_name', value: 'NULL' }, - { name: 'supports_net_changes', value: 1 } - ]); +export async function waitForPendingCDCChanges( + beforeLSN: LSN, + connectionManager: MSSQLConnectionManager +): Promise { + while (true) { + const { recordset: result } = await connectionManager.query( + ` + SELECT TOP 1 start_lsn + FROM cdc.lsn_time_mapping + 
WHERE start_lsn > @before_lsn + ORDER BY start_lsn DESC + `, + [{ name: 'before_lsn', type: sql.VarBinary, value: beforeLSN.toBinary() }] + ); + + if (result.length === 0) { + logger.info(`CDC changes pending. Waiting for 500ms...`); + await new Promise((resolve) => setTimeout(resolve, 500)); + } else { + logger.info(`Found LSN: ${LSN.fromBinary(result[0].start_lsn).toString()}`); + return; + } + } } export async function getClientCheckpoint( @@ -100,6 +140,7 @@ export async function getClientCheckpoint( const start = Date.now(); const lsn = await getLatestLSN(connectionManager); + await createCheckpoint(connectionManager); // This old API needs a persisted checkpoint id. // Since we don't use LSNs anymore, the only way to get that is to wait. @@ -125,3 +166,10 @@ export async function getClientCheckpoint( throw new Error(`Timeout while waiting for checkpoint ${lsn}. Last checkpoint: ${lastCp?.lsn}`); } + +/** + * Generates a new UUID string in uppercase for testing purposes to match the SQL Server UNIQUEIDENTIFIER format. + */ +export function createUUID(): string { + return uuid().toUpperCase(); +} From 6caa1b639cfba6608f0b9c70c242ea9db655edbc Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 09:14:23 +0200 Subject: [PATCH 18/42] Clean up unused imports --- .../module-mssql/src/replication/CDCPoller.ts | 2 +- .../module-mssql/src/replication/CDCStream.ts | 13 ------------- .../src/replication/MSSQLSnapshotQuery.ts | 4 ++-- modules/module-mssql/src/utils/mssql.ts | 2 +- .../module-mssql/test/src/CDCStream.test.ts | 18 ++++++------------ modules/module-mssql/test/src/util.ts | 2 +- 6 files changed, 11 insertions(+), 30 deletions(-) diff --git a/modules/module-mssql/src/replication/CDCPoller.ts b/modules/module-mssql/src/replication/CDCPoller.ts index 31616551c..dac1a9bae 100644 --- a/modules/module-mssql/src/replication/CDCPoller.ts +++ b/modules/module-mssql/src/replication/CDCPoller.ts @@ -222,7 +222,7 @@ export class CDCPoller { this.logger.info(`Processed UPDATE, before row: ${transactionLSN}`); break; case Operation.UPDATE_AFTER: - if (!updateBefore) { + if (updateBefore === null) { throw new ReplicationAssertionError('Missing before image for update event.'); } await this.eventHandler.onUpdate(row, updateBefore, table, results.columns); diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts index f9dd69ba8..b4a8ce85a 100644 --- a/modules/module-mssql/src/replication/CDCStream.ts +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -9,7 +9,6 @@ import { ServiceAssertionError } from '@powersync/lib-services-framework'; import { - ColumnDescriptor, getUuidReplicaIdentityBson, MetricsEngine, SaveUpdate, @@ -108,8 +107,6 @@ export class CDCStream { private tableCache = new MSSQLSourceTableCache(); - private startedPolling = false; - /** * Time of the oldest uncommitted change, according to the source db. * This is used to determine the replication lag. @@ -535,16 +532,6 @@ export class CDCStream { ); } - private getTable(tableId: number): MSSQLSourceTable { - const table = this.tableCache.get(tableId); - if (table == null) { - // We should always receive a replication message before the relation is used. - // If we can't find it, it's a bug. 
- throw new ReplicationAssertionError(`Table with ${tableId} not found in cache`); - } - return table; - } - async initReplication() { const errors = await checkSourceConfiguration(this.connections); if (errors.length > 0) { diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts index 6b04754ba..14fa46239 100644 --- a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -173,8 +173,8 @@ export class BatchedSnapshotQuery implements MSSQLSnapshotQuery { if (keys[0] != this.key.name) { throw new ServiceAssertionError(`Key name mismatch: expected ${this.key.name}, got ${keys[0]}`); } - const value = decoded[this.key.name]; - return value; + + return decoded[this.key.name]; } } diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts index 79f6c58e3..ca94d108f 100644 --- a/modules/module-mssql/src/utils/mssql.ts +++ b/modules/module-mssql/src/utils/mssql.ts @@ -1,7 +1,7 @@ import sql from 'mssql'; import { SourceTable } from '@powersync/service-core'; import { coerce, gte } from 'semver'; -import { errors, logger } from '@powersync/lib-services-framework'; +import { logger } from '@powersync/lib-services-framework'; import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; import { LSN } from '../common/LSN.js'; import { CaptureInstance, MSSQLSourceTable } from '../common/MSSQLSourceTable.js'; diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts index 9dcf4a919..5af0ce3b5 100644 --- a/modules/module-mssql/test/src/CDCStream.test.ts +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -1,16 +1,10 @@ import { describe, expect, test } from 'vitest'; import { METRICS_HELPER, putOp } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; -import { - createTestTable, - describeWithStorage, - INITIALIZED_MONGO_STORAGE_FACTORY, - insertTestData, - waitForPendingCDCChanges -} from './util.js'; +import { createTestTable, describeWithStorage, insertTestData, waitForPendingCDCChanges } from './util.js'; import { storage } from '@powersync/service-core'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; -import { enableCDCForTable, getLatestReplicatedLSN } from '@module/utils/mssql.js'; +import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -19,11 +13,11 @@ bucket_definitions: - SELECT id, description FROM "test_data" `; -// describe('CDCStream tests', () => { -// describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); -// }); +describe('CDCStream tests', () => { + describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); +}); -defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); +// defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); function defineCDCStreamTests(factory: storage.TestStorageFactory) { test('Initial snapshot sync', async () => { diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts index 0f1faf18f..8b82a321b 100644 --- a/modules/module-mssql/test/src/util.ts +++ b/modules/module-mssql/test/src/util.ts @@ -8,7 +8,7 @@ import * as postgres_storage from '@powersync/service-module-postgres-storage'; import { describe, TestOptions } from 'vitest'; import { env } from './env.js'; import { MSSQLConnectionManager } from 
'@module/replication/MSSQLConnectionManager.js'; -import { createCheckpoint, enableCDCForTable, getLatestLSN, getLatestReplicatedLSN } from '@module/utils/mssql.js'; +import { createCheckpoint, enableCDCForTable, getLatestLSN } from '@module/utils/mssql.js'; import sql from 'mssql'; import { v4 as uuid } from 'uuid'; import { LSN } from '@module/common/LSN.js'; From b637575d87af12d96a1cdfa005bc2285d7a438fd Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 13:29:25 +0200 Subject: [PATCH 19/42] Updated MSSQConnection manager and factory to new pattern. Cleaned up some more unused imports --- .../src/replication/CDCReplicationJob.ts | 55 +++++++------------ .../src/replication/MSSQLConnectionManager.ts | 15 ++++- .../MSSQLConnectionManagerFactory.ts | 13 +++-- ...s => CDCStream_resumable_snapshot.test.ts} | 4 +- modules/module-mssql/test/src/util.ts | 4 +- .../src/replication/BinLogReplicationJob.ts | 2 +- .../src/replication/MySQLErrorRateLimiter.ts | 7 ++- .../replication/WalStreamReplicationJob.ts | 2 +- 8 files changed, 51 insertions(+), 51 deletions(-) rename modules/module-mssql/test/src/{CDCStream_resume_snapshot.test.ts => CDCStream_resumable_snapshot.test.ts} (97%) diff --git a/modules/module-mssql/src/replication/CDCReplicationJob.ts b/modules/module-mssql/src/replication/CDCReplicationJob.ts index d2a27f3f3..521e30709 100644 --- a/modules/module-mssql/src/replication/CDCReplicationJob.ts +++ b/modules/module-mssql/src/replication/CDCReplicationJob.ts @@ -18,19 +18,28 @@ export class CDCReplicationJob extends replication.AbstractReplicationJob { } async keepAlive() { - // Keepalives are handled by the binlog heartbeat mechanism + // TODO Might need to leverage checkpoints table as a keepAlive } async replicate() { try { - await this.replicateLoop(); + await this.replicateOnce(); } catch (e) { // Fatal exception - container.reporter.captureException(e, { - metadata: {} - }); - this.logger.error(`Replication failed`, e); + if (!this.isStopped) { + // Ignore aborted errors + this.logger.error(`Replication error`, e); + if (e.cause != null) { + this.logger.error(`cause`, e.cause); + } + + container.reporter.captureException(e, { + metadata: {} + }); + // This sets the retry delay + this.rateLimiter.reportError(e); + } if (e instanceof CDCDataExpiredError) { // This stops replication and restarts with a new instance await this.options.storage.factory.restartReplication(this.storage.group_id); @@ -40,21 +49,14 @@ export class CDCReplicationJob extends replication.AbstractReplicationJob { } } - async replicateLoop() { - while (!this.isStopped) { - await this.replicateOnce(); - - if (!this.isStopped) { - await new Promise((resolve) => setTimeout(resolve, 5000)); - } - } - } - async replicateOnce() { // New connections on every iteration (every error with retry), // otherwise we risk repeating errors related to the connection, // such as caused by cached PG schemas. 
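    // A small pool is requested below (max: 2) - presumably one connection for the CDC polling
    // query and one for auxiliary queries such as checkpoint writes; this split is an assumption,
    // not a documented requirement.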
-    const connectionManager = this.connectionFactory.create({});
+    const connectionManager = this.connectionFactory.create({
+      idleTimeoutMillis: 30_000,
+      max: 2
+    });
     try {
       await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
       if (this.isStopped) {
@@ -69,25 +71,6 @@ export class CDCReplicationJob extends replication.AbstractReplicationJob {
       });
       this.lastStream = stream;
       await stream.replicate();
-    } catch (e) {
-      if (this.abortController.signal.aborted) {
-        return;
-      }
-      this.logger.error(`Replication error`, e);
-      if (e.cause != null) {
-        this.logger.error(`cause`, e.cause);
-      }
-
-      if (e instanceof CDCDataExpiredError) {
-        throw e;
-      } else {
-        // Report the error if relevant, before retrying
-        container.reporter.captureException(e, {
-          metadata: {}
-        });
-        // This sets the retry delay
-        this.rateLimiter?.reportError(e);
-      }
     } finally {
       await connectionManager.end();
     }
diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts
index d48e2aad0..e7ba1d707 100644
--- a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts
+++ b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts
@@ -1,4 +1,4 @@
-import { logger } from '@powersync/lib-services-framework';
+import { BaseObserver, logger } from '@powersync/lib-services-framework';
 import sql from 'mssql';
 import { NormalizedMSSQLConnectionConfig } from '../types/types.js';
 import { POWERSYNC_VERSION } from '@powersync/service-core';
@@ -7,13 +7,18 @@ import { addParameters } from '../utils/mssql.js';
 
 export const DEFAULT_SCHEMA = 'dbo';
 
-export class MSSQLConnectionManager {
+export interface MSSQLConnectionManagerListener {
+  onEnded(): void;
+}
+
+export class MSSQLConnectionManager extends BaseObserver<MSSQLConnectionManagerListener> {
   private readonly pool: sql.ConnectionPool;
 
   constructor(
     public options: NormalizedMSSQLConnectionConfig,
     poolOptions: sql.PoolOpts
   ) {
+    super();
     // The pool is lazy - no connections are opened until a query is performed.
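    // Minimal usage sketch (hypothetical values; nothing connects until the first query runs):
    //   const manager = new MSSQLConnectionManager(config, { max: 2 });
    //   const { recordset } = await manager.query('SELECT 1 AS ok');
    //   await manager.end(); // fires the onEnded listener registered by the factory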
    this.pool = new sql.ConnectionPool({
      authentication: options.authentication,
@@ -26,7 +31,7 @@
      options: {
        appName: `powersync/${POWERSYNC_VERSION}`,
        encrypt: true, // for azure
-        trustServerCertificate: true // change to true for local dev / self-signed certs
+        trustServerCertificate: true // TODO: Check if this needs to be configurable; true is required for local dev / self-signed certs
      }
    });
  }
@@ -98,6 +103,10 @@
    } catch (error) {
      // We don't particularly care if any errors are thrown when shutting down the pool
      logger.warn('Error shutting down MSSQL connection pool', error);
+    } finally {
+      this.iterateListeners((listener) => {
+        listener.onEnded?.();
+      });
    }
  }
}
diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
index 23c3a2e26..06faf0065 100644
--- a/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
+++ b/modules/module-mssql/src/replication/MSSQLConnectionManagerFactory.ts
@@ -4,23 +4,28 @@ import { MSSQLConnectionManager } from './MSSQLConnectionManager.js';
 import sql from 'mssql';
 
 export class MSSQLConnectionManagerFactory {
-  private readonly connectionManagers: MSSQLConnectionManager[];
+  private readonly connectionManagers: Set<MSSQLConnectionManager>;
   public readonly connectionConfig: ResolvedConnectionConfig;
 
   constructor(connectionConfig: ResolvedConnectionConfig) {
    this.connectionConfig = connectionConfig;
-    this.connectionManagers = [];
+    this.connectionManagers = new Set();
  }
 
  create(poolOptions: sql.PoolOpts) {
    const manager = new MSSQLConnectionManager(this.connectionConfig, poolOptions);
-    this.connectionManagers.push(manager);
+    manager.registerListener({
+      onEnded: () => {
+        this.connectionManagers.delete(manager);
+      }
+    });
+    this.connectionManagers.add(manager);
    return manager;
  }
 
  async shutdown() {
    logger.info('Shutting down MSSQL connection Managers...');
-    for (const manager of this.connectionManagers) {
+    for (const manager of this.connectionManagers.values()) {
      await manager.end();
    }
    logger.info('MSSQL connection Managers shutdown completed.');
diff --git a/modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts
similarity index 97%
rename from modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts
rename to modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts
index eec3c4d0a..78fb92958 100644
--- a/modules/module-mssql/test/src/CDCStream_resume_snapshot.test.ts
+++ b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts
@@ -44,9 +44,9 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n
  const { connectionManager } = context;
 
  await connectionManager.query(`CREATE TABLE test_data1 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`);
-  await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data1' });
+  await enableCDCForTable({ connectionManager, table: 'test_data1' });
  await connectionManager.query(`CREATE TABLE test_data2 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`);
-  await enableCDCForTable({ connectionManager, schema: 'dbo', table: 'test_data2' });
+  await enableCDCForTable({ connectionManager, table: 'test_data2' });
 
  await connectionManager.query(
    `INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1);
diff --git a/modules/module-mssql/test/src/util.ts
b/modules/module-mssql/test/src/util.ts index 8b82a321b..2657cbe01 100644 --- a/modules/module-mssql/test/src/util.ts +++ b/modules/module-mssql/test/src/util.ts @@ -15,12 +15,12 @@ import { LSN } from '@module/common/LSN.js'; export const TEST_URI = env.MSSQL_TEST_URI; -export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({ +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.test_utils.mongoTestStorageFactoryGenerator({ url: env.MONGO_TEST_URL, isCI: env.CI }); -export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.PostgresTestStorageFactoryGenerator({ +export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils.postgresTestStorageFactoryGenerator({ url: env.PG_STORAGE_TEST_URL }); diff --git a/modules/module-mysql/src/replication/BinLogReplicationJob.ts b/modules/module-mysql/src/replication/BinLogReplicationJob.ts index 32c2371b7..bf72ca728 100644 --- a/modules/module-mysql/src/replication/BinLogReplicationJob.ts +++ b/modules/module-mysql/src/replication/BinLogReplicationJob.ts @@ -1,6 +1,6 @@ import { container, logger as defaultLogger } from '@powersync/lib-services-framework'; import { POWERSYNC_VERSION, replication } from '@powersync/service-core'; -import { BinlogConfigurationError, BinLogStream } from './BinLogStream.js'; +import { BinLogStream } from './BinLogStream.js'; import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js'; export interface BinLogReplicationJobOptions extends replication.AbstractReplicationJobOptions { diff --git a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts index 8966cd201..c6ca77d2b 100644 --- a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts +++ b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts @@ -1,5 +1,6 @@ import { ErrorRateLimiter } from '@powersync/service-core'; import { setTimeout } from 'timers/promises'; +import { BinlogConfigurationError } from './BinLogStream.js'; export class MySQLErrorRateLimiter implements ErrorRateLimiter { nextAllowed: number = Date.now(); @@ -17,8 +18,10 @@ export class MySQLErrorRateLimiter implements ErrorRateLimiter { reportError(e: any): void { const message = (e.message as string) ?? 
'';
-    if (message.includes('password authentication failed')) {
-      // Wait 15 minutes, to avoid triggering Supabase's fail2ban
+    if (e instanceof BinlogConfigurationError) {
+      // Short delay
+      this.setDelay(2_000);
+    } else if (message.includes('password authentication failed')) {
      this.setDelay(900_000);
    } else if (message.includes('ENOTFOUND')) {
      // DNS lookup issue - incorrect URI or deleted instance
diff --git a/modules/module-postgres/src/replication/WalStreamReplicationJob.ts b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts
index 8b6021cd6..340af22b9 100644
--- a/modules/module-postgres/src/replication/WalStreamReplicationJob.ts
+++ b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts
@@ -1,4 +1,4 @@
-import { container, logger, ReplicationAbortedError } from '@powersync/lib-services-framework';
+import { container, logger } from '@powersync/lib-services-framework';
 import { PgManager } from './PgManager.js';
 import { MissingReplicationSlotError, sendKeepAlive, WalStream } from './WalStream.js';
 
From 46d4decd7c2ec6b95ab4429c0caeae394e86227f Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Thu, 13 Nov 2025 13:59:02 +0200
Subject: [PATCH 20/42] Updated resumable snapshot queries. Cleaned up dead
 code

---
 .../module-mssql/src/replication/CDCStream.ts | 29 +------------
 .../src/replication/MSSQLSnapshotQuery.ts     | 43 +++++++++++--------
 .../src/replication/SnapshotQuery.ts          |  2 +-
 3 files changed, 27 insertions(+), 47 deletions(-)

diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts
index b4a8ce85a..e053caa0f 100644
--- a/modules/module-mssql/src/replication/CDCStream.ts
+++ b/modules/module-mssql/src/replication/CDCStream.ts
@@ -575,40 +575,13 @@ export class CDCStream {
  }
 
  async streamChanges() {
-    const reSnapshot: { table: storage.SourceTable; key: PrimaryKeyValue }[] = [];
-    // TODO Handle re-snapshot
-
-    const markRecordUnavailable = (record: SaveUpdate) => {
-      if (!IdSnapshotQuery.supports(record.sourceTable)) {
-        // If it's not supported, it's also safe to ignore
-        return;
-      }
-      let key: PrimaryKeyValue = {};
-      for (const column of record.sourceTable.replicaIdColumns) {
-        const name = column.name;
-        const value = record.after[name];
-        if (value == null) {
-          // We don't expect this to actually happen.
-          // The key should always be present in the "after" record.
-          return;
-        }
-        // We just need a consistent representation of the primary key, and don't care about fixed quirks.
- key[name] = applyValueContext(value, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY); - } - reSnapshot.push({ - table: record.sourceTable, - key: key - }); - }; - await this.storage.startBatch( { logger: this.logger, zeroLSN: LSN.ZERO, defaultSchema: this.defaultSchema, storeCurrentData: false, - skipExistingRows: false, - markRecordUnavailable + skipExistingRows: false }, async (batch) => { if (batch.resumeFromLsn == null) { diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts index 14fa46239..ceca0e1ca 100644 --- a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -17,11 +17,6 @@ export interface MSSQLSnapshotQuery { export type PrimaryKeyValue = Record; -export interface MissingRow { - table: MSSQLSourceTable; - key: PrimaryKeyValue; -} - /** * Snapshot query using a plain SELECT * FROM table * @@ -37,18 +32,25 @@ export class SimpleSnapshotQuery implements MSSQLSnapshotQuery { public async initialize(): Promise {} public async *next(): AsyncIterableIterator> { - const request = this.transaction.request(); - request.stream = true; - const metadataPromise = new Promise((resolve) => { - request.on('recordset', resolve); + const metadataRequest = this.transaction.request(); + metadataRequest.stream = true; + const metadataPromise = new Promise((resolve, reject) => { + metadataRequest.on('recordset', resolve); + metadataRequest.on('error', reject); }); - const stream = request.toReadableStream(); - request.query(`SELECT * FROM ${this.table.toQualifiedName()}`); + metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`); const columnMetadata: sql.IColumnMetadata = await metadataPromise; yield columnMetadata; + const request = this.transaction.request(); + const stream = request.toReadableStream(); + + request.query(`SELECT * FROM ${this.table.toQualifiedName()}`); + + + // MSSQL only streams one row at a time for await (const row of stream) { yield row; @@ -203,22 +205,27 @@ export class IdSnapshotQuery implements MSSQLSnapshotQuery { } public async *next(): AsyncIterableIterator> { - const request = this.transaction.request(); - request.stream = true; - const metadataPromise = new Promise((resolve) => { - request.on('recordset', resolve); + const metadataRequest = this.transaction.request(); + metadataRequest.stream = true; + const metadataPromise = new Promise((resolve, reject) => { + metadataRequest.on('recordset', resolve); + metadataRequest.on('error', reject); }); - const stream = request.toReadableStream(); + metadataRequest.query(`SELECT TOP(0) * FROM ${this.table.toQualifiedName()}`); + const columnMetadata: sql.IColumnMetadata = await metadataPromise; + yield columnMetadata; + const keyDefinition = this.table.sourceTable.replicaIdColumns[0]; const ids = this.keys.map((record) => record[keyDefinition.name]); + const request = this.transaction.request(); + const stream = request.toReadableStream(); request .input('ids', ids) .query(`SELECT * FROM ${this.table.toQualifiedName()} WHERE ${escapeIdentifier(keyDefinition.name)} = @ids`); - const columnMetadata: sql.IColumnMetadata = await metadataPromise; - yield columnMetadata; + // MSSQL only streams one row at a time for await (const row of stream) { yield row; } diff --git a/modules/module-postgres/src/replication/SnapshotQuery.ts b/modules/module-postgres/src/replication/SnapshotQuery.ts index b826c215f..d496d857d 100644 --- 
a/modules/module-postgres/src/replication/SnapshotQuery.ts +++ b/modules/module-postgres/src/replication/SnapshotQuery.ts @@ -23,7 +23,7 @@ export interface MissingRow { /** * Snapshot query using a plain SELECT * FROM table; chunked using - * DELCLARE CURSOR / FETCH. + * DECLARE CURSOR / FETCH. * * This supports all tables, but does not efficiently resume the snapshot * if the process is restarted. From 3980afbf611d212fbddd8e2e7e64bc0433e3585a Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Thu, 13 Nov 2025 14:07:49 +0200 Subject: [PATCH 21/42] Cleaned up more imports --- .../module-mssql/src/replication/CDCStream.ts | 25 ++++--------------- .../src/replication/MSSQLSnapshotQuery.ts | 3 --- 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts index e053caa0f..844dc61f6 100644 --- a/modules/module-mssql/src/replication/CDCStream.ts +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -8,23 +8,9 @@ import { ReplicationAssertionError, ServiceAssertionError } from '@powersync/lib-services-framework'; -import { - getUuidReplicaIdentityBson, - MetricsEngine, - SaveUpdate, - SourceEntityDescriptor, - storage -} from '@powersync/service-core'; +import { getUuidReplicaIdentityBson, MetricsEngine, SourceEntityDescriptor, storage } from '@powersync/service-core'; -import { - applyValueContext, - CompatibilityContext, - DatabaseInputRow, - SqliteInputRow, - SqliteRow, - SqlSyncRules, - TablePattern -} from '@powersync/service-sync-rules'; +import { DatabaseInputRow, SqliteInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; import { ReplicationMetric } from '@powersync/service-types'; import { @@ -35,8 +21,7 @@ import { SimpleSnapshotQuery } from './MSSQLSnapshotQuery.js'; import { MSSQLConnectionManager } from './MSSQLConnectionManager.js'; -import * as schema_utils from '../utils/schema.js'; -import { ResolvedTable } from '../utils/schema.js'; +import { getReplicationIdentityColumns, getTablesFromPattern, ResolvedTable } from '../utils/schema.js'; import { checkSourceConfiguration, createCheckpoint, @@ -204,7 +189,7 @@ export class CDCStream { return []; } - const matchedTables: ResolvedTable[] = await schema_utils.getTablesFromPattern(this.connections, tablePattern); + const matchedTables: ResolvedTable[] = await getTablesFromPattern(this.connections, tablePattern); const tables: MSSQLSourceTable[] = []; for (const matchedTable of matchedTables) { @@ -221,7 +206,7 @@ export class CDCStream { // TODO: Check RLS settings for table - const replicaIdColumns = await schema_utils.getReplicationIdentityColumns({ + const replicaIdColumns = await getReplicationIdentityColumns({ connectionManager: this.connections, tableName: matchedTable.name, schema: matchedTable.schema diff --git a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts index ceca0e1ca..3bd837692 100644 --- a/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts +++ b/modules/module-mssql/src/replication/MSSQLSnapshotQuery.ts @@ -49,8 +49,6 @@ export class SimpleSnapshotQuery implements MSSQLSnapshotQuery { request.query(`SELECT * FROM ${this.table.toQualifiedName()}`); - - // MSSQL only streams one row at a time for await (const row of stream) { yield row; @@ -215,7 +213,6 @@ export class IdSnapshotQuery implements MSSQLSnapshotQuery { const columnMetadata: sql.IColumnMetadata = await metadataPromise; yield 
columnMetadata; - const keyDefinition = this.table.sourceTable.replicaIdColumns[0]; const ids = this.keys.map((record) => record[keyDefinition.name]); From 345af3dc5ccf007c6e7b9d8ee7dd2b78685056b7 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 11:24:56 +0200 Subject: [PATCH 22/42] Enabled the MSSQLModule --- modules/module-mssql/dev/scripts/init.sql | 143 ------------------ .../src/api/MySQLRouteAPIAdapter.ts | 2 +- pnpm-lock.yaml | 3 + service/package.json | 1 + service/src/entry.ts | 2 + 5 files changed, 7 insertions(+), 144 deletions(-) delete mode 100644 modules/module-mssql/dev/scripts/init.sql diff --git a/modules/module-mssql/dev/scripts/init.sql b/modules/module-mssql/dev/scripts/init.sql deleted file mode 100644 index 74d641a2f..000000000 --- a/modules/module-mssql/dev/scripts/init.sql +++ /dev/null @@ -1,143 +0,0 @@ --- Create database (idempotent) -DECLARE @db sysname = '$(APP_DB)'; -IF DB_ID(@db) IS NULL -BEGIN - DECLARE @sql nvarchar(max) = N'CREATE DATABASE [' + @db + N'];'; -EXEC(@sql); -END -GO - --- Enable CLR (idempotent, needed for CDC net changes update-mask optimization) -IF (SELECT CAST(value_in_use AS INT) FROM sys.configurations WHERE name = 'clr enabled') = 0 -BEGIN - EXEC sp_configure 'show advanced options', 1; - RECONFIGURE; - EXEC sp_configure 'clr enabled', 1; - RECONFIGURE; -END -GO - --- Enable CDC at the database level (idempotent) -DECLARE @db sysname = '$(APP_DB)'; -DECLARE @cmd nvarchar(max) = N'USE [' + @db + N']; -IF EXISTS (SELECT 1 FROM sys.databases WHERE name = ''' + @db + N''' AND is_cdc_enabled = 0) - EXEC sys.sp_cdc_enable_db;'; -EXEC(@cmd); -GO - --- Create a SQL login (server) and user (db), then grant CDC read access --- Note: 'cdc_reader' role is auto-created when CDC is enabled on the DB. -DECLARE @db sysname = '$(APP_DB)'; -DECLARE @login sysname = '$(APP_LOGIN)'; -DECLARE @password nvarchar(128) = '$(APP_PASSWORD)'; --- Create login if missing -IF NOT EXISTS (SELECT 1 FROM sys.server_principals WHERE name = @login) -BEGIN - DECLARE @mklogin nvarchar(max) = N'CREATE LOGIN [' + @login + N'] WITH PASSWORD = ''' + @password + N''', CHECK_POLICY = ON;'; -EXEC(@mklogin); -END; - --- Create user in DB if missing -DECLARE @mkuser nvarchar(max) = N'USE [' + @db + N']; -IF NOT EXISTS (SELECT 1 FROM sys.database_principals WHERE name = ''' + @login + N''') - CREATE USER [' + @login + N'] FOR LOGIN [' + @login + N'];'; -EXEC(@mkuser); -GO -/* ----------------------------------------------------------- - OPTIONAL: enable CDC for specific tables. - You must enable CDC per table to actually capture changes. - Example below creates a demo table and enables CDC on it. 
-------------------------------------------------------------*/ - -DECLARE @db sysname = '$(APP_DB)'; -EXEC(N'USE [' + @db + N']; -IF OBJECT_ID(''dbo.lists'', ''U'') IS NULL -BEGIN - CREATE TABLE dbo.lists ( - id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), -- GUID (36 characters), - created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), - name NVARCHAR(MAX) NOT NULL, - owner_id UNIQUEIDENTIFIER NOT NULL, - CONSTRAINT PK_lists PRIMARY KEY (id) - ); -END; -'); - - -EXEC(N'USE [' + @db + N']; -IF OBJECT_ID(''dbo.todos'', ''U'') IS NULL -BEGIN - CREATE TABLE dbo.todos ( - id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), -- GUID (36 characters) - created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), - completed_at DATETIME2 NULL, - description NVARCHAR(MAX) NOT NULL, - completed BIT NOT NULL DEFAULT 0, - created_by UNIQUEIDENTIFIER NULL, - completed_by UNIQUEIDENTIFIER NULL, - list_id UNIQUEIDENTIFIER NOT NULL, - CONSTRAINT PK_todos PRIMARY KEY (id), - CONSTRAINT FK_todos_lists FOREIGN KEY (list_id) REFERENCES dbo.lists(id) ON DELETE CASCADE - ); -END; -'); -GO - --- Enable CDC for dbo.lists (idempotent guard) -DECLARE @db sysname = '$(APP_DB)'; -DECLARE @login sysname = '$(APP_LOGIN)'; -DECLARE @enableListsTable nvarchar(max) = N'USE [' + @db + N']; -IF NOT EXISTS ( - SELECT 1 - FROM cdc.change_tables - WHERE source_object_id = OBJECT_ID(N''dbo.lists'') -) -BEGIN - EXEC sys.sp_cdc_enable_table - @source_schema = N''dbo'', - @source_name = N''lists'', - @role_name = N''cdc_reader'', - @supports_net_changes = 1; -END;'; -EXEC(@enableListsTable); - --- Enable CDC for dbo.todos (idempotent guard) -DECLARE @enableTodosTable nvarchar(max) = N'USE [' + @db + N']; -IF NOT EXISTS ( - SELECT 1 - FROM cdc.change_tables - WHERE source_object_id = OBJECT_ID(N''dbo.todos'') -) -BEGIN - EXEC sys.sp_cdc_enable_table - @source_schema = N''dbo'', - @source_name = N''todos'', - @role_name = N''cdc_reader'', - @supports_net_changes = 1; -END;'; -EXEC(@enableTodosTable); - --- Grant minimal rights to read CDC data: --- 1) read access to base tables (db_datareader) --- 2) membership in cdc_reader (allows selecting from CDC change tables & functions) -DECLARE @grant nvarchar(max) = N'USE [' + @db + N']; -IF NOT EXISTS (SELECT 1 FROM sys.database_role_members rm - JOIN sys.database_principals r ON rm.role_principal_id = r.principal_id AND r.name = ''db_datareader'' - JOIN sys.database_principals u ON rm.member_principal_id = u.principal_id AND u.name = ''' + @login + N''') - ALTER ROLE db_datareader ADD MEMBER [' + @login + N']; - -IF NOT EXISTS (SELECT 1 FROM sys.database_role_members rm - JOIN sys.database_principals r ON rm.role_principal_id = r.principal_id AND r.name = ''cdc_reader'' - JOIN sys.database_principals u ON rm.member_principal_id = u.principal_id AND u.name = ''' + @login + N''') - ALTER ROLE cdc_reader ADD MEMBER [' + @login + N'];'; -EXEC(@grant); -GO - -DECLARE @db sysname = '$(APP_DB)'; -EXEC(N'USE [' + @db + N']; -BEGIN - INSERT INTO dbo.lists (id, name, owner_id) - VALUES (NEWID(), ''Do a demo'', NEWID()); -END; -'); -GO \ No newline at end of file diff --git a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts index ef0b7642c..c2d91f8ce 100644 --- a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts +++ b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts @@ -22,7 +22,7 @@ export class MySQLRouteAPIAdapter implements api.RouteAPI { } async shutdown(): Promise { - return this.pool.end(); + await this.pool.end(); } async 
getSourceConfig(): Promise { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3fa8f7c16..2c3bc1eaa 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -733,6 +733,9 @@ importers: '@powersync/service-module-mongodb-storage': specifier: workspace:* version: link:../modules/module-mongodb-storage + '@powersync/service-module-mssql': + specifier: workspace:* + version: link:../modules/module-mssql '@powersync/service-module-mysql': specifier: workspace:* version: link:../modules/module-mysql diff --git a/service/package.json b/service/package.json index c2420fecb..b3ca0c4fc 100644 --- a/service/package.json +++ b/service/package.json @@ -16,6 +16,7 @@ "@powersync/service-module-postgres-storage": "workspace:*", "@powersync/service-module-mongodb": "workspace:*", "@powersync/service-module-mongodb-storage": "workspace:*", + "@powersync/service-module-mssql": "workspace:*", "@powersync/service-module-mysql": "workspace:*", "@powersync/service-rsocket-router": "workspace:*", "@powersync/service-module-core": "workspace:*", diff --git a/service/src/entry.ts b/service/src/entry.ts index 61b943f17..b67997922 100644 --- a/service/src/entry.ts +++ b/service/src/entry.ts @@ -5,6 +5,7 @@ import { CoreModule } from '@powersync/service-module-core'; import { MongoModule } from '@powersync/service-module-mongodb'; import { MongoStorageModule } from '@powersync/service-module-mongodb-storage'; import { MySQLModule } from '@powersync/service-module-mysql'; +import { MSSQLModule } from '@powersync/service-module-mssql'; import { PostgresModule } from '@powersync/service-module-postgres'; import { PostgresStorageModule } from '@powersync/service-module-postgres-storage'; import { startServer } from './runners/server.js'; @@ -21,6 +22,7 @@ moduleManager.register([ new CoreModule(), new MongoModule(), new MongoStorageModule(), + new MSSQLModule(), new MySQLModule(), new PostgresModule(), new PostgresStorageModule() From 593d03d7f9272dab52918320b178d3d9f85d7bc9 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 11:25:30 +0200 Subject: [PATCH 23/42] Updated and simplified the dev mssql database --- modules/module-mssql/dev/.env.template | 4 + modules/module-mssql/dev/README.md | 82 ++++++++++ modules/module-mssql/dev/docker-compose.yaml | 33 ++-- modules/module-mssql/dev/init.sql | 154 +++++++++++++++++++ 4 files changed, 257 insertions(+), 16 deletions(-) create mode 100644 modules/module-mssql/dev/.env.template create mode 100644 modules/module-mssql/dev/README.md create mode 100644 modules/module-mssql/dev/init.sql diff --git a/modules/module-mssql/dev/.env.template b/modules/module-mssql/dev/.env.template new file mode 100644 index 000000000..1fbba060e --- /dev/null +++ b/modules/module-mssql/dev/.env.template @@ -0,0 +1,4 @@ +ROOT_PASSWORD=321strong_ROOT_password +DATABASE=powersync +DB_USER=powersync_user +DB_USER_PASSWORD=321strong_POWERSYNC_password \ No newline at end of file diff --git a/modules/module-mssql/dev/README.md b/modules/module-mssql/dev/README.md new file mode 100644 index 000000000..8dfde62d8 --- /dev/null +++ b/modules/module-mssql/dev/README.md @@ -0,0 +1,82 @@ +# MSSQL Dev Database + +This directory contains Docker Compose configuration for running a local MSSQL Server instance with CDC (Change Data Capture) enabled for development and testing. The image used is the 2022 Edition of SQL Server. 
2025 can also be used, but has issues on macOS 26 Tahoe due to this upstream issue: https://github.com/microsoft/mssql-docker/issues/942
+
+## Prerequisites
+
+- Docker and Docker Compose installed
+- A `.env` file in this directory; see `.env.template` for the required variables
+
+## Environment Variables
+
+```bash
+ROOT_PASSWORD=
+DATABASE=
+DB_USER=
+DB_USER_PASSWORD=
+```
+
+**Note:** The `ROOT_PASSWORD` and `DB_USER_PASSWORD` must meet SQL Server password complexity requirements (at least 8 characters, including uppercase, lowercase, numbers, and special characters).
+
+## Usage
+
+### Starting the Database
+
+From the `dev` directory, run:
+
+```bash
+docker compose up -d
+```
+
+This will:
+1. Start the MSSQL Server container (`mssql-dev`)
+2. Wait for the database to be healthy
+3. Automatically run the setup container (`mssql-dev-setup`), which executes `init.sql`
+
+### Stopping the Database
+
+```bash
+docker compose down
+```
+
+To also remove the data volume:
+
+```bash
+docker compose down -v
+```
+
+### Viewing Logs
+
+```bash
+docker compose logs -f
+```
+
+## What `init.sql` Does
+
+The initialization script (`init.sql`) performs the following setup steps:
+
+1. **Database Creation**: Creates the application database (if it doesn't exist)
+2. **CDC Setup**: Enables Change Data Capture at the database level
+3. **User Creation**: Creates a SQL Server login and database user with appropriate permissions
+4. **Create PowerSync Checkpoints table**: Creates the required `_powersync_checkpoints` table
+5. **Demo Tables**: Creates sample tables (`lists` and `todos`) for testing (optional examples)
+6. **CDC Table Enablement**: Enables CDC tracking on the demo tables
+7. **Permissions**: Grants the `db_datareader` and `cdc_reader` roles to the application user
+8. **Sample Data**: Inserts initial test data into the `lists` table
+
+All operations are idempotent, so you can safely re-run the setup without errors. The demo tables section (steps 5–7) serves as an example of how to enable CDC on your own tables.
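+
+### Verifying the Setup
+
+For a quick end-to-end check from Node, something like the following should work (a minimal sketch, assuming the defaults from `.env.template` and the same `mssql` driver the module itself uses):
+
+```typescript
+import sql from 'mssql';
+
+// Connection values mirror .env.template; adjust them to match your .env.
+const pool = await sql.connect({
+  server: 'localhost',
+  port: 1433,
+  user: 'powersync_user',
+  password: '321strong_POWERSYNC_password',
+  database: 'powersync',
+  options: { trustServerCertificate: true } // the dev container uses a self-signed certificate
+});
+
+// CDC must be enabled on the database for replication to work.
+const result = await pool.request().query(
+  `SELECT name, is_cdc_enabled FROM sys.databases WHERE name = DB_NAME()`
+);
+console.log(result.recordset); // expected: [{ name: 'powersync', is_cdc_enabled: true }]
+
+await pool.close();
+```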
+ +## Connection Details + +- **Host**: `localhost` +- **Port**: `1433` +- **SA Login**: `sa` / `{ROOT_PASSWORD}` +- **App Login**: `{DB_USER}` / `{DB_USER_PASSWORD}` +- **Database**: `{DATABASE}` + +## Troubleshooting + +- If the setup container fails, check logs: `docker compose logs mssql-dev-setup` +- Ensure your `.env` file exists and contains all required variables +- The database container may take 30–60 seconds to become healthy on the first startup +- If you encounter connection issues, verify the container is running: `docker compose ps` diff --git a/modules/module-mssql/dev/docker-compose.yaml b/modules/module-mssql/dev/docker-compose.yaml index 9c5a41d82..e5f5d120a 100644 --- a/modules/module-mssql/dev/docker-compose.yaml +++ b/modules/module-mssql/dev/docker-compose.yaml @@ -1,38 +1,39 @@ +name: mssql-dev services: - mssql: + mssql-dev: platform: linux/amd64 - image: mcr.microsoft.com/mssql/server:2022-latest - container_name: mssql + image: mcr.microsoft.com/mssql/server:2022-latest # 2025 Can also be used, but not on Mac 26 Tahoe due to this issue: https://github.com/microsoft/mssql-docker/issues/942 + container_name: mssql-dev ports: - "1433:1433" environment: ACCEPT_EULA: "Y" - MSSQL_SA_PASSWORD: "${SA_PASSWORD}" + MSSQL_SA_PASSWORD: "${ROOT_PASSWORD}" MSSQL_PID: "Developer" MSSQL_AGENT_ENABLED: "true" # required for CDC capture/cleanup jobs volumes: - - mssql-data:/var/opt/mssql + - data:/var/opt/mssql healthcheck: test: [ "CMD-SHELL", "/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"$${MSSQL_SA_PASSWORD}\" -Q \"SELECT 1;\" || exit 1" ] interval: 5s timeout: 3s retries: 30 - mssql-setup: + mssql-dev-setup: platform: linux/amd64 - image: mcr.microsoft.com/mssql/server:2025-latest - container_name: mssql-setup + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: mssql-dev-setup depends_on: - mssql: + mssql-dev: condition: service_healthy environment: - SA_PASSWORD: "${SA_PASSWORD}" - APP_DB: "${APP_DB:-appdb}" - APP_LOGIN: "${APP_LOGIN:-appuser}" - APP_PASSWORD: "${APP_PASSWORD:-P@ssw0rd!App}" + MSSQL_SA_PASSWORD: "${ROOT_PASSWORD}" + DATABASE: "${DATABASE}" + DB_USER: "${DB_USER}" + DB_USER_PASSWORD: "${DB_USER_PASSWORD}" volumes: - - ./scripts:/scripts:ro - entrypoint: ["/bin/bash", "-lc", "/opt/mssql-tools18/bin/sqlcmd -C -S mssql,1433 -U sa -P \"$SA_PASSWORD\" -i /scripts/init.sql && echo '✅ MSSQL init done'"] + - ./init.sql:/scripts/init.sql:ro + entrypoint: ["/bin/bash", "-lc", "/opt/mssql-tools18/bin/sqlcmd -C -S mssql-dev,1433 -U sa -P \"$${MSSQL_SA_PASSWORD}\" -i /scripts/init.sql && echo '✅ MSSQL init done'"] volumes: - mssql-data: \ No newline at end of file + data: diff --git a/modules/module-mssql/dev/init.sql b/modules/module-mssql/dev/init.sql new file mode 100644 index 000000000..d206e6fdf --- /dev/null +++ b/modules/module-mssql/dev/init.sql @@ -0,0 +1,154 @@ +-- Create database (idempotent) +IF DB_ID('$(DATABASE)') IS NULL +BEGIN + CREATE DATABASE [$(DATABASE)]; +END +GO + +-- Enable CDC at the database level (idempotent) +USE [$(DATABASE)]; +IF (SELECT is_cdc_enabled FROM sys.databases WHERE name = '$(DATABASE)') = 0 +BEGIN + EXEC sys.sp_cdc_enable_db; +END +GO + +-- Create a SQL login (server) if missing +USE [master]; +IF NOT EXISTS (SELECT 1 FROM sys.server_principals WHERE name = '$(DB_USER)') +BEGIN + CREATE LOGIN [$(DB_USER)] WITH PASSWORD = '$(DB_USER_PASSWORD)', CHECK_POLICY = ON; +END +GO + +-- Create DB user for the app DB if missing +USE [$(DATABASE)]; +IF NOT EXISTS (SELECT 1 FROM sys.database_principals WHERE name = 
'$(DB_USER)') +BEGIN + CREATE USER [$(DB_USER)] FOR LOGIN [$(DB_USER)]; +END +GO + +-- Required for PowerSync to access the sys.dm_db_log_stats DMV +USE [master]; +GRANT VIEW SERVER PERFORMANCE STATE TO [$(DB_USER)]; +GO + +-- Required for PowerSync to access the sys.dm_db_log_stats DMV and the sys.dm_db_partition_stats DMV +USE [$(DATABASE)]; +GRANT VIEW DATABASE PERFORMANCE STATE TO [$(DB_USER)]; +GO + +-- Create PowerSync checkpoints table +-- Powersync requires this table to ensure regular checkpoints appear in CDC +IF OBJECT_ID('dbo._powersync_checkpoints', 'U') IS NULL +BEGIN + CREATE TABLE dbo._powersync_checkpoints ( + id INT IDENTITY PRIMARY KEY, + last_updated DATETIME NOT NULL DEFAULT (GETDATE()) + ); +END + +GRANT INSERT, UPDATE ON dbo._powersync_checkpoints TO [$(DB_USER)]; +GO + +-- Enable CDC for the powersync checkpoints table +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo._powersync_checkpoints')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'_powersync_checkpoints', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Wait until capture job exists - usually takes a few seconds after enabling CDC on a table for the first time +DECLARE @tries int = 10; +WHILE @tries > 0 AND NOT EXISTS (SELECT 1 FROM msdb.dbo.cdc_jobs WHERE job_type = N'capture') +BEGIN + WAITFOR DELAY '00:00:01'; + SET @tries -= 1; +END; + +-- Set the CDC capture job polling interval to 1 second (default is 5 seconds) +EXEC sys.sp_cdc_change_job @job_type = N'capture', @pollinginterval = 1; +GO + +/* ----------------------------------------------------------- + Create demo lists and todos tables and enables CDC on them. + CDC must be enabled per table to actually capture changes. 
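+
+   As a sketch of what this enables (assuming the default capture instance
+   name 'dbo_lists' that sys.sp_cdc_enable_table derives below), captured
+   changes can later be read with:
+
+     DECLARE @from BINARY(10) = sys.fn_cdc_get_min_lsn('dbo_lists');
+     DECLARE @to BINARY(10) = sys.fn_cdc_get_max_lsn();
+     SELECT * FROM cdc.fn_cdc_get_all_changes_dbo_lists(@from, @to, N'all');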
+------------------------------------------------------------*/ +IF OBJECT_ID('dbo.lists', 'U') IS NULL +BEGIN + CREATE TABLE dbo.lists ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + name NVARCHAR(MAX) NOT NULL, + owner_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_lists PRIMARY KEY (id) + ); +END + +GRANT INSERT, UPDATE, DELETE ON dbo.lists TO [$(DB_USER)]; +GO + +IF OBJECT_ID('dbo.todos', 'U') IS NULL +BEGIN + CREATE TABLE dbo.todos ( + id UNIQUEIDENTIFIER NOT NULL DEFAULT NEWID(), + created_at DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(), + completed_at DATETIME2 NULL, + description NVARCHAR(MAX) NOT NULL, + completed BIT NOT NULL DEFAULT 0, + created_by UNIQUEIDENTIFIER NULL, + completed_by UNIQUEIDENTIFIER NULL, + list_id UNIQUEIDENTIFIER NOT NULL, + CONSTRAINT PK_todos PRIMARY KEY (id), + CONSTRAINT FK_todos_lists FOREIGN KEY (list_id) REFERENCES dbo.lists(id) ON DELETE CASCADE + ); +END + +GRANT INSERT, UPDATE, DELETE ON dbo.todos TO [$(DB_USER)]; +GO + +-- Enable CDC for dbo.lists (idempotent guard) +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo.lists')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'lists', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Enable CDC for dbo.todos (idempotent guard) +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo.todos')) +BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'todos', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Grant minimal rights to read CDC data +IF IS_ROLEMEMBER('db_datareader', '$(DB_USER)') = 0 +BEGIN + ALTER ROLE db_datareader ADD MEMBER [$(DB_USER)]; +END + +IF IS_ROLEMEMBER('cdc_reader', '$(DB_USER)') = 0 +BEGIN + ALTER ROLE cdc_reader ADD MEMBER [$(DB_USER)]; +END +GO + +-- Add demo data +BEGIN + INSERT INTO dbo.lists (id, name, owner_id) + VALUES (NEWID(), 'Do a demo', NEWID()); +END +GO \ No newline at end of file From 72cea6871c25ea89c466e2c1ea5ea9ef828b5071 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 11:31:12 +0200 Subject: [PATCH 24/42] Updated mssql version --- modules/module-mssql/package.json | 4 +- pnpm-lock.yaml | 68 +++++++++++++++++++++---------- 2 files changed, 49 insertions(+), 23 deletions(-) diff --git a/modules/module-mssql/package.json b/modules/module-mssql/package.json index 9c8f0a932..37af8348e 100644 --- a/modules/module-mssql/package.json +++ b/modules/module-mssql/package.json @@ -34,7 +34,7 @@ "@powersync/service-sync-rules": "workspace:*", "@powersync/service-types": "workspace:*", "@powersync/service-jsonbig": "workspace:*", - "mssql": "^11.0.1", + "mssql": "^12.1.1", "semver": "^7.7.2", "ts-codec": "^1.3.0", "uri-js": "^4.4.1", @@ -44,7 +44,7 @@ "@powersync/service-core-tests": "workspace:*", "@powersync/service-module-mongodb-storage": "workspace:*", "@powersync/service-module-postgres-storage": "workspace:*", - "@types/mssql": "^9.1.7", + "@types/mssql": "^9.1.8", "@types/semver": "^7.7.1", "@types/uuid": "^10.0.0" } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2c3bc1eaa..5015f40f8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -276,8 +276,8 @@ importers: specifier: workspace:* version: link:../../packages/types mssql: - specifier: ^11.0.1 - version: 11.0.1 + specifier: ^12.1.1 + version: 12.1.1 semver: specifier: ^7.7.2 version: 7.7.3 @@ -301,7 +301,7 @@ importers: specifier: workspace:* 
version: link:../module-postgres-storage '@types/mssql': - specifier: ^9.1.7 + specifier: ^9.1.8 version: 9.1.8 '@types/semver': specifier: ^7.7.1 @@ -1694,8 +1694,8 @@ packages: resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} engines: {node: '>=14.16'} - '@tediousjs/connection-string@0.5.0': - resolution: {integrity: sha512-7qSgZbincDDDFyRweCIEvZULFAw5iz/DeunhvuxpL31nfntX3P4Yd4HkHBRg9H8CdqY1e5WFN1PZIz/REL9MVQ==} + '@tediousjs/connection-string@0.6.0': + resolution: {integrity: sha512-GxlsW354Vi6QqbUgdPyQVcQjI7cZBdGV5vOYVYuCVDTylx2wl3WHR2HlhcxxHTrMigbelpXsdcZso+66uxPfow==} '@tootallnate/once@2.0.0': resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} @@ -2692,6 +2692,10 @@ packages: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} + iconv-lite@0.7.0: + resolution: {integrity: sha512-cf6L2Ds3h57VVmkZe+Pn+5APsT7FpqJtEhhieDCvrE2MK5Qk9MyffgQyuxQTm6BChfeZNtcOLHp9IcWRVcIcBQ==} + engines: {node: '>=0.10.0'} + ieee754@1.2.1: resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} @@ -3180,8 +3184,8 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - mssql@11.0.1: - resolution: {integrity: sha512-KlGNsugoT90enKlR8/G36H0kTxPthDhmtNUCwEHvgRza5Cjpjoj+P2X6eMpFUDN7pFrJZsKadL4x990G8RBE1w==} + mssql@12.1.1: + resolution: {integrity: sha512-nUTXi0unU6p72YKe6KDR9vW2mSQWsmy1KZqV0JkaT2v3RSkxlwx4Y4srjYmH+DZNbyA53Ijp6o2OaLnLc4F2Qg==} engines: {node: '>=18'} hasBin: true @@ -4019,6 +4023,10 @@ packages: resolution: {integrity: sha512-9AvErXXQTd6l7TDd5EmM+nxbOGyhnmdbp/8c3pw+tjaiSXW9usME90ET/CRG1LN1Y9tPMtz/p83z4Q97B4DDpw==} engines: {node: '>=18'} + tedious@19.1.3: + resolution: {integrity: sha512-6O6efTeYtcnar3Cqf/ptqJs+U10fYYjp/SHRNm3VGuCTUDys+AUgIbxWbT2kzl4baXAzuy9byV3qCgOimrRfTA==} + engines: {node: '>=18.17'} + term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} engines: {node: '>=8'} @@ -4478,13 +4486,13 @@ snapshots: '@azure/abort-controller@2.1.2': dependencies: - tslib: 2.6.3 + tslib: 2.8.1 '@azure/core-auth@1.10.1': dependencies: '@azure/abort-controller': 2.1.2 '@azure/core-util': 1.13.1 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -4496,7 +4504,7 @@ snapshots: '@azure/core-tracing': 1.3.1 '@azure/core-util': 1.13.1 '@azure/logger': 1.3.0 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -4529,19 +4537,19 @@ snapshots: '@azure/core-util': 1.13.1 '@azure/logger': 1.3.0 '@typespec/ts-http-runtime': 0.3.1 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color '@azure/core-tracing@1.3.1': dependencies: - tslib: 2.6.3 + tslib: 2.8.1 '@azure/core-util@1.13.1': dependencies: '@azure/abort-controller': 2.1.2 '@typespec/ts-http-runtime': 0.3.1 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -4557,7 +4565,7 @@ snapshots: '@azure/msal-browser': 4.25.1 '@azure/msal-node': 3.8.0 open: 10.2.0 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -4594,7 +4602,7 @@ snapshots: '@azure/logger@1.3.0': dependencies: '@typespec/ts-http-runtime': 0.3.1 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - 
supports-color @@ -5534,7 +5542,7 @@ snapshots: dependencies: defer-to-connect: 2.0.1 - '@tediousjs/connection-string@0.5.0': {} + '@tediousjs/connection-string@0.6.0': {} '@tootallnate/once@2.0.0': {} @@ -5647,7 +5655,7 @@ snapshots: dependencies: http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 - tslib: 2.6.3 + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -6585,6 +6593,10 @@ snapshots: dependencies: safer-buffer: 2.1.2 + iconv-lite@0.7.0: + dependencies: + safer-buffer: 2.1.2 + ieee754@1.2.1: {} ignore-by-default@1.0.1: {} @@ -7035,14 +7047,13 @@ snapshots: ms@2.1.3: {} - mssql@11.0.1: + mssql@12.1.1: dependencies: - '@tediousjs/connection-string': 0.5.0 + '@tediousjs/connection-string': 0.6.0 commander: 11.1.0 debug: 4.4.1 - rfdc: 1.4.1 tarn: 3.0.2 - tedious: 18.6.1 + tedious: 19.1.3 transitivePeerDependencies: - supports-color @@ -7970,6 +7981,21 @@ snapshots: transitivePeerDependencies: - supports-color + tedious@19.1.3: + dependencies: + '@azure/core-auth': 1.10.1 + '@azure/identity': 4.13.0 + '@azure/keyvault-keys': 4.10.0 + '@js-joda/core': 5.6.5 + '@types/node': 22.16.2 + bl: 6.1.4 + iconv-lite: 0.7.0 + js-md4: 0.3.2 + native-duplexpair: 1.0.0 + sprintf-js: 1.1.3 + transitivePeerDependencies: + - supports-color + term-size@2.2.1: {} text-hex@1.0.0: {} From 7f023d3aef8634a081fb9df8e47329f427246033 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 11:31:45 +0200 Subject: [PATCH 25/42] Implemented MSSQLRouteAPIAdapter --- .../src/api/MSSQLRouteAPIAdapter.ts | 262 ++++++++++++++++-- 1 file changed, 243 insertions(+), 19 deletions(-) diff --git a/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts index 4ff2b3b39..398a59a2f 100644 --- a/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts +++ b/modules/module-mssql/src/api/MSSQLRouteAPIAdapter.ts @@ -5,38 +5,258 @@ import { ReplicationHeadCallback, ReplicationLagOptions } from '@powersync/service-core'; -import { Promise } from 'mssql'; import * as service_types from '@powersync/service-types'; +import * as sync_rules from '@powersync/service-sync-rules'; import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; -import { ResolvedConnectionConfig } from '../types/types.js'; +import * as types from '../types/types.js'; import { ExecuteSqlResponse } from '@powersync/service-types/dist/routes.js'; import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js'; +import { + checkSourceConfiguration, + createCheckpoint, + getDebugTableInfo, + getLatestLSN, + POWERSYNC_CHECKPOINTS_TABLE +} from '../utils/mssql.js'; +import { getTablesFromPattern, ResolvedTable } from '../utils/schema.js'; +import { toExpressionTypeFromMSSQLType } from '../common/mssqls-to-sqlite.js'; export class MSSQLRouteAPIAdapter implements api.RouteAPI { protected connectionManager: MSSQLConnectionManager; - constructor(protected config: ResolvedConnectionConfig) { + constructor(protected config: types.ResolvedMSSQLConnectionConfig) { this.connectionManager = new MSSQLConnectionManager(config, {}); } - createReplicationHead(callback: ReplicationHeadCallback): Promise { - return Promise.resolve(undefined); + async createReplicationHead(callback: ReplicationHeadCallback): Promise { + const currentLSN = await getLatestLSN(this.connectionManager); + const result = await callback(currentLSN.toString()); + + // Updates the powersync checkpoints table on the source database, ensuring that an update with a newer LSN will be captured by the CDC. 
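+    // (A sketch of the idea rather than the exact statement: conceptually this is an
+    // idempotent write along the lines of
+    //   UPDATE dbo._powersync_checkpoints SET last_updated = GETDATE();
+    //   IF @@ROWCOUNT = 0 INSERT INTO dbo._powersync_checkpoints DEFAULT VALUES;
+    // so a CDC event at or beyond the LSN reported above is guaranteed to appear.)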
+    await createCheckpoint(this.connectionManager);
+
+    return result;
   }

-  executeQuery(query: string, params: any[]): Promise {
-    return Promise.resolve(undefined);
+  async executeQuery(query: string, params: any[]): Promise<ExecuteSqlResponse> {
+    if (!this.config.debug_api) {
+      return service_types.internal_routes.ExecuteSqlResponse.encode({
+        results: {
+          columns: [],
+          rows: []
+        },
+        success: false,
+        error: 'SQL querying is not enabled'
+      });
+    }
+    try {
+      const { recordset: result } = await this.connectionManager.query(query, params);
+      return service_types.internal_routes.ExecuteSqlResponse.encode({
+        success: true,
+        results: {
+          columns: Object.values(result.columns).map((column) => column.name),
+          rows: result.map((row) => {
+            return Object.values(row).map((value: any) => {
+              const sqlValue = sync_rules.applyValueContext(
+                sync_rules.toSyncRulesValue(value),
+                sync_rules.CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY
+              );
+
+              if (typeof sqlValue == 'bigint') {
+                return Number(sqlValue);
+              } else if (value instanceof Date) {
+                return value.toISOString();
+              } else if (sync_rules.isJsonValue(sqlValue)) {
+                return sqlValue;
+              } else {
+                return null;
+              }
+            });
+          })
+        }
+      });
+    } catch (e) {
+      return service_types.internal_routes.ExecuteSqlResponse.encode({
+        results: {
+          columns: [],
+          rows: []
+        },
+        success: false,
+        error: e.message
+      });
+    }
   }

-  getConnectionSchema(): Promise {
-    return Promise.resolve([]);
+  async getConnectionSchema(): Promise<service_types.DatabaseSchema[]> {
+    const { recordset: results } = await this.connectionManager.query(`
+      SELECT
+        sch.name AS schema_name,
+        tbl.name AS table_name,
+        col.name AS column_name,
+        typ.name AS data_type,
+        CASE
+          WHEN typ.name IN ('nvarchar', 'nchar')
+            AND col.max_length > 0
+            AND col.max_length != -1
+            THEN typ.name + '(' + CAST(col.max_length / 2 AS VARCHAR) + ')'
+          WHEN typ.name IN ('varchar', 'char', 'varbinary', 'binary')
+            AND col.max_length > 0
+            AND col.max_length != -1
+            THEN typ.name + '(' + CAST(col.max_length AS VARCHAR) + ')'
+          WHEN typ.name IN ('varchar', 'nvarchar', 'char', 'nchar')
+            AND col.max_length = -1
+            THEN typ.name + '(MAX)'
+          WHEN typ.name IN ('decimal', 'numeric')
+            AND col.precision > 0
+            THEN typ.name + '(' + CAST(col.precision AS VARCHAR) + ',' + CAST(col.scale AS VARCHAR) + ')'
+          WHEN typ.name IN ('float', 'real')
+            AND col.precision > 0
+            THEN typ.name + '(' + CAST(col.precision AS VARCHAR) + ')'
+          ELSE typ.name
+        END AS formatted_type
+      FROM sys.tables AS tbl
+        JOIN sys.schemas AS sch ON sch.schema_id = tbl.schema_id
+        JOIN sys.columns AS col ON col.object_id = tbl.object_id
+        JOIN sys.types AS typ ON typ.user_type_id = col.user_type_id
+      WHERE sch.name = '${this.connectionManager.schema}'
+        AND sch.name NOT IN ('sys', 'INFORMATION_SCHEMA', 'cdc')
+        AND tbl.name NOT IN ('systranschemas', '${POWERSYNC_CHECKPOINTS_TABLE}')
+        AND tbl.type = 'U'
+        AND col.is_computed = 0
+      ORDER BY sch.name, tbl.name, col.column_id
+    `);
+
+    /**
+     * Reduces the SQL results into a Record of {@link DatabaseSchema},
+     * then returns the values as an array.
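+     * For example (illustrative), rows for a single table dbo.lists reduce to
+     *   { dbo: { name: 'dbo', tables: [{ name: 'lists', columns: [...] }] } }
+     * after which Object.values(...) yields the returned array.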
+     */
+    const schemas: Record<string, service_types.DatabaseSchema> = {};
+
+    for (const row of results) {
+      const schemaName = row.schema_name as string;
+      const tableName = row.table_name as string;
+      const columnName = row.column_name as string;
+      const dataType = row.data_type as string;
+      const formattedType = (row.formatted_type as string) || dataType;
+
+      const schema =
+        schemas[schemaName] ||
+        (schemas[schemaName] = {
+          name: schemaName,
+          tables: []
+        });
+
+      let table = schema.tables.find((t) => t.name === tableName);
+      if (!table) {
+        table = {
+          name: tableName,
+          columns: []
+        };
+        schema.tables.push(table);
+      }
+
+      table.columns.push({
+        name: columnName,
+        type: formattedType,
+        sqlite_type: toExpressionTypeFromMSSQLType(dataType).typeFlags,
+        internal_type: formattedType,
+        pg_type: formattedType
+      });
+    }
+
+    return Object.values(schemas);
   }

-  getConnectionStatus(): Promise {
-    return Promise.resolve(undefined);
+  async getConnectionStatus(): Promise<service_types.ConnectionStatusV2Response> {
+    const base = {
+      id: this.config?.id ?? '',
+      uri: this.config == null ? '' : types.baseUri(this.config)
+    };
+
+    try {
+      await this.connectionManager.query(`SELECT 'PowerSync connection test'`);
+    } catch (e) {
+      return {
+        ...base,
+        connected: false,
+        errors: [{ level: 'fatal', message: `${e.code} - message: ${e.message}` }]
+      };
+    }
+
+    try {
+      const errors = await checkSourceConfiguration(this.connectionManager);
+      if (errors.length) {
+        return {
+          ...base,
+          connected: true,
+          errors: errors.map((e) => ({ level: 'fatal', message: e }))
+        };
+      }
+    } catch (e) {
+      return {
+        ...base,
+        connected: true,
+        errors: [{ level: 'fatal', message: e.message }]
+      };
+    }
+
+    return {
+      ...base,
+      connected: true,
+      errors: []
+    };
   }

-  getDebugTablesInfo(tablePatterns: TablePattern[], sqlSyncRules: SqlSyncRules): Promise {
-    return Promise.resolve([]);
+  async getDebugTablesInfo(tablePatterns: TablePattern[], sqlSyncRules: SqlSyncRules): Promise<PatternResult[]> {
+    const result: PatternResult[] = [];
+
+    for (const tablePattern of tablePatterns) {
+      const schema = tablePattern.schema;
+      const patternResult: PatternResult = {
+        schema: schema,
+        pattern: tablePattern.tablePattern,
+        wildcard: tablePattern.isWildcard
+      };
+      result.push(patternResult);
+
+      const tables = await getTablesFromPattern(this.connectionManager, tablePattern);
+      if (tablePattern.isWildcard) {
+        patternResult.tables = [];
+        for (const table of tables) {
+          const details = await getDebugTableInfo({
+            connectionManager: this.connectionManager,
+            tablePattern,
+            table,
+            syncRules: sqlSyncRules
+          });
+          patternResult.tables.push(details);
+        }
+      } else {
+        if (tables.length == 0) {
+          // This should technically never happen, but we'll handle it anyway.
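+          // A non-wildcard pattern that matched no tables still gets a placeholder
+          // entry (objectId 0), so the debug output can report the table as missing
+          // rather than silently dropping it from the results.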
+ const resolvedTable: ResolvedTable = { + objectId: 0, + schema: schema, + name: tablePattern.name + }; + patternResult.table = await getDebugTableInfo({ + connectionManager: this.connectionManager, + tablePattern, + table: resolvedTable, + syncRules: sqlSyncRules + }); + } else { + patternResult.table = await getDebugTableInfo({ + connectionManager: this.connectionManager, + tablePattern, + table: tables[0], + syncRules: sqlSyncRules + }); + } + } + } + + return result; } getParseSyncRulesOptions(): ParseSyncRulesOptions { @@ -45,15 +265,19 @@ export class MSSQLRouteAPIAdapter implements api.RouteAPI { }; } - getReplicationLagBytes(options: ReplicationLagOptions): Promise { - return Promise.resolve(undefined); + async getReplicationLagBytes(options: ReplicationLagOptions): Promise { + return undefined; + } + + async getSourceConfig(): Promise { + return this.config; } - getSourceConfig(): Promise { - return Promise.resolve(undefined); + async [Symbol.asyncDispose]() { + await this.shutdown(); } - shutdown(): Promise { - return Promise.resolve(undefined); + async shutdown(): Promise { + await this.connectionManager.end(); } } From 71a2904997fc6a296ee3ed029563ea5db025a03e Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 11:32:44 +0200 Subject: [PATCH 26/42] Updated mssql to sqlite type mappings Added type mapping tests --- .../src/common/mssqls-to-sqlite.ts | 107 +++- .../test/src/mssql-to-sqlite.test.ts | 474 ++++++++++++++++++ modules/module-mssql/test/src/util.ts | 17 +- 3 files changed, 593 insertions(+), 5 deletions(-) create mode 100644 modules/module-mssql/test/src/mssql-to-sqlite.test.ts diff --git a/modules/module-mssql/src/common/mssqls-to-sqlite.ts b/modules/module-mssql/src/common/mssqls-to-sqlite.ts index cbb5fcec3..dec261923 100644 --- a/modules/module-mssql/src/common/mssqls-to-sqlite.ts +++ b/modules/module-mssql/src/common/mssqls-to-sqlite.ts @@ -1,5 +1,6 @@ import sql from 'mssql'; -import { DatabaseInputRow, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules'; +import { DatabaseInputRow, ExpressionType, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules'; +import { MSSQLUserDefinedType } from '../types/mssql-data-types.js'; export function toSqliteInputRow(row: any, columns: sql.IColumnMetadata): SqliteInputRow { let result: DatabaseInputRow = {}; @@ -15,6 +16,10 @@ export function toSqliteInputRow(row: any, columns: sql.IColumnMetadata): Sqlite result[key] = BigInt(row[key]); } break; + case sql.TYPES.Bit: + // MSSQL returns BIT as boolean + result[key] = row[key] ? 1 : 0; + break; // Convert Dates to string case sql.TYPES.Date: result[key] = toISODateString(row[key] as Date); @@ -29,7 +34,24 @@ export function toSqliteInputRow(row: any, columns: sql.IColumnMetadata): Sqlite const date = row[key] as Date; result[key] = isNaN(date.getTime()) ? 
null : date.toISOString(); break; - // TODO: Confirm case sql.TYPES.UDT + case sql.TYPES.Binary: + case sql.TYPES.VarBinary: + case sql.TYPES.Image: + result[key] = new Uint8Array(Object.values(row[key])); + break; + // TODO: Spatial types need to be converted to binary WKB, they are returned as a non standard object currently + case sql.TYPES.Geometry: + case sql.TYPES.Geography: + result[key] = JSON.stringify(row[key]); + break; + case sql.TYPES.UDT: + if (columnMetadata.udt.name === MSSQLUserDefinedType.HIERARCHYID) { + result[key] = new Uint8Array(Object.values(row[key])); + break; + } else { + result[key] = row[key]; + } + break; default: result[key] = row[key]; } @@ -45,6 +67,85 @@ function toISODateString(date: Date): string | null { return isNaN(date.getTime()) ? null : date.toISOString().split('T')[0]; } +/** + * MSSQL time format is HH:mm:ss[.nnnnnnn] + * @param date + * @returns + */ function toISOTimeString(date: Date): string | null { - return isNaN(date.getTime()) ? null : date.toISOString().split('T')[1]; + return isNaN(date.getTime()) ? null : date.toISOString().split('T')[1].replace('Z', ''); +} + +/** + * Converts MSSQL type names to SQLite ExpressionType + * @param mssqlType - The MSSQL type name (e.g., 'int', 'varchar', 'datetime2') + */ +export function toExpressionTypeFromMSSQLType(mssqlType: string | undefined): ExpressionType { + if (!mssqlType) { + return ExpressionType.TEXT; + } + + const baseType = mssqlType.toUpperCase(); + switch (baseType) { + case 'BIT': + case 'TINYINT': + case 'SMALLINT': + case 'INT': + case 'INTEGER': + case 'BIGINT': + return ExpressionType.INTEGER; + case 'BINARY': + case 'VARBINARY': + case 'IMAGE': + case 'TIMESTAMP': + return ExpressionType.BLOB; + case 'FLOAT': + case 'REAL': + case 'MONEY': + case 'SMALLMONEY': + case 'DECIMAL': + case 'NUMERIC': + return ExpressionType.REAL; + case 'JSON': + return ExpressionType.TEXT; + // System and extended types + case 'SYSNAME': + // SYSNAME is essentially NVARCHAR(128), map to TEXT + return ExpressionType.TEXT; + case 'HIERARCHYID': + // HIERARCHYID is a CLR UDT representing hierarchical data, stored as string representation + return ExpressionType.TEXT; + case 'GEOMETRY': + case 'GEOGRAPHY': + // Spatial CLR UDT types, typically stored as WKT (Well-Known Text) strings + return ExpressionType.TEXT; + case 'VECTOR': + // Vector type (SQL Server 2022+), stored as binary data + return ExpressionType.BLOB; + default: + // In addition to the normal text types, includes: VARCHAR, NVARCHAR, CHAR, NCHAR, TEXT, NTEXT, DATE, TIME, DATETIME, DATETIME2, SMALLDATETIME, DATETIMEOFFSET, XML, UNIQUEIDENTIFIER, SQL_VARIANT + return ExpressionType.TEXT; + } +} + +export interface CDCRowToSqliteRowOptions { + row: any; + columns: sql.IColumnMetadata; +} +// CDC metadata columns in CDCS rows that should be excluded +const CDC_METADATA_COLUMNS = ['__$operation', '__$start_lsn', '__$end_lsn', '__$seqval', '__$update_mask']; +/** + * Convert CDC row data to SqliteRow format. + * CDC rows include table columns plus CDC metadata columns (__$operation, __$start_lsn, etc.) + * which we filter out. 
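+ * For example, a CDC insert row (__$operation = 2) such as
+ *   { __$operation: 2, __$start_lsn: ..., id: 1, name: 'a' }
+ * reduces to { id: 1, name: 'a' } before the normal type conversion runs.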
+ */ +export function CDCToSqliteRow(options: CDCRowToSqliteRowOptions): SqliteInputRow { + const { row, columns } = options; + const filteredRow: DatabaseInputRow = {}; + for (const key in row) { + if (!CDC_METADATA_COLUMNS.includes(key)) { + filteredRow[key] = row[key]; + } + } + return toSqliteInputRow(filteredRow, columns); } diff --git a/modules/module-mssql/test/src/mssql-to-sqlite.test.ts b/modules/module-mssql/test/src/mssql-to-sqlite.test.ts new file mode 100644 index 000000000..bac600c3c --- /dev/null +++ b/modules/module-mssql/test/src/mssql-to-sqlite.test.ts @@ -0,0 +1,474 @@ +import { SqliteInputRow } from '@powersync/service-sync-rules'; +import { afterAll, beforeEach, describe, expect, test } from 'vitest'; +import { clearTestDb, createUpperCaseUUID, TEST_CONNECTION_OPTIONS, waitForPendingCDCChanges } from './util.js'; +import { CDCToSqliteRow, toSqliteInputRow } from '@module/common/mssqls-to-sqlite.js'; +import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; +import { + enableCDCForTable, + getCaptureInstance, + getLatestReplicatedLSN, + getMinLSN, + toQualifiedTableName +} from '@module/utils/mssql.js'; +import sql from 'mssql'; + +describe('MSSQL Data Types Tests', () => { + const connectionManager = new MSSQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); + + beforeEach(async () => { + await clearTestDb(connectionManager); + await setupTestTable(); + }); + afterAll(async () => { + await connectionManager.end(); + }); + + async function setupTestTable() { + await connectionManager.query(` + CREATE TABLE ${connectionManager.schema}.test_data ( + id INT IDENTITY(1,1) PRIMARY KEY, + tinyint_col TINYINT, + smallint_col SMALLINT, + int_col INT, + bigint_col BIGINT, + float_col FLOAT, + real_col REAL, + decimal_col DECIMAL(10,2), + numeric_col NUMERIC(10,2), + money_col MONEY, + smallmoney_col SMALLMONEY, + bit_col BIT, + + date_col DATE, + datetime_col DATETIME, + datetime2_col DATETIME2(6), + smalldatetime_col SMALLDATETIME, + datetimeoffset_col DATETIMEOFFSET(3), + time_col TIME(6), + + char_col CHAR(10), + varchar_col VARCHAR(255), + varchar_max_col VARCHAR(MAX), + nchar_col NCHAR(15), + nvarchar_col NVARCHAR(255), + nvarchar_max_col NVARCHAR(MAX), + text_col TEXT, + ntext_col NTEXT, + + binary_col BINARY(16), + varbinary_col VARBINARY(256), + varbinary_max_col VARBINARY(MAX), + image_col IMAGE, + + uniqueidentifier_col UNIQUEIDENTIFIER, + xml_col XML, + json_col NVARCHAR(MAX), + + hierarchyid_col HIERARCHYID, + geometry_col GEOMETRY, + geography_col GEOGRAPHY + ) + `); + + await enableCDCForTable({ connectionManager, table: 'test_data' }); + } + + test('Number types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(` + INSERT INTO ${connectionManager.schema}.test_data( + tinyint_col, + smallint_col, + int_col, + bigint_col, + float_col, + real_col, + decimal_col, + numeric_col, + money_col, + smallmoney_col, + bit_col + ) VALUES ( + 255, -- TINYINT maximum value + 32767, -- SMALLINT maximum value + 2147483647, -- INT maximum value + 9223372036854775807, -- BIGINT maximum value + 3.1415926535, -- FLOAT example + 3.14, -- REAL example + 12345.67, -- DECIMAL(10,2) example + 12345.67, -- NUMERIC(10,2) example + 12345.67, -- MONEY example + 123.45, -- SMALLMONEY example + 1 -- BIT value + ) + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await 
getReplicatedRows(connectionManager, 'test_data'); + + const expectedResult: SqliteInputRow = { + tinyint_col: 255, + smallint_col: 32767, + int_col: 2147483647, + bigint_col: 9223372036854775807n, + float_col: 3.1415926535, + real_col: expect.closeTo(3.14, 2), + decimal_col: 12345.67, + numeric_col: 12345.67, + money_col: 12345.67, + smallmoney_col: 123.45, + bit_col: 1 + }; + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Character types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data ( + char_col, + varchar_col, + varchar_max_col, + nchar_col, + nvarchar_col, + nvarchar_max_col, + text_col, + ntext_col + ) VALUES ( + 'CharData', -- CHAR(10) with padding spaces + 'Variable character data',-- VARCHAR(255) + 'Variable character data MAX', -- VARCHAR(MAX) + N'UnicodeChar', -- NCHAR(15) + N'Variable Unicode data', -- NVARCHAR(255) + N'Variable Unicode data MAX', -- NVARCHAR(MAX) + 'TextData', -- TEXT + N'UnicodeTextData' -- NTEXT + ) + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + const expectedResult = { + char_col: 'CharData ', // CHAR pads with spaces up to the defined length (10) + varchar_col: 'Variable character data', + varchar_max_col: 'Variable character data MAX', + nchar_col: 'UnicodeChar ', // NCHAR pads with spaces up to the defined length (15) + nvarchar_col: 'Variable Unicode data', + nvarchar_max_col: 'Variable Unicode data MAX', + text_col: 'TextData', + ntext_col: 'UnicodeTextData' + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Binary types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const binaryData = Buffer.from('BinaryData'); + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data ( + binary_col, + varbinary_col, + varbinary_max_col, + image_col + ) VALUES ( + @binary_col, + @varbinary_col, + @varbinary_max_col, + @image_col + ) + `, + [ + { name: 'binary_col', type: sql.Binary, value: binaryData }, + { name: 'varbinary_col', type: sql.VarBinary, value: binaryData }, + { name: 'varbinary_max_col', type: sql.VarBinary(sql.MAX), value: binaryData }, + { name: 'image_col', type: sql.Image, value: binaryData } + ] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + const expectedBinary = new Uint8Array(binaryData); + const expectedBinaryPadded = new Uint8Array(16); + expectedBinaryPadded.set(expectedBinary.slice(0, 16), 0); + + const expectedResult: SqliteInputRow = { + binary_col: expectedBinaryPadded, + varbinary_col: expectedBinary, + varbinary_max_col: expectedBinary, + image_col: expectedBinary + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testDate = new Date('2023-03-06T15:47:00.000Z'); + await 
connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data( + date_col, + datetime_col, + datetime2_col, + smalldatetime_col, + time_col + ) + VALUES ( + @date_col, + @datetime_col, + @datetime2_col, + @smalldatetime_col, + @time_col + ) + `, + [ + { name: 'date_col', type: sql.Date, value: testDate }, + { name: 'datetime_col', type: sql.DateTime, value: testDate }, + { name: 'datetime2_col', type: sql.DateTime2(6), value: testDate }, + { name: 'smalldatetime_col', type: sql.SmallDateTime, value: testDate }, + { name: 'time_col', type: sql.Time(6), value: testDate } + ] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + const expectedResult = { + date_col: '2023-03-06', + datetime_col: '2023-03-06T15:47:00.000Z', + datetime2_col: '2023-03-06T15:47:00.000Z', + smalldatetime_col: '2023-03-06T15:47:00.000Z', + time_col: '15:47:00.000' + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types edge cases mappings', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime2_col) + VALUES ('0001-01-01 00:00:00.000') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime2_col) + VALUES ('9999-12-31 23:59:59.999') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime_col) + VALUES ('1753-01-01 00:00:00') + `); + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetime_col) + VALUES ('9999-12-31 23:59:59.997') + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const expectedResults = [ + { datetime2_col: '0001-01-01T00:00:00.000Z' }, + { datetime2_col: '9999-12-31T23:59:59.999Z' }, + { datetime_col: '1753-01-01T00:00:00.000Z' }, + { datetime_col: '9999-12-31T23:59:59.997Z' } + ]; + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + for (let i = 0; i < expectedResults.length; i++) { + expect(databaseRows[i]).toMatchObject(expectedResults[i]); + expect(replicatedRows[i]).toMatchObject(expectedResults[i]); + } + }); + + test('DateTimeOffset type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + // DateTimeOffset preserves timezone information + await connectionManager.query(` + INSERT INTO [${connectionManager.schema}].test_data(datetimeoffset_col) + VALUES ('2023-03-06 15:47:00.000 +05:00') + `); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const expectedResult = { + datetimeoffset_col: '2023-03-06T10:47:00.000Z' // Converted to UTC + }; + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + // Note: The driver converts DateTimeOffset to Date, which incorporates the timezone offset which is then represented in UTC. 
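+    // i.e. '2023-03-06 15:47:00.000 +05:00' and '2023-03-06T10:47:00.000Z' denote
+    // the same instant; the original +05:00 offset itself is not preserved.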
+ expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('UniqueIdentifier type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + + const testGuid = createUpperCaseUUID(); + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(uniqueidentifier_col) + VALUES (@guid) + `, + [{ name: 'guid', type: sql.UniqueIdentifier, value: testGuid }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + // GUIDs are returned as strings + expect(databaseRows[0].uniqueidentifier_col).toBe(testGuid); + expect(replicatedRows[0].uniqueidentifier_col).toBe(testGuid); + }); + + test('JSON type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const expectedJSON = { name: 'John Doe', age: 30, married: true }; + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(json_col) + VALUES (@json) + `, + [{ name: 'json', type: sql.NVarChar(sql.MAX), value: JSON.stringify(expectedJSON) }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + const actualDBJSONValue = JSON.parse(databaseRows[0].json_col as string); + const actualReplicatedJSONValue = JSON.parse(replicatedRows[0].json_col as string); + expect(actualDBJSONValue).toEqual(expectedJSON); + expect(actualReplicatedJSONValue).toEqual(expectedJSON); + }); + + test('XML type mapping', async () => { + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const xmlData = 'value'; + await connectionManager.query( + ` + INSERT INTO [${connectionManager.schema}].test_data(xml_col) + VALUES (@xml) + `, + [{ name: 'xml', type: sql.Xml, value: xmlData }] + ); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + + expect(databaseRows[0].xml_col).toBe(xmlData); + expect(replicatedRows[0].xml_col).toBe(xmlData); + }); + + // TODO: Update test when properly converting spatial types + // test('Spatial types mappings', async () => { + // const beforeLSN = await getLatestReplicatedLSN(connectionManager); + // // Geometry and Geography types are stored as binary/WKT strings + // await connectionManager.query(` + // INSERT INTO [${connectionManager.schema}].test_data(geometry_col, geography_col) + // VALUES ( + // geometry::STGeomFromText('POINT(1 2)', 0), + // geography::STGeomFromText('POINT(1 2)', 4326) + // ) + // `); + // await waitForPendingCDCChanges(beforeLSN, connectionManager); + // + // const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + // const replicatedRows = await getReplicatedRows(connectionManager, 'test_data'); + // + // // The driver currently returns spatial types as non standard objects. 
+
+  // TODO: Update test when properly converting spatial types
+  // test('Spatial types mappings', async () => {
+  //   const beforeLSN = await getLatestReplicatedLSN(connectionManager);
+  //   // Geometry and Geography types are stored as binary/WKT strings
+  //   await connectionManager.query(`
+  //     INSERT INTO [${connectionManager.schema}].test_data(geometry_col, geography_col)
+  //     VALUES (
+  //       geometry::STGeomFromText('POINT(1 2)', 0),
+  //       geography::STGeomFromText('POINT(1 2)', 4326)
+  //     )
+  //   `);
+  //   await waitForPendingCDCChanges(beforeLSN, connectionManager);
+  //
+  //   const databaseRows = await getDatabaseRows(connectionManager, 'test_data');
+  //   const replicatedRows = await getReplicatedRows(connectionManager, 'test_data');
+  //
+  //   // The driver currently returns spatial types as non-standard objects. We just convert them to JSON strings for now.
+  //   expect(databaseRows[0].geometry_col).toBeDefined();
+  //   expect(databaseRows[0].geography_col).toBeDefined();
+  //   expect(replicatedRows[0].geometry_col).toBeDefined();
+  //   expect(replicatedRows[0].geography_col).toBeDefined();
+  // });
+
+  // TODO: Enable when HierarchyID type is properly supported
+  // test('HierarchyID type mapping', async () => {
+  //   const hierarchyid = '/1/';
+  //   const beforeLSN = await getLatestReplicatedLSN(connectionManager);
+  //   await connectionManager.query(`
+  //     INSERT INTO [${connectionManager.schema}].test_data(hierarchyid_col)
+  //     VALUES (@hierarchyid)
+  //   `,
+  //     [{ name: 'hierarchyid', type: sql.VarChar, value: hierarchyid }]
+  //   );
+  //   await waitForPendingCDCChanges(beforeLSN, connectionManager);
+  //
+  //   const databaseRows = await getDatabaseRows(connectionManager, 'test_data');
+  //   const replicatedRows = await getReplicatedRows(connectionManager, 'test_data');
+  //
+  //   const expectedBinary = new Uint8Array(Buffer.from(hierarchyid));
+  //
+  //   expect(databaseRows[0].hierarchyid_col).toEqual(expectedBinary);
+  //   expect(replicatedRows[0].hierarchyid_col).toEqual(expectedBinary);
+  // });
+});
+
+async function getDatabaseRows(
+  connectionManager: MSSQLConnectionManager,
+  tableName: string
+): Promise<SqliteInputRow[]> {
+  const { recordset: rows } = await connectionManager.query(
+    `SELECT * FROM ${toQualifiedTableName(connectionManager.schema, tableName)}`
+  );
+  return rows.map((row) => {
+    const converted = toSqliteInputRow(row, rows.columns);
+    // Exclude id column from results
+    const { id, ...rest } = converted;
+    return rest;
+  });
+}
+
+/**
+ * Return all the updates from the CDC stream for the table.
+ */
+async function getReplicatedRows(
+  connectionManager: MSSQLConnectionManager,
+  tableName: string
+): Promise<SqliteInputRow[]> {
+  const endLSN = await getLatestReplicatedLSN(connectionManager);
+
+  const captureInstance = await getCaptureInstance({
+    connectionManager,
+    schema: connectionManager.schema,
+    tableName
+  });
+  if (!captureInstance) {
+    throw new Error(`No CDC capture instance found for table ${tableName}`);
+  }
+
+  const startLSN = await getMinLSN(connectionManager, captureInstance.name);
+  // Query CDC changes
+  const { recordset: results } = await connectionManager.query(
+    `
+    SELECT * FROM ${captureInstance.schema}.fn_cdc_get_all_changes_${captureInstance.name}(@from_lsn, @to_lsn, 'all update old') ORDER BY __$start_lsn, __$seqval
+    `,
+    [
+      { name: 'from_lsn', type: sql.VarBinary, value: startLSN.toBinary() },
+      { name: 'to_lsn', type: sql.VarBinary, value: endLSN.toBinary() }
+    ]
+  );
+
+  return results
+    .filter((row) => row.__$operation === 2) // Only INSERT operations
+    .map((row) => {
+      const converted = CDCToSqliteRow({ row, columns: results.columns });
+      // Exclude id column from results
+      const { id, ...rest } = converted;
+      return rest;
+    });
+}
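+
+// Illustrative sketch (hypothetical constant; the codes themselves are SQL Server's documented
+// __$operation values, used by the filter above): 1 = delete, 2 = insert, 3 = update before
+// image (only returned with the 'all update old' row filter), 4 = update after image.
+const CDC_OPERATION = { DELETE: 1, INSERT: 2, UPDATE_BEFORE: 3, UPDATE_AFTER: 4 } as const;
+// e.g. results.filter((row) => row.__$operation === CDC_OPERATION.INSERT)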
diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts
index 2657cbe01..8bbd2d5e4 100644
--- a/modules/module-mssql/test/src/util.ts
+++ b/modules/module-mssql/test/src/util.ts
@@ -87,12 +87,25 @@ export async function createTestTable(connectionManager: MSSQLConnectionManager,
   await enableCDCForTable({ connectionManager, table: tableName });
 }
 
+export async function createTestTableWithBasicId(
+  connectionManager: MSSQLConnectionManager,
+  tableName: string
+): Promise<void> {
+  await connectionManager.query(`
+    CREATE TABLE ${connectionManager.schema}.${tableName} (
+      id INT IDENTITY(1,1) PRIMARY KEY,
+      description VARCHAR(MAX)
+    )
+  `);
+  await enableCDCForTable({ connectionManager, table: tableName });
+}
+
 export interface TestData {
   id: string;
   description: string;
 }
 
 export async function insertTestData(connectionManager: MSSQLConnectionManager, tableName: string): Promise<TestData> {
-  const id = createUUID();
+  const id = createUpperCaseUUID();
   const description = `description_${id}`;
   await connectionManager.query(
     `
@@ -170,6 +183,6 @@ export async function getClientCheckpoint(
 /**
  * Generates a new UUID string in uppercase for testing purposes to match the SQL Server UNIQUEIDENTIFIER format.
  */
-export function createUUID(): string {
+export function createUpperCaseUUID(): string {
   return uuid().toUpperCase();
 }

From 542b84c97233ea33c63114256f772ac36bd63cad Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Wed, 26 Nov 2025 11:35:26 +0200
Subject: [PATCH 27/42] Added more mssql utility functions

---
 .../src/common/MSSQLSourceTable.ts            |   4 +-
 modules/module-mssql/src/index.ts             |   1 +
 .../module-mssql/src/module/MSSQLModule.ts    |   4 +-
 .../src/types/mssql-data-types.ts             |   6 +
 modules/module-mssql/src/utils/mssql.ts       | 109 ++++++++++++++++--
 5 files changed, 110 insertions(+), 14 deletions(-)
 create mode 100644 modules/module-mssql/src/index.ts
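A note on the createUUID -> createUpperCaseUUID rename in the patch above: SQL Server reads
UNIQUEIDENTIFIER values back as uppercase strings, so generating test ids in the same casing keeps
equality assertions trivial. A minimal sketch of the idea, assuming the uuid package the tests
already use:

    import { v4 as uuid } from 'uuid';
    // '0f8fad5b-d9cb-469f-a165-70867728950e' becomes '0F8FAD5B-D9CB-469F-A165-70867728950E',
    // matching how MSSQL returns UNIQUEIDENTIFIER columns
    const upperCaseId = uuid().toUpperCase();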
diff --git a/modules/module-mssql/src/common/MSSQLSourceTable.ts b/modules/module-mssql/src/common/MSSQLSourceTable.ts
index 768d03cb7..0d0dbd597 100644
--- a/modules/module-mssql/src/common/MSSQLSourceTable.ts
+++ b/modules/module-mssql/src/common/MSSQLSourceTable.ts
@@ -1,5 +1,5 @@
 import { SourceTable } from '@powersync/service-core';
-import { escapeIdentifier } from '../utils/mssql.js';
+import { toQualifiedTableName } from '../utils/mssql.js';
 
 export interface CaptureInstance {
   name: string;
@@ -49,6 +49,6 @@ export class MSSQLSourceTable {
    * Escapes this source table's name and schema for use in MSSQL queries.
    */
   toQualifiedName(): string {
-    return `${escapeIdentifier(this.sourceTable.schema)}.${escapeIdentifier(this.sourceTable.name)}`;
+    return toQualifiedTableName(this.sourceTable.schema, this.sourceTable.name);
   }
 }
diff --git a/modules/module-mssql/src/index.ts b/modules/module-mssql/src/index.ts
new file mode 100644
index 000000000..844339f8e
--- /dev/null
+++ b/modules/module-mssql/src/index.ts
@@ -0,0 +1 @@
+export * from './module/MSSQLModule.js';
diff --git a/modules/module-mssql/src/module/MSSQLModule.ts b/modules/module-mssql/src/module/MSSQLModule.ts
index f070134fc..9971840d8 100644
--- a/modules/module-mssql/src/module/MSSQLModule.ts
+++ b/modules/module-mssql/src/module/MSSQLModule.ts
@@ -47,7 +47,7 @@ export class MSSQLModule extends replication.ReplicationModule {
+  static async testConnection(normalizedConfig: types.ResolvedMSSQLConnectionConfig): Promise<void> {
     const connectionManager = new MSSQLConnectionManager(normalizedConfig, { max: 1 });
     try {
       const errors = await checkSourceConfiguration(connectionManager);
diff --git a/modules/module-mssql/src/types/mssql-data-types.ts b/modules/module-mssql/src/types/mssql-data-types.ts
index 402def00f..7cdf73413 100644
--- a/modules/module-mssql/src/types/mssql-data-types.ts
+++ b/modules/module-mssql/src/types/mssql-data-types.ts
@@ -66,6 +66,12 @@ export enum MSSQLExtendedUserType {
   GEOGRAPHY = 130
 }
 
+export enum MSSQLUserDefinedType {
+  VECTOR = 'vector',
+  SYSNAME = 'sysname',
+  HIERARCHYID = 'hierarchyid'
+}
+
 export interface MSSQLParameter {
   name: string;
   value: any;
diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts
index ca94d108f..7637571e6 100644
--- a/modules/module-mssql/src/utils/mssql.ts
+++ b/modules/module-mssql/src/utils/mssql.ts
@@ -1,11 +1,14 @@
 import sql from 'mssql';
-import { SourceTable } from '@powersync/service-core';
 import { coerce, gte } from 'semver';
 import { logger } from '@powersync/lib-services-framework';
 import { MSSQLConnectionManager } from '../replication/MSSQLConnectionManager.js';
 import { LSN } from '../common/LSN.js';
 import { CaptureInstance, MSSQLSourceTable } from '../common/MSSQLSourceTable.js';
 import { MSSQLParameter } from '../types/mssql-data-types.js';
+import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
+import { getReplicationIdentityColumns, ReplicationIdentityColumnsResult, ResolvedTable } from './schema.js';
+import * as service_types from '@powersync/service-types';
+import * as sync_rules from '@powersync/service-sync-rules';
 
 export const POWERSYNC_CHECKPOINTS_TABLE = '_powersync_checkpoints';
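A note ahead of the retention-threshold hunk below: SQL Server's CDC cleanup job trims change
tables, so a saved checkpoint LSN that has fallen behind a capture instance's minimum LSN can no
longer be replayed. The available window can be inspected directly; a minimal sketch, with
'dbo_test_data' standing in for a real capture instance name:

    const { recordset } = await connectionManager.query(
      `SELECT sys.fn_cdc_get_min_lsn('dbo_test_data') AS min_lsn, sys.fn_cdc_get_max_lsn() AS max_lsn`
    );
    // LSNs below min_lsn have already been cleaned up and can no longer be read.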
@@ -211,7 +214,7 @@ export interface IsWithinRetentionThresholdOptions {
 export async function isWithinRetentionThreshold(options: IsWithinRetentionThresholdOptions): Promise<boolean> {
   const { checkpointLSN, tables, connectionManager } = options;
   for (const table of tables) {
-    const minLSN = await getMinLSN(connectionManager, table);
+    const minLSN = await getMinLSN(connectionManager, table.captureInstance);
     if (minLSN > checkpointLSN) {
       logger.warn(
         `The checkpoint LSN:[${checkpointLSN}] is older than the minimum LSN:[${minLSN}] for table ${table.sourceTable.qualifiedName}. This indicates that the checkpoint LSN is outside of the retention window.`
@@ -222,9 +225,9 @@
   return true;
 }
 
-export async function getMinLSN(connectionManager: MSSQLConnectionManager, table: MSSQLSourceTable): Promise<LSN> {
+export async function getMinLSN(connectionManager: MSSQLConnectionManager, captureInstance: string): Promise<LSN> {
   const { recordset: result } = await connectionManager.query(
-    `SELECT sys.fn_cdc_get_min_lsn('${table.captureInstance}') AS min_lsn`
+    `SELECT sys.fn_cdc_get_min_lsn('${captureInstance}') AS min_lsn`
   );
   const rawMinLSN: Buffer = result[0].min_lsn;
   return LSN.fromBinary(rawMinLSN);
@@ -238,10 +241,14 @@
   return LSN.fromBinary(result[0].incremented_lsn);
 }
 
-export async function getCaptureInstance(
-  connectionManager: MSSQLConnectionManager,
-  table: SourceTable
-): Promise<CaptureInstance | null> {
+export interface GetCaptureInstanceOptions {
+  connectionManager: MSSQLConnectionManager;
+  tableName: string;
+  schema: string;
+}
+
+export async function getCaptureInstance(options: GetCaptureInstanceOptions): Promise<CaptureInstance | null> {
+  const { connectionManager, tableName, schema } = options;
   const { recordset: result } = await connectionManager.query(
     `
     SELECT
@@ -251,8 +258,8 @@
       sys.tables tbl
       INNER JOIN sys.schemas sch ON tbl.schema_id = sch.schema_id
       INNER JOIN cdc.change_tables ct ON ct.source_object_id = tbl.object_id
-    WHERE sch.name = '${table.schema}'
-      AND tbl.name = '${table.name}'
+    WHERE sch.name = '${schema}'
+      AND tbl.name = '${tableName}'
       AND ct.end_lsn IS NULL;
     `
   );
@@ -297,6 +304,10 @@
   return `[${identifier}]`;
 }
 
+export function toQualifiedTableName(schema: string, tableName: string): string {
+  return `${escapeIdentifier(schema)}.${escapeIdentifier(tableName)}`;
+}
+
 export function isIColumnMetadata(obj: any): obj is sql.IColumnMetadata {
   if (obj === null || typeof obj !== 'object' || Array.isArray(obj)) {
     return false;
   }
@@ -329,3 +340,81 @@
   }
   return request;
 }
+
+export interface GetDebugTableInfoOptions {
+  connectionManager: MSSQLConnectionManager;
+  tablePattern: TablePattern;
+  table: ResolvedTable;
+  syncRules: SqlSyncRules;
+}
+
+export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Promise<service_types.TableInfo> {
+  const { connectionManager, tablePattern, table, syncRules } = options;
+  const { schema } = tablePattern;
+
+  let idColumnsResult: ReplicationIdentityColumnsResult | null = null;
+  let idColumnsError: service_types.ReplicationError | null = null;
+  try {
+    idColumnsResult = await getReplicationIdentityColumns({
+      connectionManager: connectionManager,
+      schema,
+      tableName: table.name
+    });
+  } catch (ex) {
+    idColumnsError = { level: 'fatal', message: ex.message };
+  }
+
+  const idColumns = idColumnsResult?.columns ?? [];
+  const sourceTable: sync_rules.SourceTableInterface = {
+    connectionTag: connectionManager.connectionTag,
+    schema: schema,
+    name: table.name
+  };
+  const syncData = syncRules.tableSyncsData(sourceTable);
+  const syncParameters = syncRules.tableSyncsParameters(sourceTable);
+
+  if (idColumns.length === 0 && idColumnsError == null) {
+    let message = `No replication id found for ${toQualifiedTableName(schema, table.name)}.
Replica identity: ${idColumnsResult?.identity}.`; + if (idColumnsResult?.identity === 'default') { + message += ' Configure a primary key on the table.'; + } + idColumnsError = { level: 'fatal', message }; + } + + let selectError: service_types.ReplicationError | null = null; + try { + await connectionManager.query(`SELECT TOP 1 * FROM [${toQualifiedTableName(schema, table.name)}]`); + } catch (e) { + selectError = { level: 'fatal', message: e.message }; + } + + // Check if CDC is enabled for the table + let cdcError: service_types.ReplicationError | null = null; + try { + const isEnabled = await isTableEnabledForCDC({ + connectionManager: connectionManager, + table: table.name, + schema: schema + }); + if (!isEnabled) { + cdcError = { + level: 'fatal', + message: `CDC is not enabled for table ${toQualifiedTableName(schema, table.name)}. Enable CDC with: sys.sp_cdc_enable_table @source_schema = '${schema}', @source_name = '${table.name}', @role_name = NULL, @supports_net_changes = 1` + }; + } + } catch (e) { + cdcError = { level: 'warning', message: `Could not check CDC status: ${e.message}` }; + } + + // TODO check RLS settings for table + + return { + schema: schema, + name: table.name, + pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined, + replication_id: idColumns.map((c) => c.name), + data_queries: syncData, + parameter_queries: syncParameters, + errors: [idColumnsError, selectError, cdcError].filter((error) => error != null) as service_types.ReplicationError[] + }; +} From 10647fb5a37a820f411b4bef8e5f40a016921c3d Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:24:41 +0200 Subject: [PATCH 28/42] Made polling interval configurable Improved logging for CDC Poller --- .../module-mssql/src/module/MSSQLModule.ts | 3 +- .../module-mssql/src/replication/CDCPoller.ts | 55 ++++++++++--------- .../src/replication/CDCReplicationJob.ts | 7 ++- .../src/replication/CDCReplicator.ts | 7 ++- .../module-mssql/src/replication/CDCStream.ts | 40 +++++--------- modules/module-mssql/src/types/types.ts | 52 +++++++++++++++++- 6 files changed, 107 insertions(+), 57 deletions(-) diff --git a/modules/module-mssql/src/module/MSSQLModule.ts b/modules/module-mssql/src/module/MSSQLModule.ts index 9971840d8..4f9cf928c 100644 --- a/modules/module-mssql/src/module/MSSQLModule.ts +++ b/modules/module-mssql/src/module/MSSQLModule.ts @@ -40,7 +40,8 @@ export class MSSQLModule extends replication.ReplicationModule { - this.logger.info(`CDC polling started...`); + this.logger.info(`CDC polling started with interval of ${this.pollingIntervalMs}ms...`); + this.logger.info(`Polling a maximum of [${this.pollingBatchSize}] transactions per polling cycle.`); while (!this.isStopped) { - // Skip cycle if already polling (concurrency guard) + // Don't poll if already polling (concurrency guard) if (this.isPolling) { await timers.setTimeout(this.pollingIntervalMs); continue; @@ -136,7 +137,8 @@ export class CDCPoller { this.isPolling = true; try { - // Calculate the polling LSN bounds for this batch + // Calculate the LSN bounds for this batch + // CDC bounds are inclusive, so the new startLSN is the currentLSN incremented by 1 const startLSN = await incrementLSN(this.currentLSN, this.connectionManager); const { recordset: results } = await this.connectionManager.query( @@ -148,37 +150,33 @@ export class CDCPoller { [{ name: 'startLSN', type: sql.VarBinary, value: startLSN.toBinary() }] ); - // Handle case where no results returned (no new changes available) + // No new LSNs found, no 
changes to process if (results.length === 0) { return false; } + // The new endLSN is the largest LSN in the result const endLSN = LSN.fromBinary(results[results.length - 1].start_lsn); - // If startLSN is greater than or equal to endLSN, no new changes are available - if (startLSN.compare(endLSN) >= 0) { - return false; - } + this.logger.info(`Polling bounds are ${startLSN} -> ${endLSN} spanning ${results.length} transaction(s).`); - this.logger.info(`Polling bounds are ${startLSN} -> ${endLSN}. Total potential transactions: ${results.length}`); - - // Poll each source table using existing pollTable() method let transactionCount = 0; for (const table of this.sourceTables) { const tableTransactionCount = await this.pollTable(table, { startLSN, endLSN }); // We poll for batch size transactions, but these include transactions not applicable to our Source Tables. - // Each Source Table may or may not have transactions that are applicable to it, so just keep track of the highest number of transactions processedfor any Source Table. + // Each Source Table may or may not have transactions that are applicable to it, so just keep track of the highest number of transactions processed for any Source Table. if (tableTransactionCount > transactionCount) { transactionCount = tableTransactionCount; } } + this.logger.info( + `Processed ${results.length} transaction(s), including ${transactionCount} Source Table transaction(s).` + ); // Call eventHandler.onCommit() with toLSN after processing all tables await this.eventHandler.onCommit(endLSN.toString(), transactionCount); - // Update currentLSN to toLSN this.currentLSN = endLSN; - this.logger.info(`Source Table transactions processed: ${transactionCount}.`); return true; } finally { @@ -188,8 +186,8 @@ export class CDCPoller { } private async pollTable(table: MSSQLSourceTable, bounds: { startLSN: LSN; endLSN: LSN }): Promise { - // Check that the minimum LSN is within the bounds - const minLSN = await getMinLSN(this.connectionManager, table); + // Ensure that the startLSN is not before the minimum LSN for the table + const minLSN = await getMinLSN(this.connectionManager, table.captureInstance); if (minLSN > bounds.endLSN) { return 0; } else if (minLSN >= bounds.startLSN) { @@ -205,34 +203,39 @@ export class CDCPoller { ] ); + let transactionCount = 0; + let updateBefore: any = null; for (const row of results) { const transactionLSN = LSN.fromBinary(row.__$start_lsn); - let updateBefore: any = null; switch (row.__$operation) { case Operation.DELETE: await this.eventHandler.onDelete(row, table, results.columns); - this.logger.info(`Processed DELETE row: ${transactionLSN}`); + transactionCount++; + this.logger.info(`Processed DELETE row LSN: ${transactionLSN}`); break; case Operation.INSERT: await this.eventHandler.onInsert(row, table, results.columns); - this.logger.info(`Processed INSERT row: ${transactionLSN}`); + transactionCount++; + this.logger.info(`Processed INSERT row LSN: ${transactionLSN}`); break; case Operation.UPDATE_BEFORE: updateBefore = row; - this.logger.info(`Processed UPDATE, before row: ${transactionLSN}`); + this.logger.debug(`Processed UPDATE, before row LSN: ${transactionLSN}`); break; case Operation.UPDATE_AFTER: if (updateBefore === null) { throw new ReplicationAssertionError('Missing before image for update event.'); } await this.eventHandler.onUpdate(row, updateBefore, table, results.columns); - this.logger.info(`Processed UPDATE, after row: ${transactionLSN}`); + updateBefore = null; + transactionCount++; + 
this.logger.info(`Processed UPDATE row LSN: ${transactionLSN}`); break; default: this.logger.warn(`Unknown operation type [${row.__$operation}] encountered in CDC changes.`); } } - return results.length; + return transactionCount; } } diff --git a/modules/module-mssql/src/replication/CDCReplicationJob.ts b/modules/module-mssql/src/replication/CDCReplicationJob.ts index 521e30709..ca93d5be3 100644 --- a/modules/module-mssql/src/replication/CDCReplicationJob.ts +++ b/modules/module-mssql/src/replication/CDCReplicationJob.ts @@ -2,19 +2,23 @@ import { replication } from '@powersync/service-core'; import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js'; import { container, logger as defaultLogger } from '@powersync/lib-services-framework'; import { CDCDataExpiredError, CDCStream } from './CDCStream.js'; +import { CDCPollingOptions } from '../types/types.js'; export interface CDCReplicationJobOptions extends replication.AbstractReplicationJobOptions { connectionFactory: MSSQLConnectionManagerFactory; + pollingOptions: CDCPollingOptions; } export class CDCReplicationJob extends replication.AbstractReplicationJob { private connectionFactory: MSSQLConnectionManagerFactory; private lastStream: CDCStream | null = null; + private cdcReplicationJobOptions: CDCReplicationJobOptions; constructor(options: CDCReplicationJobOptions) { super(options); this.logger = defaultLogger.child({ prefix: `[powersync_${this.options.storage.group_id}] ` }); this.connectionFactory = options.connectionFactory; + this.cdcReplicationJobOptions = options; } async keepAlive() { @@ -67,7 +71,8 @@ export class CDCReplicationJob extends replication.AbstractReplicationJob { abortSignal: this.abortController.signal, storage: this.options.storage, metrics: this.options.metrics, - connections: connectionManager + connections: connectionManager, + pollingOptions: this.cdcReplicationJobOptions.pollingOptions }); this.lastStream = stream; await stream.replicate(); diff --git a/modules/module-mssql/src/replication/CDCReplicator.ts b/modules/module-mssql/src/replication/CDCReplicator.ts index 385e37f6e..06e42b22e 100644 --- a/modules/module-mssql/src/replication/CDCReplicator.ts +++ b/modules/module-mssql/src/replication/CDCReplicator.ts @@ -2,17 +2,21 @@ import { replication, storage } from '@powersync/service-core'; import { MSSQLConnectionManagerFactory } from './MSSQLConnectionManagerFactory.js'; import { CDCReplicationJob } from './CDCReplicationJob.js'; import { MSSQLModule } from '../module/MSSQLModule.js'; +import { CDCPollingOptions } from '../types/types.js'; export interface CDCReplicatorOptions extends replication.AbstractReplicatorOptions { connectionFactory: MSSQLConnectionManagerFactory; + pollingOptions: CDCPollingOptions; } export class CDCReplicator extends replication.AbstractReplicator { private readonly connectionFactory: MSSQLConnectionManagerFactory; + private readonly cdcReplicatorOptions: CDCReplicatorOptions; constructor(options: CDCReplicatorOptions) { super(options); this.connectionFactory = options.connectionFactory; + this.cdcReplicatorOptions = options; } createJob(options: replication.CreateJobOptions): CDCReplicationJob { @@ -22,7 +26,8 @@ export class CDCReplicator extends replication.AbstractReplicator { - // Schema changes are handled separately + // TODO: Handle schema changes } }; } @@ -656,29 +662,11 @@ export class CDCStream { * We filter out the CDC metadata columns. 
*/ private toSqliteRow(row: any, columns: sql.IColumnMetadata): SqliteRow { - // CDC metadata columns in the row that should be excluded - const cdcMetadataColumns = ['__$operation', '__$start_lsn', '__$end_lsn', '__$seqval', '__$update_mask']; - - const filteredRow: DatabaseInputRow = {}; - for (const key in row) { - // Skip CDC metadata columns - if (!cdcMetadataColumns.includes(key)) { - filteredRow[key] = row[key]; - } - } + const inputRow: SqliteInputRow = CDCToSqliteRow({ row, columns }); - const inputRow: SqliteInputRow = toSqliteInputRow(filteredRow, columns); return this.syncRules.applyRowContext(inputRow); } - // async ack(lsn: string, replicationStream: pgwire.ReplicationStream) { - // if (lsn == ZERO_LSN) { - // return; - // } - // - // replicationStream.ack(lsn); - // } - async getReplicationLagMillis(): Promise { if (this.oldestUncommittedChange == null) { if (this.isStartingReplication) { diff --git a/modules/module-mssql/src/types/types.ts b/modules/module-mssql/src/types/types.ts index 061b647e4..c5813d8e7 100644 --- a/modules/module-mssql/src/types/types.ts +++ b/modules/module-mssql/src/types/types.ts @@ -68,6 +68,18 @@ export type AuthenticationType = | AzureActiveDirectoryPasswordAuthentication | AzureActiveDirectoryServicePrincipalSecret; + +export interface CDCPollingOptions { + /** + * Maximum number of transactions to poll per polling cycle. Defaults to 10. + */ + batchSize: number; + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1 second. + */ + intervalMs: number; +} + export interface NormalizedMSSQLConnectionConfig { id: string; tag: string; @@ -81,6 +93,13 @@ export interface NormalizedMSSQLConnectionConfig { authentication?: AuthenticationType; + cdcPollingOptions: CDCPollingOptions; + + /** + * Whether to trust the server certificate. Set to true for local development and self-signed certificates. + * Default is false. + */ + trustServerCertificate: boolean; lookup?: LookupFunction; } @@ -99,6 +118,17 @@ export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.a .or(AzureActiveDirectoryServicePrincipalSecret) .optional(), + cdcPollingOptions: t.object({ + batchSize: t.number.optional(), + intervalMs: t.number.optional() + }).optional(), + + /** + * Whether to trust the server certificate. Set to true for local development and self-signed certificates. + * Default is false. + */ + trustServerCertificate: t.boolean.optional(), + reject_ip_ranges: t.array(t.string).optional() }) ); @@ -111,7 +141,7 @@ export type MSSQLConnectionConfig = t.Decoded; /** * Resolved version of {@link MSSQLConnectionConfig} */ -export type ResolvedConnectionConfig = MSSQLConnectionConfig & NormalizedMSSQLConnectionConfig; +export type ResolvedMSSQLConnectionConfig = MSSQLConnectionConfig & NormalizedMSSQLConnectionConfig; /** * Validate and normalize connection options. @@ -171,6 +201,24 @@ export function normalizeConnectionConfig(options: MSSQLConnectionConfig): Norma database, lookup, - authentication: options.authentication + authentication: options.authentication, + + cdcPollingOptions: { + /** + * Maximum number of transactions to poll per polling cycle. Defaults to 10. + */ + batchSize: options.cdcPollingOptions?.batchSize ?? 10, + + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1 second. + */ + intervalMs: options.cdcPollingOptions?.intervalMs ?? 1000, + }, + + trustServerCertificate: options.trustServerCertificate ?? 
false, } satisfies NormalizedMSSQLConnectionConfig; } + +export function baseUri(config: ResolvedMSSQLConnectionConfig) { + return `mssql://${config.hostname}:${config.port}/${config.database}`; +} From 3497b4e7e30b4d830e0e947db743c15019e54afa Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:25:32 +0200 Subject: [PATCH 29/42] Made trust server certificate mssql connection option configurable. --- .../module-mssql/src/replication/MSSQLConnectionManager.ts | 4 ++-- .../src/replication/MSSQLConnectionManagerFactory.ts | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts index e7ba1d707..f11119edc 100644 --- a/modules/module-mssql/src/replication/MSSQLConnectionManager.ts +++ b/modules/module-mssql/src/replication/MSSQLConnectionManager.ts @@ -30,8 +30,8 @@ export class MSSQLConnectionManager extends BaseObserver; - public readonly connectionConfig: ResolvedConnectionConfig; + public readonly connectionConfig: ResolvedMSSQLConnectionConfig; - constructor(connectionConfig: ResolvedConnectionConfig) { + constructor(connectionConfig: ResolvedMSSQLConnectionConfig) { this.connectionConfig = connectionConfig; this.connectionManagers = new Set(); } From 43be97a7af341963292b8d63d068c83ed9ef6c0b Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:26:05 +0200 Subject: [PATCH 30/42] Updated tests --- .../module-mssql/test/src/CDCStream.test.ts | 64 +++++++++++++++++-- .../test/src/CDCStreamTestContext.ts | 5 ++ .../src/CDCStream_resumable_snapshot.test.ts | 21 +++--- 3 files changed, 74 insertions(+), 16 deletions(-) diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts index 5af0ce3b5..edd35a652 100644 --- a/modules/module-mssql/test/src/CDCStream.test.ts +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -1,10 +1,17 @@ import { describe, expect, test } from 'vitest'; -import { METRICS_HELPER, putOp } from '@powersync/service-core-tests'; +import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; -import { createTestTable, describeWithStorage, insertTestData, waitForPendingCDCChanges } from './util.js'; +import { + createTestTable, + describeWithStorage, + INITIALIZED_MONGO_STORAGE_FACTORY, + insertTestData, + waitForPendingCDCChanges +} from './util.js'; import { storage } from '@powersync/service-core'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; +import sql from 'mssql'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -13,11 +20,11 @@ bucket_definitions: - SELECT id, description FROM "test_data" `; -describe('CDCStream tests', () => { - describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); -}); +// describe('CDCStream tests', () => { +// describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); +// }); -// defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); +defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); function defineCDCStreamTests(factory: storage.TestStorageFactory) { test('Initial snapshot sync', async () => { @@ -64,6 +71,51 @@ function defineCDCStreamTests(factory: storage.TestStorageFactory) { expect(endTxCount - startTxCount).toEqual(1); }); + test('Replicate row updates', async () => { + await using context = await 
CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + await context.replicateSnapshot(); + + await context.startStreaming(); + + const updatedTestData = { ...testData }; + updatedTestData.description = 'updated'; + await connectionManager.query(`UPDATE test_data SET description = @description WHERE id = @id`, [ + { name: 'description', type: sql.NVarChar(sql.MAX), value: updatedTestData.description }, + { name: 'id', type: sql.UniqueIdentifier, value: updatedTestData.id } + ]); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', testData), putOp('test_data', updatedTestData)]); + }); + + test('Replicate row deletions', async () => { + await using context = await CDCStreamTestContext.open(factory); + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await createTestTable(connectionManager, 'test_data'); + const beforeLSN = await getLatestReplicatedLSN(connectionManager); + const testData = await insertTestData(connectionManager, 'test_data'); + await waitForPendingCDCChanges(beforeLSN, connectionManager); + await context.replicateSnapshot(); + + await context.startStreaming(); + + await connectionManager.query(`DELETE FROM test_data WHERE id = @id`, [ + { name: 'id', type: sql.UniqueIdentifier, value: testData.id } + ]); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', testData), removeOp('test_data', testData.id)]); + }); + test('Replicate matched wild card tables in sync rules', async () => { await using context = await CDCStreamTestContext.open(factory); const { connectionManager } = context; diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts index 614ab03c1..968adf98f 100644 --- a/modules/module-mssql/test/src/CDCStreamTestContext.ts +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -12,6 +12,7 @@ import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './uti import { CDCStream, CDCStreamOptions } from '@module/replication/CDCStream.js'; import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; import timers from 'timers/promises'; +import { CDCPollingOptions } from '@module/types/types.js'; /** * Tests operating on the change data capture need to configure the stream and manage asynchronous @@ -109,6 +110,10 @@ export class CDCStreamTestContext implements AsyncDisposable { metrics: METRICS_HELPER.metricsEngine, connections: this.connectionManager, abortSignal: this.abortController.signal, + pollingOptions: { + batchSize: 10, + intervalMs: 1000 + } satisfies CDCPollingOptions, ...this.cdcStreamOptions }; this._cdcStream = new CDCStream(options); diff --git a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts index 78fb92958..14b496cb7 100644 --- a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts +++ b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts @@ -1,13 +1,13 @@ import { describe, expect, test } from 'vitest'; import { env } from './env.js'; 
-import { describeWithStorage } from './util.js'; +import { createTestTable, createTestTableWithBasicId, describeWithStorage, waitForPendingCDCChanges } from './util.js'; import { TestStorageFactory } from '@powersync/service-core'; import { METRICS_HELPER } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; import * as timers from 'node:timers/promises'; import { ReplicationAbortedError } from '@powersync/lib-services-framework'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; -import { enableCDCForTable } from '@module/utils/mssql.js'; +import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { describeWithStorage({ timeout: 240_000 }, function (factory) { @@ -43,15 +43,14 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n - SELECT * FROM test_data2`); const { connectionManager } = context; - await connectionManager.query(`CREATE TABLE test_data1 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`); - await enableCDCForTable({ connectionManager, table: 'test_data1' }); - await connectionManager.query(`CREATE TABLE test_data2 (id INT IDENTITY(1,1) PRIMARY KEY, description VARCHAR(MAX))`); - await enableCDCForTable({ connectionManager, table: 'test_data2' }); + await createTestTableWithBasicId(connectionManager, 'test_data1'); + await createTestTableWithBasicId(connectionManager, 'test_data2'); - await connectionManager.query( - `INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1); - INSERT INTO test_data2(description) SELECT 'value' FROM GENERATE_SERIES(1, 10000, 1);` - ); + let beforeLSN = await getLatestReplicatedLSN(connectionManager); + await connectionManager.query(`INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1)`); + await connectionManager.query(`INSERT INTO test_data2(description) SELECT 'value' FROM GENERATE_SERIES(1, 10000, 1)`); + + await waitForPendingCDCChanges(beforeLSN, connectionManager); const p = context.replicateSnapshot(); @@ -86,6 +85,7 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n cdcStreamOptions: { snapshotBatchSize: 1000 } }); + beforeLSN = await getLatestReplicatedLSN(context2.connectionManager); // This delete should be using one of the ids already replicated const { recordset: [id1] @@ -101,6 +101,7 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n } = await context2.connectionManager.query( `INSERT INTO test_data2(description) OUTPUT INSERTED.id VALUES ('insert1')` ); + await waitForPendingCDCChanges(beforeLSN, context2.connectionManager); await context2.loadNextSyncRules(); await context2.replicateSnapshot(); From 23877f4f1c475133746f4adfd2c27b382c89f59f Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:26:51 +0200 Subject: [PATCH 31/42] Cleanup --- modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts index c2d91f8ce..79f87d5c1 100644 --- a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts +++ b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts @@ -4,9 +4,9 @@ import * as sync_rules from '@powersync/service-sync-rules'; import * as service_types from '@powersync/service-types'; import mysql from 'mysql2/promise'; import * 
as common from '../common/common-index.js'; +import { toExpressionTypeFromMySQLType } from '../common/common-index.js'; import * as mysql_utils from '../utils/mysql-utils.js'; import * as types from '../types/types.js'; -import { toExpressionTypeFromMySQLType } from '../common/common-index.js'; type SchemaResult = { schema_name: string; @@ -288,11 +288,8 @@ export class MySQLRouteAPIAdapter implements api.RouteAPI { async createReplicationHead(callback: ReplicationHeadCallback): Promise { const head = await this.getReplicationHead(); - const r = await callback(head); - // TODO: make sure another message is replicated - - return r; + return await callback(head); } async getConnectionSchema(): Promise { From 4aa95007e50587906cf9406ddac3c12ea3c65a67 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:37:20 +0200 Subject: [PATCH 32/42] Updated tsconfig for service to include mssql --- modules/module-mssql/package.json | 2 +- service/tsconfig.json | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/module-mssql/package.json b/modules/module-mssql/package.json index 37af8348e..37a955dd8 100644 --- a/modules/module-mssql/package.json +++ b/modules/module-mssql/package.json @@ -2,7 +2,7 @@ "name": "@powersync/service-module-mssql", "repository": "https://github.com/powersync-ja/powersync-service", "types": "dist/index.d.ts", - "version": "0.0.0", + "version": "0.0.1", "license": "FSL-1.1-ALv2", "main": "dist/index.js", "type": "module", diff --git a/service/tsconfig.json b/service/tsconfig.json index 6a9560f45..c308b2058 100644 --- a/service/tsconfig.json +++ b/service/tsconfig.json @@ -47,6 +47,9 @@ }, { "path": "../modules/module-mysql" + }, + { + "path": "../modules/module-mssql" } ] } From bcbce208967d866decb770a5258046937abda3fe Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 12:43:58 +0200 Subject: [PATCH 33/42] Added mssql module to Dockerfile --- service/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/service/Dockerfile b/service/Dockerfile index 66c86808e..102d20367 100644 --- a/service/Dockerfile +++ b/service/Dockerfile @@ -22,6 +22,7 @@ COPY modules/module-postgres-storage/package.json modules/module-postgres-storag COPY modules/module-mongodb/package.json modules/module-mongodb/tsconfig.json modules/module-mongodb/ COPY modules/module-mongodb-storage/package.json modules/module-mongodb-storage/tsconfig.json modules/module-mongodb-storage/ COPY modules/module-mysql/package.json modules/module-mysql/tsconfig.json modules/module-mysql/ +COPY modules/module-mssql/package.json modules/module-mssql/tsconfig.json modules/module-mssql/ RUN corepack enable pnpm && corepack install RUN pnpm install --frozen-lockfile @@ -48,6 +49,7 @@ COPY modules/module-postgres-storage/src modules/module-postgres-storage/src/ COPY modules/module-mongodb/src modules/module-mongodb/src/ COPY modules/module-mongodb-storage/src modules/module-mongodb-storage/src/ COPY modules/module-mysql/src modules/module-mysql/src/ +COPY modules/module-mssql/src modules/module-mssql/src/ RUN pnpm build:production && \ rm -rf node_modules **/node_modules && \ From f51f987eade2d420b95eb0d60d432a34e5ac5ad5 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 13:11:35 +0200 Subject: [PATCH 34/42] Added changeset --- .changeset/thin-snails-compete.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .changeset/thin-snails-compete.md diff --git a/.changeset/thin-snails-compete.md 
b/.changeset/thin-snails-compete.md new file mode 100644 index 000000000..9b37e6961 --- /dev/null +++ b/.changeset/thin-snails-compete.md @@ -0,0 +1,15 @@ +--- +'@powersync/service-core': minor +'@powersync/service-module-mssql': minor +'@powersync/service-module-postgres-storage': patch +'@powersync/service-module-mongodb-storage': patch +'@powersync/service-module-postgres': patch +'@powersync/service-errors': patch +'@powersync/service-module-mysql': patch +'@powersync/service-image': patch +--- + +- First iteration of MSSQL replication using Change Data Capture (CDC). +- Supports resumable snapshot replication +- Uses CDC polling for replication + From 18bbc9be0e35109d878c4d2a83b257cf74284b15 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Wed, 26 Nov 2025 13:23:05 +0200 Subject: [PATCH 35/42] Removed mysql comment --- modules/module-mssql/src/replication/CDCReplicator.ts | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/modules/module-mssql/src/replication/CDCReplicator.ts b/modules/module-mssql/src/replication/CDCReplicator.ts index 06e42b22e..e2985e4ad 100644 --- a/modules/module-mssql/src/replication/CDCReplicator.ts +++ b/modules/module-mssql/src/replication/CDCReplicator.ts @@ -31,9 +31,7 @@ export class CDCReplicator extends replication.AbstractReplicator { - // The MySQL module does not create anything which requires cleanup on the MySQL server. - } + async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise {} async stop(): Promise { await super.stop(); @@ -59,7 +57,7 @@ export class CDCReplicator extends replication.AbstractReplicator Date: Thu, 27 Nov 2025 08:55:50 +0200 Subject: [PATCH 36/42] Enabled mssql-module automated tests --- .github/workflows/test.yml | 92 ++++++++++++++++++++++++++ modules/module-mssql/ci/init-mssql.sql | 50 ++++++++++++++ modules/module-mssql/dev/init.sql | 5 +- 3 files changed, 145 insertions(+), 2 deletions(-) create mode 100644 modules/module-mssql/ci/init-mssql.sql diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c2a8e3604..f861e9595 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -292,3 +292,95 @@ jobs: - name: Test Storage run: pnpm --filter='./modules/module-mongodb-storage' test + + run-mssql-tests: + name: MSSQL Test + runs-on: ubuntu-latest + needs: run-core-tests + + env: + MSSQL_SA_PASSWORD: 321strong_ROOT_password + + strategy: + fail-fast: false + matrix: + mssql-version: [2022, 2025] + + steps: + - uses: actions/checkout@v5 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Start MSSQL + run: | + docker run \ + --name MSSQLTestDatabase \ + --health-cmd="/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"${{ env.MSSQL_SA_PASSWORD }}\" -Q \"SELECT 1;\" || exit 1" \ + --health-interval 5s \ + --health-timeout 3s \ + --health-retries 30 \ + -e ACCEPT_EULA=Y \ + -e MSSQL_SA_PASSWORD=${{ env.MSSQL_SA_PASSWORD }} \ + -e MSSQL_PID=Developer \ + -e MSSQL_AGENT_ENABLED=true \ + -p 1433:1433 \ + -d mcr.microsoft.com/mssql/server:${{ matrix.mssql-version }}-latest + + - name: Wait for MSSQL to be healthy + run: | + timeout 120 bash -c 'until docker inspect --format="{{.State.Health.Status}}" MSSQLTestDatabase | grep -q "healthy"; do sleep 2; done' + + - name: Initialize MSSQL database + run: | + docker run \ + --rm \ + --network host \ + -e MSSQL_SA_PASSWORD=${{ env.MSSQL_SA_PASSWORD }} \ + -v ${{ github.workspace 
}}/modules/module-mssql/ci/init-mssql.sql:/scripts/init-mssql.sql:ro \ + mcr.microsoft.com/mssql/server:${{ matrix.mssql-version }}-latest \ + /bin/bash -c "/opt/mssql-tools18/bin/sqlcmd -C -S localhost -U sa -P \"${{ env.MSSQL_SA_PASSWORD }}\" -v DATABASE=powersync -v DB_USER=sa -i /scripts/init-mssql.sql" + + # The mongodb-github-action below doesn't use the Docker credentials for the pull. + # We pre-pull, so that the image is cached. + - name: Pre-pull Mongo image + run: docker pull mongo:8.0 + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.12.0 + with: + mongodb-version: '8.0' + mongodb-replica-set: test-rs + + - name: Start PostgreSQL (Storage) + run: | + docker run \ + --health-cmd pg_isready \ + --health-interval 10s \ + --health-timeout 5s \ + --health-retries 5 \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_DB=powersync_storage_test \ + -p 5431:5432 \ + -d postgres:18 + + - name: Enable Corepack + run: corepack enable + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version-file: '.nvmrc' + cache: pnpm + + - name: Install dependencies + run: pnpm install + + - name: Build + shell: bash + run: pnpm build + + - name: Test Replication + run: pnpm --filter='./modules/module-mssql' test diff --git a/modules/module-mssql/ci/init-mssql.sql b/modules/module-mssql/ci/init-mssql.sql new file mode 100644 index 000000000..159771b96 --- /dev/null +++ b/modules/module-mssql/ci/init-mssql.sql @@ -0,0 +1,50 @@ +-- Create database (idempotent) +IF DB_ID('$(DATABASE)') IS NULL +BEGIN + CREATE DATABASE [$(DATABASE)]; +END +GO + +-- Enable CDC at the database level (idempotent) +USE [$(DATABASE)]; +IF (SELECT is_cdc_enabled FROM sys.databases WHERE name = '$(DATABASE)') = 0 +BEGIN + EXEC sys.sp_cdc_enable_db; +END +GO + +-- Create PowerSync checkpoints table +-- Powersync requires this table to ensure regular checkpoints appear in CDC +IF OBJECT_ID('dbo._powersync_checkpoints', 'U') IS NULL +BEGIN + CREATE TABLE dbo._powersync_checkpoints ( + id INT IDENTITY PRIMARY KEY, + last_updated DATETIME NOT NULL DEFAULT (GETDATE()) +); +END + +GRANT INSERT, UPDATE ON dbo._powersync_checkpoints TO [$(DB_USER)]; +GO + +-- Enable CDC for the powersync checkpoints table +IF NOT EXISTS (SELECT 1 FROM cdc.change_tables WHERE source_object_id = OBJECT_ID(N'dbo._powersync_checkpoints')) + BEGIN + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'_powersync_checkpoints', + @role_name = N'cdc_reader', + @supports_net_changes = 0; +END +GO + +-- Wait until capture job exists - usually takes a few seconds after enabling CDC on a table for the first time +DECLARE @tries int = 10; +WHILE @tries > 0 AND NOT EXISTS (SELECT 1 FROM msdb.dbo.cdc_jobs WHERE job_type = N'capture') +BEGIN + WAITFOR DELAY '00:00:01'; + SET @tries -= 1; +END; + +-- Set the CDC capture job polling interval to 1 second (default is 5 seconds) +EXEC sys.sp_cdc_change_job @job_type = N'capture', @pollinginterval = 1; +GO \ No newline at end of file diff --git a/modules/module-mssql/dev/init.sql b/modules/module-mssql/dev/init.sql index d206e6fdf..55e2199c7 100644 --- a/modules/module-mssql/dev/init.sql +++ b/modules/module-mssql/dev/init.sql @@ -147,8 +147,9 @@ END GO -- Add demo data +IF NOT EXISTS (SELECT 1 FROM dbo.lists) BEGIN - INSERT INTO dbo.lists (id, name, owner_id) - VALUES (NEWID(), 'Do a demo', NEWID()); +INSERT INTO dbo.lists (id, name, owner_id) +VALUES (NEWID(), 'Do a demo', NEWID()); END GO \ No newline at end of file From ddc0f0dfa4a7bea12a29eb26a08029e89fdb7b4b Mon 
Sep 17 00:00:00 2001
From: Roland Teichert
Date: Thu, 27 Nov 2025 22:15:01 +0200
Subject: [PATCH 37/42] Use trust server certificate for tests

Added .npmignore
---
 modules/module-mssql/.npmignore       | 1 +
 modules/module-mssql/test/src/util.ts | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 modules/module-mssql/.npmignore

diff --git a/modules/module-mssql/.npmignore b/modules/module-mssql/.npmignore
new file mode 100644
index 000000000..90012116c
--- /dev/null
+++ b/modules/module-mssql/.npmignore
@@ -0,0 +1 @@
+dev
\ No newline at end of file
diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts
index 8bbd2d5e4..330c94aa0 100644
--- a/modules/module-mssql/test/src/util.ts
+++ b/modules/module-mssql/test/src/util.ts
@@ -36,7 +36,8 @@ export function describeWithStorage(options: TestOptions, fn: (factory: TestStor
 
 export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
   type: 'mssql',
-  uri: TEST_URI
+  uri: TEST_URI,
+  trustServerCertificate: true
 });
 
 /**

From 3fd676d8cab3d649fe0d8899d914e3bc59fcbea8 Mon Sep 17 00:00:00 2001
From: Roland Teichert
Date: Fri, 28 Nov 2025 09:39:11 +0200
Subject: [PATCH 38/42] Fixed resumable snapshot tests

---
 .../module-mssql/src/replication/CDCPoller.ts | 14 ++++++++++----
 .../module-mssql/src/replication/CDCStream.ts |  9 ++++++---
 modules/module-mssql/src/utils/mssql.ts       |  2 +-
 .../module-mssql/test/src/CDCStream.test.ts   |  8 +++-----
 .../src/CDCStream_resumable_snapshot.test.ts  | 19 +++++++++++++------
 modules/module-mssql/test/src/util.ts         | 13 ++++++-------
 6 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/modules/module-mssql/src/replication/CDCPoller.ts b/modules/module-mssql/src/replication/CDCPoller.ts
index 2f70b6c46..31d0b5229 100644
--- a/modules/module-mssql/src/replication/CDCPoller.ts
+++ b/modules/module-mssql/src/replication/CDCPoller.ts
@@ -99,7 +99,7 @@ export class CDCPoller {
   public async replicateUntilStopped(): Promise<void> {
     this.logger.info(`CDC polling started with interval of ${this.pollingIntervalMs}ms...`);
-    this.logger.info(`Polling a maximum of [${this.pollingBatchSize}] transactions per polling cycle.`);
+    this.logger.info(`Polling a maximum of ${this.pollingBatchSize} transactions per polling cycle.`);
     while (!this.isStopped) {
       // Don't poll if already polling (concurrency guard)
       if (this.isPolling) {
@@ -205,17 +205,16 @@
     let transactionCount = 0;
     let updateBefore: any = null;
+    let lastTransactionLSN: LSN | null = null;
     for (const row of results) {
       const transactionLSN = LSN.fromBinary(row.__$start_lsn);
       switch (row.__$operation) {
         case Operation.DELETE:
           await this.eventHandler.onDelete(row, table, results.columns);
-          transactionCount++;
           this.logger.info(`Processed DELETE row LSN: ${transactionLSN}`);
           break;
         case Operation.INSERT:
           await this.eventHandler.onInsert(row, table, results.columns);
-          transactionCount++;
           this.logger.info(`Processed INSERT row LSN: ${transactionLSN}`);
           break;
         case Operation.UPDATE_BEFORE:
           updateBefore = row;
           this.logger.debug(`Processed UPDATE, before row LSN: ${transactionLSN}`);
           break;
         case Operation.UPDATE_AFTER:
           if (updateBefore === null) {
             throw new ReplicationAssertionError('Missing before image for update event.');
           }
           await this.eventHandler.onUpdate(row, updateBefore, table, results.columns);
           updateBefore = null;
-          transactionCount++;
           this.logger.info(`Processed UPDATE row LSN: ${transactionLSN}`);
           break;
         default:
           this.logger.warn(`Unknown operation type [${row.__$operation}] encountered in CDC changes.`);
       }
+
+      // Increment the transaction count when we encounter a new transaction LSN (except for UPDATE_BEFORE rows)
+      if (lastTransactionLSN == null || transactionLSN.compare(lastTransactionLSN) !== 0) {
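+        // Note: LSN.fromBinary yields a fresh object per row, so a reference check with `!=`
+        // would treat every row as a new transaction; LSN.compare groups rows that share the
+        // same __$start_lsn into a single transaction.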
lastTransactionLSN = transactionLSN; + if (row.__$operation !== Operation.UPDATE_BEFORE) { + transactionCount++; + } + } } return transactionCount; diff --git a/modules/module-mssql/src/replication/CDCStream.ts b/modules/module-mssql/src/replication/CDCStream.ts index db482c789..cc3955cdc 100644 --- a/modules/module-mssql/src/replication/CDCStream.ts +++ b/modules/module-mssql/src/replication/CDCStream.ts @@ -247,7 +247,11 @@ export class CDCStream { entity_descriptor: table, sync_rules: this.syncRules }); - const captureInstance = await getCaptureInstance({ connectionManager: this.connections, tableName: resolved.table.name, schema: resolved.table.schema }); + const captureInstance = await getCaptureInstance({ + connectionManager: this.connections, + tableName: resolved.table.name, + schema: resolved.table.schema + }); if (!captureInstance) { throw new ServiceAssertionError( `Missing capture instance for table ${toQualifiedTableName(resolved.table.schema, resolved.table.name)}` @@ -361,7 +365,6 @@ export class CDCStream { } await query.initialize(); - let columns: sql.IColumnMetadata | null = null; let hasRemainingData = true; while (hasRemainingData) { // Fetch 10k at a time. @@ -369,6 +372,7 @@ export class CDCStream { // and not spending too much time on each FETCH call. // We aim for a couple of seconds on each FETCH call. let batchReplicatedCount = 0; + let columns: sql.IColumnMetadata | null = null; const cursor = query.next(); for await (const result of cursor) { if (columns == null && isIColumnMetadata(result)) { @@ -451,7 +455,6 @@ export class CDCStream { WHERE object_id = OBJECT_ID('${table.toQualifiedName()}') AND index_id < 2;` ); - // TODO Fallback query in case user does not have permission? return result[0].total_rows ?? -1; } diff --git a/modules/module-mssql/src/utils/mssql.ts b/modules/module-mssql/src/utils/mssql.ts index 7637571e6..1e3a040ee 100644 --- a/modules/module-mssql/src/utils/mssql.ts +++ b/modules/module-mssql/src/utils/mssql.ts @@ -319,7 +319,7 @@ export function isIColumnMetadata(obj: any): obj is sql.IColumnMetadata { propertiesMatched = typeof property.index === 'number' && typeof property.name === 'string' && - typeof property.length === 'number' && + (typeof property.length === 'number' || typeof property.length === 'undefined') && (typeof property.type === 'function' || typeof property.type === 'object') && typeof property.nullable === 'boolean' && typeof property.caseSensitive === 'boolean' && diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts index edd35a652..50276fc77 100644 --- a/modules/module-mssql/test/src/CDCStream.test.ts +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -20,11 +20,9 @@ bucket_definitions: - SELECT id, description FROM "test_data" `; -// describe('CDCStream tests', () => { -// describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); -// }); - -defineCDCStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); +describe('CDCStream tests', () => { + describeWithStorage({ timeout: 20_000 }, defineCDCStreamTests); +}); function defineCDCStreamTests(factory: storage.TestStorageFactory) { test('Initial snapshot sync', async () => { diff --git a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts index 14b496cb7..929c710c2 100644 --- a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts +++ b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts @@ -1,11 +1,11 
@@ import { describe, expect, test } from 'vitest'; import { env } from './env.js'; -import { createTestTable, createTestTableWithBasicId, describeWithStorage, waitForPendingCDCChanges } from './util.js'; +import { createTestTableWithBasicId, describeWithStorage, waitForPendingCDCChanges } from './util.js'; import { TestStorageFactory } from '@powersync/service-core'; import { METRICS_HELPER } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; import * as timers from 'node:timers/promises'; -import { ReplicationAbortedError } from '@powersync/lib-services-framework'; +import { logger, ReplicationAbortedError } from '@powersync/lib-services-framework'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; @@ -64,6 +64,7 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount; if (count >= stopAfter) { + logger.info(`Stopped initial replication after replicating ${count} rows.`); break; } await timers.setTimeout(1); @@ -85,22 +86,28 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n cdcStreamOptions: { snapshotBatchSize: 1000 } }); - beforeLSN = await getLatestReplicatedLSN(context2.connectionManager); // This delete should be using one of the ids already replicated const { - recordset: [id1] + recordset: [deleteResult] } = await context2.connectionManager.query(`DELETE TOP (1) FROM test_data2 OUTPUT DELETED.id`); // This update should also be using one of the ids already replicated + const id1 = deleteResult.id; + logger.info(`Deleted row with id: ${id1}`); const { - recordset: [id2] + recordset: [updateResult] } = await context2.connectionManager.query( `UPDATE test_data2 SET description = 'update1' OUTPUT INSERTED.id WHERE id = (SELECT TOP 1 id FROM test_data2)` ); + const id2 = updateResult.id; + logger.info(`Updated row with id: ${id2}`); + beforeLSN = await getLatestReplicatedLSN(context2.connectionManager); const { - recordset: [id3] + recordset: [insertResult] } = await context2.connectionManager.query( `INSERT INTO test_data2(description) OUTPUT INSERTED.id VALUES ('insert1')` ); + const id3 = insertResult.id; + logger.info(`Inserted row with id: ${id3}`); await waitForPendingCDCChanges(beforeLSN, context2.connectionManager); await context2.loadNextSyncRules(); diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts index 330c94aa0..a0d76068f 100644 --- a/modules/module-mssql/test/src/util.ts +++ b/modules/module-mssql/test/src/util.ts @@ -166,13 +166,12 @@ export async function getClientCheckpoint( while (Date.now() - start < timeout) { const storage = await storageFactory.getActiveStorage(); const cp = await storage?.getCheckpoint(); - if (cp == null) { - throw new Error('No sync rules available'); - } - lastCp = cp; - if (cp.lsn != null && cp.lsn >= lsn.toString()) { - logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`); - return cp.checkpoint; + if (cp != null) { + lastCp = cp; + if (cp.lsn != null && cp.lsn >= lsn.toString()) { + logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`); + return cp.checkpoint; + } } await new Promise((resolve) => setTimeout(resolve, 30)); From 1ace4c109830db95c5bea24e0f2880821c729425 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Fri, 28 Nov 2025 10:13:24 +0200 Subject: [PATCH 39/42] Removed unused 
import --- modules/module-mssql/test/src/CDCStream.test.ts | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/modules/module-mssql/test/src/CDCStream.test.ts b/modules/module-mssql/test/src/CDCStream.test.ts index 50276fc77..03bd6e442 100644 --- a/modules/module-mssql/test/src/CDCStream.test.ts +++ b/modules/module-mssql/test/src/CDCStream.test.ts @@ -1,13 +1,7 @@ import { describe, expect, test } from 'vitest'; import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; -import { - createTestTable, - describeWithStorage, - INITIALIZED_MONGO_STORAGE_FACTORY, - insertTestData, - waitForPendingCDCChanges -} from './util.js'; +import { createTestTable, describeWithStorage, insertTestData, waitForPendingCDCChanges } from './util.js'; import { storage } from '@powersync/service-core'; import { CDCStreamTestContext } from './CDCStreamTestContext.js'; import { getLatestReplicatedLSN } from '@module/utils/mssql.js'; From 1ed0b288764910d1dd4cb0101813428dbe6f3145 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Fri, 28 Nov 2025 13:02:42 +0200 Subject: [PATCH 40/42] Refactored mssql connection config to support additional configuration. --- .../module-mssql/src/module/MSSQLModule.ts | 2 +- .../module-mssql/src/replication/CDCPoller.ts | 8 +- .../src/replication/CDCReplicationJob.ts | 6 +- .../src/replication/CDCReplicator.ts | 6 +- .../module-mssql/src/replication/CDCStream.ts | 8 +- .../src/replication/MSSQLConnectionManager.ts | 2 +- modules/module-mssql/src/types/types.ts | 80 +++++++++---------- .../test/src/CDCStreamTestContext.ts | 10 +-- modules/module-mssql/test/src/util.ts | 6 +- 9 files changed, 65 insertions(+), 63 deletions(-) diff --git a/modules/module-mssql/src/module/MSSQLModule.ts b/modules/module-mssql/src/module/MSSQLModule.ts index 4f9cf928c..95fcd9ca8 100644 --- a/modules/module-mssql/src/module/MSSQLModule.ts +++ b/modules/module-mssql/src/module/MSSQLModule.ts @@ -41,7 +41,7 @@ export class MSSQLModule extends replication.ReplicationModule { @@ -27,7 +27,7 @@ export class CDCReplicator extends replication.AbstractReplicator; -export type AuthenticationType = - | DefaultAuthentication - | AzureActiveDirectoryPasswordAuthentication - | AzureActiveDirectoryServicePrincipalSecret; +export const AdditionalConfig = t.object({ + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds. + */ + pollingIntervalMs: t.number.optional(), + /** + * Maximum number of transactions to poll per polling cycle. Defaults to 10. + */ + pollingBatchSize: t.number.optional(), - -export interface CDCPollingOptions { + /** + * Whether to trust the server certificate. Set to true for local development and self-signed certificates. + * Default is false. + */ + trustServerCertificate: t.boolean.optional() +}); + +export interface AdditionalConfig { + /** + * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds. + */ + pollingIntervalMs: number; /** * Maximum number of transactions to poll per polling cycle. Defaults to 10. */ - batchSize: number; + pollingBatchSize: number; /** - * Interval in milliseconds to wait between polling cycles. Defaults to 1 second. + * Whether to trust the server certificate. Set to true for local development and self-signed certificates. + * Default is false. 
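
The AdditionalConfig shape above follows a common pattern: every field is optional in the codec that validates user input, and a normalization step fills in defaults so downstream code can rely on every field being present. A minimal sketch of that pairing, with simplified stand-in types and the defaults taken from this patch (1000 ms interval, batch size 10, certificate trust off):

interface AdditionalConfigInput {
  pollingIntervalMs?: number;
  pollingBatchSize?: number;
  trustServerCertificate?: boolean;
}

interface ResolvedAdditionalConfig {
  pollingIntervalMs: number;
  pollingBatchSize: number;
  trustServerCertificate: boolean;
}

function normalizeAdditionalConfig(input?: AdditionalConfigInput): ResolvedAdditionalConfig {
  return {
    pollingIntervalMs: input?.pollingIntervalMs ?? 1000,
    pollingBatchSize: input?.pollingBatchSize ?? 10,
    trustServerCertificate: input?.trustServerCertificate ?? false
  };
}

For example, normalizeAdditionalConfig({ trustServerCertificate: true }) yields { pollingIntervalMs: 1000, pollingBatchSize: 10, trustServerCertificate: true }, essentially what the test utilities configure further down.
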
*/ - intervalMs: number; + trustServerCertificate: boolean; } +export type AuthenticationType = + | DefaultAuthentication + | AzureActiveDirectoryPasswordAuthentication + | AzureActiveDirectoryServicePrincipalSecret; + export interface NormalizedMSSQLConnectionConfig { id: string; tag: string; @@ -93,14 +114,9 @@ export interface NormalizedMSSQLConnectionConfig { authentication?: AuthenticationType; - cdcPollingOptions: CDCPollingOptions; - - /** - * Whether to trust the server certificate. Set to true for local development and self-signed certificates. - * Default is false. - */ - trustServerCertificate: boolean; lookup?: LookupFunction; + + additionalConfig: AdditionalConfig; } export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.and( @@ -118,18 +134,8 @@ export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.a .or(AzureActiveDirectoryServicePrincipalSecret) .optional(), - cdcPollingOptions: t.object({ - batchSize: t.number.optional(), - intervalMs: t.number.optional() - }).optional(), - - /** - * Whether to trust the server certificate. Set to true for local development and self-signed certificates. - * Default is false. - */ - trustServerCertificate: t.boolean.optional(), - - reject_ip_ranges: t.array(t.string).optional() + reject_ip_ranges: t.array(t.string).optional(), + additionalConfig: AdditionalConfig.optional() }) ); @@ -203,19 +209,11 @@ export function normalizeConnectionConfig(options: MSSQLConnectionConfig): Norma lookup, authentication: options.authentication, - cdcPollingOptions: { - /** - * Maximum number of transactions to poll per polling cycle. Defaults to 10. - */ - batchSize: options.cdcPollingOptions?.batchSize ?? 10, - - /** - * Interval in milliseconds to wait between polling cycles. Defaults to 1 second. - */ - intervalMs: options.cdcPollingOptions?.intervalMs ?? 1000, - }, - - trustServerCertificate: options.trustServerCertificate ?? false, + additionalConfig: { + pollingIntervalMs: options.additionalConfig?.pollingIntervalMs ?? 1000, + pollingBatchSize: options.additionalConfig?.pollingBatchSize ?? 10, + trustServerCertificate: options.additionalConfig?.trustServerCertificate ?? 
false + } } satisfies NormalizedMSSQLConnectionConfig; } diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts index 968adf98f..6b674befc 100644 --- a/modules/module-mssql/test/src/CDCStreamTestContext.ts +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -12,7 +12,6 @@ import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './uti import { CDCStream, CDCStreamOptions } from '@module/replication/CDCStream.js'; import { MSSQLConnectionManager } from '@module/replication/MSSQLConnectionManager.js'; import timers from 'timers/promises'; -import { CDCPollingOptions } from '@module/types/types.js'; /** * Tests operating on the change data capture need to configure the stream and manage asynchronous @@ -110,10 +109,11 @@ export class CDCStreamTestContext implements AsyncDisposable { metrics: METRICS_HELPER.metricsEngine, connections: this.connectionManager, abortSignal: this.abortController.signal, - pollingOptions: { - batchSize: 10, - intervalMs: 1000 - } satisfies CDCPollingOptions, + additionalConfig: { + pollingBatchSize: 10, + pollingIntervalMs: 1000, + trustServerCertificate: true + }, ...this.cdcStreamOptions }; this._cdcStream = new CDCStream(options); diff --git a/modules/module-mssql/test/src/util.ts b/modules/module-mssql/test/src/util.ts index a0d76068f..e4eaacef0 100644 --- a/modules/module-mssql/test/src/util.ts +++ b/modules/module-mssql/test/src/util.ts @@ -37,7 +37,11 @@ export function describeWithStorage(options: TestOptions, fn: (factory: TestStor export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ type: 'mssql', uri: TEST_URI, - trustServerCertificate: true + additionalConfig: { + pollingBatchSize: 10, + pollingIntervalMs: 1000, + trustServerCertificate: true + } }); /** From 285b7cb2d9d4243764a569f80a98f6015e3abb55 Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Fri, 28 Nov 2025 13:17:54 +0200 Subject: [PATCH 41/42] Small tweak to mssql snapshot test --- .../module-mssql/test/src/CDCStream_resumable_snapshot.test.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts index 929c710c2..19b886b93 100644 --- a/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts +++ b/modules/module-mssql/test/src/CDCStream_resumable_snapshot.test.ts @@ -46,10 +46,9 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n await createTestTableWithBasicId(connectionManager, 'test_data1'); await createTestTableWithBasicId(connectionManager, 'test_data2'); - let beforeLSN = await getLatestReplicatedLSN(connectionManager); await connectionManager.query(`INSERT INTO test_data1(description) SELECT 'value' FROM GENERATE_SERIES(1, 1000, 1)`); + let beforeLSN = await getLatestReplicatedLSN(connectionManager); await connectionManager.query(`INSERT INTO test_data2(description) SELECT 'value' FROM GENERATE_SERIES(1, 10000, 1)`); - await waitForPendingCDCChanges(beforeLSN, connectionManager); const p = context.replicateSnapshot(); From b9dd9a08768c03b918fc4e4ec39c78e37843199b Mon Sep 17 00:00:00 2001 From: Roland Teichert Date: Fri, 28 Nov 2025 14:34:53 +0200 Subject: [PATCH 42/42] Streamlined mssql types a bit more --- modules/module-mssql/src/types/types.ts | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/modules/module-mssql/src/types/types.ts 
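
A note on the snapshot-test tweak in the patch above: the LSN checkpoint is now captured after the first batch of inserts, so waitForPendingCDCChanges waits until the CDC capture job has processed those rows before replication resumes. A minimal sketch of that checkpoint-then-wait pattern; getMaxCapturedLSN is a hypothetical stand-in for the real helper, and LSNs are assumed to be fixed-width hex strings, where lexicographic order matches log order:

async function waitForCapturedLSN(
  getMaxCapturedLSN: () => Promise<string>,
  target: string,
  intervalMs = 100
): Promise<void> {
  while ((await getMaxCapturedLSN()) < target) {
    // The capture job has not yet processed past the checkpoint; retry.
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
}
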
b/modules/module-mssql/src/types/types.ts
index 16189f2d5..d89fc847b 100644
--- a/modules/module-mssql/src/types/types.ts
+++ b/modules/module-mssql/src/types/types.ts
@@ -63,6 +63,11 @@ export const DefaultAuthentication = t.object({
 
 });
 export type DefaultAuthentication = t.Decoded<typeof DefaultAuthentication>;
+export const Authentication = DefaultAuthentication.or(AzureActiveDirectoryPasswordAuthentication).or(
+  AzureActiveDirectoryServicePrincipalSecret
+);
+export type Authentication = t.Decoded<typeof Authentication>;
+
 export const AdditionalConfig = t.object({
   /**
    * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds.
@@ -81,7 +86,7 @@ export const AdditionalConfig = t.object({
 });
 
 export interface AdditionalConfig {
-  /** 
+  /**
    * Interval in milliseconds to wait between polling cycles. Defaults to 1000 milliseconds.
    */
   pollingIntervalMs: number;
@@ -96,11 +101,6 @@ export interface AdditionalConfig {
   trustServerCertificate: boolean;
 }
 
-export type AuthenticationType =
-  | DefaultAuthentication
-  | AzureActiveDirectoryPasswordAuthentication
-  | AzureActiveDirectoryServicePrincipalSecret;
-
 export interface NormalizedMSSQLConnectionConfig {
   id: string;
   tag: string;
@@ -112,7 +112,7 @@ export interface NormalizedMSSQLConnectionConfig {
   database: string;
   schema?: string;
 
-  authentication?: AuthenticationType;
+  authentication?: Authentication;
 
   lookup?: LookupFunction;
 
@@ -130,9 +130,7 @@ export const MSSQLConnectionConfig = service_types.configFile.DataSourceConfig.a
     hostname: t.string.optional(),
     port: service_types.configFile.portCodec.optional(),
 
-    authentication: DefaultAuthentication.or(AzureActiveDirectoryPasswordAuthentication)
-      .or(AzureActiveDirectoryServicePrincipalSecret)
-      .optional(),
+    authentication: Authentication.optional(),
 
     reject_ip_ranges: t.array(t.string).optional(),
     additionalConfig: AdditionalConfig.optional()
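
The final patch's streamlining relies on a pattern worth spelling out: compose a union codec once with .or(), derive the static type from the codec with t.Decoded, and express optionality at each use site rather than inside the union. A minimal self-contained sketch, assuming the ts-codec-style API used throughout these types; the codec names here are illustrative stand-ins, not the module's real ones:

import * as t from 'ts-codec';

const UserPasswordAuth = t.object({
  username: t.string,
  password: t.string
});

const AccessTokenAuth = t.object({
  token: t.string
});

// One source of truth for both the runtime validator and the static type.
const Auth = UserPasswordAuth.or(AccessTokenAuth);
type Auth = t.Decoded<typeof Auth>;

const Config = t.object({
  // Optionality lives at the use site, so the union stays reusable.
  authentication: Auth.optional()
});
type Config = t.Decoded<typeof Config>;

Deriving the type from the codec is what lets this patch delete the hand-written AuthenticationType union: the static type can no longer drift from the validator.
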