diff --git a/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.js b/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.js index dea452fed..fd971d272 100644 --- a/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.js +++ b/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.js @@ -6,6 +6,7 @@ const isS3ListAccessDeniedError = require('../../utils/is-s3-list-access-denied- const parseDeploymentObjectKey = require('../../utils/parse-deployment-object-key'); const ServerlessError = require('../../../../serverless-error'); const { log } = require('../../../../utils/serverless-utils/log'); +const { S3Client, DeleteObjectsCommand, paginateListObjectsV2 } = require('@aws-sdk/client-s3'); const maxDeleteObjectsCount = 1000; @@ -31,19 +32,12 @@ const createDeleteObjectsError = (bucketName, firstError) => { ); }; -async function listObjectsV2(provider, params) { +async function listObjectsV2(s3, params) { const Contents = []; - let ContinuationToken; - do { - const response = await provider.request('S3', 'listObjectsV2', { - ...params, - ...(ContinuationToken ? 
{ ContinuationToken } : {}), - }); - - Contents.push(...(response?.Contents || [])); - ContinuationToken = response && response.NextContinuationToken; - } while (ContinuationToken); + for await (const response of paginateListObjectsV2({ client: s3 }, params)) { + Contents.push(...(response.Contents || [])); + } return { Contents }; } @@ -58,7 +52,8 @@ module.exports = { const prefix = this.provider.getDeploymentPrefix(); try { - const response = await listObjectsV2(this.provider, { + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); + const response = await listObjectsV2(s3, { Bucket: this.bucketName, Prefix: `${prefix}/${service}/${stage}/`, }); @@ -76,12 +71,15 @@ module.exports = { async removeObjects(objectsToRemove) { if (!objectsToRemove || !objectsToRemove.length) return; + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); for (let index = 0; index < objectsToRemove.length; index += maxDeleteObjectsCount) { const batch = objectsToRemove.slice(index, index + maxDeleteObjectsCount); - const result = await this.provider.request('S3', 'deleteObjects', { - Bucket: this.bucketName, - Delete: { Objects: batch }, - }); + const result = await s3.send( + new DeleteObjectsCommand({ + Bucket: this.bucketName, + Delete: { Objects: batch }, + }) + ); if (result && result.Errors && result.Errors.length) { throw createDeleteObjectsError(this.bucketName, result.Errors[0]); @@ -127,7 +125,8 @@ module.exports = { 'INVALID_EMPTY_CHANGE_SET_ARTIFACT_DIRECTORY' ); } - response = await listObjectsV2(this.provider, { + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); + response = await listObjectsV2(s3, { Bucket: this.bucketName, Prefix: `${artifactDirectoryName}/`, }); diff --git a/lib/plugins/aws/deploy/lib/upload-artifacts.js b/lib/plugins/aws/deploy/lib/upload-artifacts.js index 33cdcef63..351407305 100644 --- a/lib/plugins/aws/deploy/lib/upload-artifacts.js +++ b/lib/plugins/aws/deploy/lib/upload-artifacts.js @@ -6,6 +6,7 @@ 
const crypto = require('crypto'); const limit = require('ext/promise/limit').bind(Promise); const { filesize } = require('filesize'); const normalizeFiles = require('../../lib/normalize-files'); +const uploadS3Object = require('../../lib/upload-s3-object'); const getLambdaLayerArtifactPath = require('../../utils/get-lambda-layer-artifact-path'); const ServerlessError = require('../../../../serverless-error'); const setS3UploadEncryptionOptions = require('../../../../aws/set-s3-upload-encryption-options'); @@ -71,7 +72,7 @@ module.exports = { params = setS3UploadEncryptionOptions(params, deploymentBucketObject); } - return this.provider.request('S3', 'upload', params); + return uploadS3Object(this.provider, params); }, async uploadStateFile() { log.info('Uploading State file to S3'); @@ -101,7 +102,7 @@ module.exports = { params = setS3UploadEncryptionOptions(params, deploymentBucketObject); } - return this.provider.request('S3', 'upload', params); + return uploadS3Object(this.provider, params); }, async getFunctionArtifactFilePaths() { diff --git a/lib/plugins/aws/lib/upload-s3-object.js b/lib/plugins/aws/lib/upload-s3-object.js new file mode 100644 index 000000000..9e8e0d8a4 --- /dev/null +++ b/lib/plugins/aws/lib/upload-s3-object.js @@ -0,0 +1,49 @@ +'use strict'; + +const { S3Client } = require('@aws-sdk/client-s3'); +const { Upload } = require('@aws-sdk/lib-storage'); +const ServerlessError = require('../../../serverless-error'); +const { getAwsErrorCode, getAwsErrorStatusCode } = require('../utils/aws-sdk-v3-error'); + +const uploadQueueSize = 6; +const uploadPartSize = 5 * 1024 * 1024; +const normalizerPattern = /(? 
<!^)([A-Z])/g; + +const normalizeErrorCodePostfix = (code) => { + if (typeof code === 'number') return `HTTP_${code}_ERROR`; + return String(code).replace(normalizerPattern, '_$1').toUpperCase(); +}; + +module.exports = async (provider, params) => { + const s3 = new S3Client( + await provider.getAwsSdkV3Config({ + useAccelerateEndpoint: provider.isS3TransferAccelerationEnabled(), + }) + ); + + try { + return await new Upload({ + client: s3, + params, + queueSize: uploadQueueSize, + partSize: uploadPartSize, + leavePartsOnError: false, + }).done(); + } catch (error) { + const providerErrorCode = getAwsErrorCode(error) || getAwsErrorStatusCode(error); + const providerErrorCodeExtension = providerErrorCode + ? normalizeErrorCodePostfix(providerErrorCode) + : 'ERROR'; + + throw Object.assign( + new ServerlessError( + error && error.message ? error.message : String(providerErrorCode || 'Error'), + `AWS_S3_UPLOAD_${providerErrorCodeExtension}` + ), + { + providerError: error, + providerErrorCodeExtension, + } + ); + } +}; diff --git a/lib/plugins/aws/lib/upload-zip-file.js b/lib/plugins/aws/lib/upload-zip-file.js index ab58a94dd..8295e228e 100644 --- a/lib/plugins/aws/lib/upload-zip-file.js +++ b/lib/plugins/aws/lib/upload-zip-file.js @@ -5,6 +5,7 @@ const fs = require('fs'); const crypto = require('crypto'); const log = require('../../../utils/serverless-utils/log').log.get('deploy:upload'); const setS3UploadEncryptionOptions = require('../../../aws/set-s3-upload-encryption-options'); +const uploadS3Object = require('./upload-s3-object'); module.exports = { async uploadZipFile({ filename, s3KeyDirname, basename }) { @@ -38,7 +39,7 @@ module.exports = { params = setS3UploadEncryptionOptions(params, deploymentBucketObject); } - const response = await this.provider.request('S3', 'upload', params); + const response = await uploadS3Object(this.provider, params); // Interestingly, if request handling was queued, and stream errored (before being consumed by // AWS SDK) then SDK call succeeds without actually uploading a file to S3
bucket. // Below line ensures that eventual stream error is communicated diff --git a/lib/plugins/aws/package/compile/functions.js b/lib/plugins/aws/package/compile/functions.js index c20496ee0..5fd82f7f2 100644 --- a/lib/plugins/aws/package/compile/functions.js +++ b/lib/plugins/aws/package/compile/functions.js @@ -1,10 +1,11 @@ 'use strict'; -const AWS = require('../../../../aws/sdk-v2'); const crypto = require('crypto'); const fs = require('fs'); const path = require('path'); const { isDeepStrictEqual } = require('node:util'); +const { pipeline } = require('node:stream/promises'); +const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3'); const isObject = require('type/object/is'); const ServerlessError = require('../../../../serverless-error'); const deepSortObjectByKey = require('../../../../utils/deep-sort-object-by-key'); @@ -115,9 +116,6 @@ class AwsCompileFunctions { } async downloadPackageArtifact(functionName) { - const { region } = this.options; - const S3 = new AWS.S3({ region }); - const functionObject = this.serverless.service.getFunction(functionName); if (functionObject.image) return; @@ -128,25 +126,18 @@ class AwsCompileFunctions { const s3Object = parseS3URI(artifactFilePath); if (!s3Object) return; log.info(`Downloading ${s3Object.Key} from bucket ${s3Object.Bucket}`); - await new Promise((resolve, reject) => { - const tmpDir = this.serverless.utils.getTmpDirPath(); - const filePath = path.join(tmpDir, path.basename(s3Object.Key)); - - const readStream = S3.getObject(s3Object).createReadStream(); - - const writeStream = fs.createWriteStream(filePath); - readStream - .pipe(writeStream) - .on('error', reject) - .on('close', () => { - if (functionObject.package.artifact) { - functionObject.package.artifact = filePath; - } else { - this.serverless.service.package.artifact = filePath; - } - return resolve(filePath); - }); - }); + const tmpDir = this.serverless.utils.getTmpDirPath(); + const filePath = path.join(tmpDir, 
path.basename(s3Object.Key)); + const { region } = this.options; + const s3 = new S3Client(await this.provider.getAwsSdkV3Config({ region })); + const response = await s3.send(new GetObjectCommand(s3Object)); + + await pipeline(response.Body, fs.createWriteStream(filePath)); + if (functionObject.package && functionObject.package.artifact) { + functionObject.package.artifact = filePath; + } else { + this.serverless.service.package.artifact = filePath; + } } async addFileToHash(filePath, hash) { diff --git a/lib/plugins/aws/remove/lib/bucket.js b/lib/plugins/aws/remove/lib/bucket.js index 1f4310884..8a4bfb416 100644 --- a/lib/plugins/aws/remove/lib/bucket.js +++ b/lib/plugins/aws/remove/lib/bucket.js @@ -4,6 +4,12 @@ const { log } = require('../../../../utils/serverless-utils/log'); const ServerlessError = require('../../../../serverless-error'); const isS3ListAccessDeniedError = require('../../utils/is-s3-list-access-denied-error'); const { isCloudFormationValidationError } = require('../../utils/aws-sdk-v3-error'); +const { + S3Client, + DeleteObjectsCommand, + ListObjectsV2Command, + ListObjectVersionsCommand, +} = require('@aws-sdk/client-s3'); const maxDeleteObjectsCount = 1000; @@ -35,17 +41,20 @@ module.exports = { this.serverless.service.service }/${this.provider.getStage()}/`; + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); let ContinuationToken; do { let result; try { - result = await this.provider.request('S3', 'listObjectsV2', { - Bucket: this.bucketName, - Prefix: prefix, - ...(ContinuationToken ? { ContinuationToken } : {}), - }); + result = await s3.send( + new ListObjectsV2Command({ + Bucket: this.bucketName, + Prefix: prefix, + ...(ContinuationToken ? 
{ ContinuationToken } : {}), + }) + ); } catch (err) { if (isS3ListAccessDeniedError(err)) throw createS3ListObjectsAccessDeniedError(); throw err; @@ -56,7 +65,7 @@ module.exports = { })); const nextContinuationToken = result && result.NextContinuationToken; - await this.deleteObjectBatches(pageObjects); + await this.deleteObjectBatches(pageObjects, s3); ContinuationToken = nextContinuationToken; } while (ContinuationToken); }, @@ -66,6 +75,7 @@ module.exports = { this.serverless.service.service }/${this.provider.getStage()}/`; + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); let KeyMarker; let VersionIdMarker; @@ -73,12 +83,14 @@ module.exports = { let result; try { - result = await this.provider.request('S3', 'listObjectVersions', { - Bucket: this.bucketName, - Prefix: prefix, - ...(KeyMarker ? { KeyMarker } : {}), - ...(VersionIdMarker ? { VersionIdMarker } : {}), - }); + result = await s3.send( + new ListObjectVersionsCommand({ + Bucket: this.bucketName, + Prefix: prefix, + ...(KeyMarker ? { KeyMarker } : {}), + ...(VersionIdMarker ? 
{ VersionIdMarker } : {}), + }) + ); } catch (err) { if (isS3ListAccessDeniedError(err)) throw createS3ListObjectsAccessDeniedError(); throw err; @@ -97,7 +109,7 @@ module.exports = { const nextKeyMarker = result && result.NextKeyMarker; const nextVersionIdMarker = result && result.NextVersionIdMarker; - await this.deleteObjectBatches(pageObjects); + await this.deleteObjectBatches(pageObjects, s3); KeyMarker = nextKeyMarker; VersionIdMarker = nextVersionIdMarker; } while (KeyMarker || VersionIdMarker); @@ -110,17 +122,19 @@ module.exports = { : this.listObjectsV2(); }, - async deleteObjectBatches(objects) { + async deleteObjectBatches(objects, s3) { if (!objects.length) return; for (let index = 0; index < objects.length; index += maxDeleteObjectsCount) { const batch = objects.slice(index, index + maxDeleteObjectsCount); - const data = await this.provider.request('S3', 'deleteObjects', { - Bucket: this.bucketName, - Delete: { - Objects: batch, - }, - }); + const data = await s3.send( + new DeleteObjectsCommand({ + Bucket: this.bucketName, + Delete: { + Objects: batch, + }, + }) + ); if (data && data.Errors && data.Errors.length) { const firstErrorCode = data.Errors[0].Code; diff --git a/lib/plugins/aws/rollback.js b/lib/plugins/aws/rollback.js index 49038a4c1..d6c06a768 100644 --- a/lib/plugins/aws/rollback.js +++ b/lib/plugins/aws/rollback.js @@ -12,19 +12,16 @@ const getCreateStackParams = require('./lib/get-create-stack-params'); const getUpdateStackParams = require('./lib/get-update-stack-params'); const findAndGroupDeployments = require('./utils/find-and-group-deployments'); const isS3ListAccessDeniedError = require('./utils/is-s3-list-access-denied-error'); +const s3BodyToString = require('../../aws/s3-body-to-string'); const ServerlessError = require('../../serverless-error'); const { style, log, progress } = require('../../utils/serverless-utils/log'); +const { S3Client, GetObjectCommand, paginateListObjectsV2 } = require('@aws-sdk/client-s3'); +const { 
isS3GetObjectNoSuchKeyError } = require('./utils/aws-sdk-v3-error'); const slsConsoleLog = log.get('console'); const mainProgress = progress.get('main'); -const getErrorCode = (error) => - error && (error.code || error.Code || (error.providerError && error.providerError.code)); - -const isS3GetObjectNoSuchKeyError = (error) => - getErrorCode(error) === 'AWS_S3_GET_OBJECT_NO_SUCH_KEY' || getErrorCode(error) === 'NoSuchKey'; - const createS3ListObjectsAccessDeniedError = () => new ServerlessError( 'Could not list objects in the deployment bucket. Make sure you have sufficient permissions to access it.', @@ -106,19 +103,17 @@ class AwsRollback { const prefix = `${deploymentPrefix}/${serviceName}/${stage}/`; const response = { Contents: [] }; + const s3 = new S3Client(await this.provider.getAwsSdkV3Config()); try { - let ContinuationToken; - - do { - const page = await this.provider.request('S3', 'listObjectsV2', { + for await (const page of paginateListObjectsV2( + { client: s3 }, + { Bucket: this.bucketName, Prefix: prefix, - ...(ContinuationToken ? 
{ ContinuationToken } : {}), - }); - + } + )) { response.Contents.push(...(page?.Contents || [])); - ContinuationToken = page && page.NextContinuationToken; - } while (ContinuationToken); + } } catch (err) { if (isS3ListAccessDeniedError(err)) throw createS3ListObjectsAccessDeniedError(); throw err; @@ -157,14 +152,15 @@ class AwsRollback { service.package.artifactDirectoryName = `${prefix}${dateString}`; const stateString = await (async () => { try { - return ( - await this.provider.request('S3', 'getObject', { + const result = await s3.send( + new GetObjectCommand({ Bucket: this.bucketName, Key: `${ service.package.artifactDirectoryName }/${this.provider.naming.getServiceStateFileName()}`, }) - ).Body; + ); + return s3BodyToString(result.Body); } catch (error) { if (isS3GetObjectNoSuchKeyError(error)) return null; throw error; diff --git a/lib/plugins/aws/utils/aws-sdk-v3-error.js b/lib/plugins/aws/utils/aws-sdk-v3-error.js index 8aa03ba4c..6a1f422d8 100644 --- a/lib/plugins/aws/utils/aws-sdk-v3-error.js +++ b/lib/plugins/aws/utils/aws-sdk-v3-error.js @@ -83,6 +83,10 @@ function isS3HeadObjectForbiddenError(error) { ); } +function isS3GetObjectNoSuchKeyError(error) { + return isAwsErrorCode(error, 'AWS_S3_GET_OBJECT_NO_SUCH_KEY', 'NoSuchKey'); +} + function isS3HeadBucketNotFoundError(error) { return ( isAwsErrorCode(error, 'AWS_S3_HEAD_BUCKET_NOT_FOUND', 'NotFound', 'NoSuchBucket') || @@ -121,6 +125,7 @@ module.exports = { isAwsErrorStatusCode, isS3ListObjectsNoSuchBucketError, isS3ListObjectsAccessDeniedError, + isS3GetObjectNoSuchKeyError, isS3HeadObjectForbiddenError, isS3HeadBucketNotFoundError, isS3HeadBucketForbiddenError, diff --git a/package.json b/package.json index d63e5a86c..7f5dd758e 100644 --- a/package.json +++ b/package.json @@ -37,10 +37,11 @@ "@aws-sdk/client-eventbridge": "^3.975.0", "@aws-sdk/client-iam": "^3.975.0", "@aws-sdk/client-lambda": "^3.975.0", - "@aws-sdk/client-s3": "^3.975.0", + "@aws-sdk/client-s3": "^3.983.0", 
"@aws-sdk/client-ssm": "^3.975.0", "@aws-sdk/client-sts": "^3.975.0", "@aws-sdk/credential-providers": "^3.975.0", + "@aws-sdk/lib-storage": "^3.983.0", "@smithy/node-http-handler": "^4.6.1", "ajv": "^8.12.0", "ajv-formats": "^2.1.1", diff --git a/test/lib/configure-aws-sdk-v3-stub.js b/test/lib/configure-aws-sdk-v3-stub.js index c9ba0d898..e546a5a93 100644 --- a/test/lib/configure-aws-sdk-v3-stub.js +++ b/test/lib/configure-aws-sdk-v3-stub.js @@ -67,6 +67,9 @@ const serviceDefinitions = { clientName: 'S3Client', commands: { listObjectsV2: 'ListObjectsV2Command', + listObjectVersions: 'ListObjectVersionsCommand', + deleteObjects: 'DeleteObjectsCommand', + getObject: 'GetObjectCommand', headObject: 'HeadObjectCommand', headBucket: 'HeadBucketCommand', }, @@ -77,6 +80,7 @@ const serviceDefinitions = { outputToken: 'NextContinuationToken', }, }, + extraMethods: ['upload'], }, STS: { packageName: '@aws-sdk/client-sts', @@ -119,6 +123,57 @@ function getMethodStub(stubMap, service, method) { return serviceConfig[method]; } +function supportsMethod(definition, method) { + return ( + definition.commands[method] || + (definition.paginators || {})[method] || + (definition.extraMethods || []).includes(method) + ); +} + +function createLibStorageModuleStub({ stubMap, state }) { + return { + Upload: createNamedClass( + 'Upload', + class { + constructor(options) { + this.options = options; + this.client = options.client; + this.params = options.params; + } + + on() { + return this; + } + + async done() { + const context = { + service: 'S3', + method: 'upload', + commandName: 'Upload', + input: this.params, + clientConfig: this.client && this.client.config, + client: this.client, + upload: this, + options: this.options, + }; + state.sends.push(context); + + const value = getMethodStub(stubMap, 'S3', 'upload'); + return resolveStubValue({ + state, + service: 'S3', + method: 'upload', + value, + input: this.params, + context, + }); + } + } + ), + }; +} + function createModuleStub({ 
service, definition, stubMap, state }) { const exports = {}; @@ -205,9 +260,7 @@ module.exports = (stubMap, { ignoreUnsupportedServices = false } = {}) => { } if ( ignoreUnsupportedServices && - !Object.keys(stubMap[service]).some( - (method) => definition.commands[method] || (definition.paginators || {})[method] - ) + !Object.keys(stubMap[service]).some((method) => supportsMethod(definition, method)) ) { continue; } @@ -217,6 +270,9 @@ module.exports = (stubMap, { ignoreUnsupportedServices = false } = {}) => { stubMap, state, }); + if (service === 'S3') { + modulesCacheStub['@aws-sdk/lib-storage'] = createLibStorageModuleStub({ stubMap, state }); + } } return { diff --git a/test/unit/lib/plugins/aws/deploy/index.test.js b/test/unit/lib/plugins/aws/deploy/index.test.js index 5b9bf28b8..67c85da50 100644 --- a/test/unit/lib/plugins/aws/deploy/index.test.js +++ b/test/unit/lib/plugins/aws/deploy/index.test.js @@ -254,7 +254,8 @@ describe('test/unit/lib/plugins/aws/deploy/index.test.js', () => { call[0].Key.endsWith('compiled-cloudformation-template.json') ); expect(wasCloudFormationTemplateUploadInitiated).to.be.true; - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 's3-bucket-resource', Delete: { Objects: [ @@ -806,7 +807,8 @@ describe('test/unit/lib/plugins/aws/deploy/index.test.js', () => { call[0].Key.endsWith('compiled-cloudformation-template.json') ); expect(wasCloudFormationTemplateUploadInitiated).to.be.true; - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 's3-bucket-resource', Delete: { Objects: [ @@ -886,7 +888,8 @@ describe('test/unit/lib/plugins/aws/deploy/index.test.js', () => { expect(createChangeSetStub.getCall(0).args[0].ChangeSetType).to.equal('UPDATE'); expect(executeChangeSetStub).not.to.be.called; 
expect(deleteChangeSetStub).to.be.calledTwice; - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 's3-bucket-resource', Delete: { Objects: objectsToRemove }, }); diff --git a/test/unit/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.test.js b/test/unit/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.test.js index ae831aa3c..8d2378f57 100644 --- a/test/unit/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.test.js +++ b/test/unit/lib/plugins/aws/deploy/lib/cleanup-s3-bucket.test.js @@ -2,6 +2,7 @@ const sinon = require('sinon'); const chai = require('chai'); +const { S3Client, ListObjectsV2Command, DeleteObjectsCommand } = require('@aws-sdk/client-s3'); const AwsProvider = require('../../../../../../../lib/plugins/aws/provider'); const AwsDeploy = require('../../../../../../../lib/plugins/aws/deploy/index'); const Serverless = require('../../../../../../../lib/serverless'); @@ -13,6 +14,7 @@ describe('cleanupS3Bucket', () => { let provider; let awsDeploy; let s3Key; + let s3SendStub; beforeEach(() => { const options = { @@ -29,6 +31,7 @@ describe('cleanupS3Bucket', () => { awsDeploy = new AwsDeploy(serverless, options); awsDeploy.bucketName = 'deployment-bucket'; awsDeploy.serverless.cli = new serverless.classes.CLI(); + s3SendStub = sinon.stub(S3Client.prototype, 'send'); }); const createSignatureMismatchListError = () => { @@ -63,21 +66,34 @@ describe('cleanupS3Bucket', () => { return error; }; + afterEach(() => { + if (S3Client.prototype.send.restore) S3Client.prototype.send.restore(); + }); + + function expectListObjectsCall(call, input) { + expect(call.args[0]).to.be.instanceOf(ListObjectsV2Command); + expect(call.args[0].input).to.include(input); + } + + function expectDeleteObjectsCall(call, input) { + expect(call.args[0]).to.be.instanceOf(DeleteObjectsCommand); + expect(call.args[0].input).to.deep.equal(input); + } + describe('#getObjectsToRemove()', 
() => { it('should resolve if no objects are found', async () => { const serviceObjects = { Contents: [], }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); return awsDeploy.getObjectsToRemove().then(() => { - expect(listObjectsStub).to.have.been.calledOnce; - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub).to.have.been.calledOnce; + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); @@ -97,25 +113,24 @@ describe('cleanupS3Bucket', () => { ]), }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); return awsDeploy.getObjectsToRemove().then((objectsToRemove) => { expect(objectsToRemove).to.deep.equal([ { Key: `${s3Key}/${deploymentDirectories[0]}/artifact.zip` }, { Key: `${s3Key}/${deploymentDirectories[0]}/cloudformation.json` }, ]); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub.calledOnce).to.be.equal(true); + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); it('should not rewrite specific S3 list authentication failures', async () => { const listError = createSignatureMismatchListError(); - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').rejects(listError); + s3SendStub.rejects(listError); try { await awsDeploy.getObjectsToRemove(); @@ -123,14 +138,13 @@ describe('cleanupS3Bucket', () => { } catch (error) { expect(error).to.equal(listError); } finally { - expect(listObjectsStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); it('should rewrite status-only S3 
list access denied failures', async () => { const listError = createStatusOnlyListError(); - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').rejects(listError); + s3SendStub.rejects(listError); try { await expect(awsDeploy.getObjectsToRemove()).to.be.eventually.rejected.and.have.property( @@ -138,14 +152,13 @@ describe('cleanupS3Bucket', () => { 'AWS_S3_LIST_OBJECTS_V2_ACCESS_DENIED' ); } finally { - expect(listObjectsStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); it('should rewrite wrapped status-only S3 list access denied failures', async () => { const listError = createWrappedStatusOnlyListError(); - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').rejects(listError); + s3SendStub.rejects(listError); try { await expect(awsDeploy.getObjectsToRemove()).to.be.eventually.rejected.and.have.property( @@ -153,14 +166,13 @@ describe('cleanupS3Bucket', () => { 'AWS_S3_LIST_OBJECTS_V2_ACCESS_DENIED' ); } finally { - expect(listObjectsStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); it('should rewrite explicit S3 list access denied failures', async () => { const listError = createAccessDeniedListError(); - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').rejects(listError); + s3SendStub.rejects(listError); try { await expect(awsDeploy.getObjectsToRemove()).to.be.eventually.rejected.and.have.property( @@ -168,8 +180,7 @@ describe('cleanupS3Bucket', () => { 'AWS_S3_LIST_OBJECTS_V2_ACCESS_DENIED' ); } finally { - expect(listObjectsStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); @@ -179,9 +190,7 @@ describe('cleanupS3Bucket', () => { }; const oldKey = `${s3Key}/141264711231-2016-08-18T15:43:00/artifact.zip`; const newKey = `${s3Key}/151224711231-2016-08-18T15:42:00/artifact.zip`; - const listObjectsStub = 
sinon.stub(awsDeploy.provider, 'request'); - listObjectsStub - .withArgs('S3', 'listObjectsV2') + s3SendStub .onFirstCall() .resolves({ Contents: [{ Key: oldKey }], @@ -196,26 +205,17 @@ describe('cleanupS3Bucket', () => { const objectsToRemove = await awsDeploy.getObjectsToRemove(); expect(objectsToRemove).to.deep.equal([{ Key: oldKey }]); - expect(listObjectsStub).to.have.been.calledTwice; - expect(listObjectsStub.firstCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${s3Key}/`, - }, - ]); - expect(listObjectsStub.secondCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${s3Key}/`, - ContinuationToken: 'next-page', - }, - ]); + expect(s3SendStub).to.have.been.calledTwice; + expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${s3Key}/`, + }); + expectListObjectsCall(s3SendStub.secondCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${s3Key}/`, + ContinuationToken: 'next-page', + }); } finally { - awsDeploy.provider.request.restore(); delete serverless.service.provider.deploymentBucketObject; } }); @@ -232,16 +232,15 @@ describe('cleanupS3Bucket', () => { ], }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); return awsDeploy.getObjectsToRemove().then((objectsToRemove) => { expect(objectsToRemove.length).to.equal(0); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub.calledOnce).to.be.equal(true); + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); @@ -259,16 +258,15 @@ describe('cleanupS3Bucket', () => { ], }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); 
return awsDeploy.getObjectsToRemove().then((objectsToRemove) => { expect(objectsToRemove).to.have.lengthOf(0); - expect(listObjectsStub).to.have.been.calledOnce; - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub).to.have.been.calledOnce; + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); @@ -295,7 +293,7 @@ describe('cleanupS3Bucket', () => { ], }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); return awsDeploy.getObjectsToRemove().then((objectsToRemove) => { expect(objectsToRemove).to.deep.include.members([ @@ -313,12 +311,11 @@ describe('cleanupS3Bucket', () => { Key: `${s3Key}/141264711231-2016-08-18T15:43:00/cloudformation.json`, }); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub.calledOnce).to.be.equal(true); + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); @@ -338,37 +335,25 @@ describe('cleanupS3Bucket', () => { ], }; - const listObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(serviceObjects); + s3SendStub.resolves(serviceObjects); return awsDeploy.getObjectsToRemove().then((objectsToRemove) => { expect(objectsToRemove).to.deep.equal(serviceObjects.Contents); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect(listObjectsStub).to.have.been.calledWithExactly('S3', 'listObjectsV2', { + expect(s3SendStub.calledOnce).to.be.equal(true); + expectListObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Prefix: `${s3Key}/`, }); - awsDeploy.provider.request.restore(); }); }); }); }); describe('#removeObjects()', () => { - let deleteObjectsStub; - - beforeEach(() => { - 
deleteObjectsStub = sinon.stub(awsDeploy.provider, 'request').resolves(); - }); - - afterEach(() => { - if (awsDeploy.provider.request.restore) awsDeploy.provider.request.restore(); - }); - it('should resolve if no service objects are found in the S3 bucket', async () => awsDeploy.removeObjects().then(() => { - expect(deleteObjectsStub.calledOnce).to.be.equal(false); - awsDeploy.provider.request.restore(); + expect(s3SendStub.calledOnce).to.be.equal(false); })); it('should remove all old service files from the S3 bucket if available', async () => { @@ -378,16 +363,16 @@ describe('cleanupS3Bucket', () => { { Key: `${s3Key}/141264711231-2016-08-18T15:42:00/artifact.zip` }, { Key: `${s3Key}/141264711231-2016-08-18T15:42:00/cloudformation.json` }, ]; + s3SendStub.resolves(); return awsDeploy.removeObjects(objectsToRemove).then(() => { - expect(deleteObjectsStub).to.have.been.calledOnce; - expect(deleteObjectsStub).to.have.been.calledWithExactly('S3', 'deleteObjects', { + expect(s3SendStub).to.have.been.calledOnce; + expectDeleteObjectsCall(s3SendStub.firstCall, { Bucket: awsDeploy.bucketName, Delete: { Objects: objectsToRemove, }, }); - awsDeploy.provider.request.restore(); }); }); @@ -395,34 +380,27 @@ describe('cleanupS3Bucket', () => { const objectsToRemove = Array.from({ length: 1001 }, (ignored, index) => ({ Key: `${s3Key}/artifact-${index}.zip`, })); + s3SendStub.resolves(); await awsDeploy.removeObjects(objectsToRemove); - expect(deleteObjectsStub).to.have.been.calledTwice; - expect(deleteObjectsStub.firstCall.args).to.deep.equal([ - 'S3', - 'deleteObjects', - { - Bucket: awsDeploy.bucketName, - Delete: { - Objects: objectsToRemove.slice(0, 1000), - }, + expect(s3SendStub).to.have.been.calledTwice; + expectDeleteObjectsCall(s3SendStub.firstCall, { + Bucket: awsDeploy.bucketName, + Delete: { + Objects: objectsToRemove.slice(0, 1000), }, - ]); - expect(deleteObjectsStub.secondCall.args).to.deep.equal([ - 'S3', - 'deleteObjects', - { - Bucket: awsDeploy.bucketName, 
- Delete: { - Objects: objectsToRemove.slice(1000), - }, + }); + expectDeleteObjectsCall(s3SendStub.secondCall, { + Bucket: awsDeploy.bucketName, + Delete: { + Objects: objectsToRemove.slice(1000), }, - ]); + }); }); it('should fail when a delete objects batch returns a generic partial failure', async () => { - deleteObjectsStub.resolves({ + s3SendStub.resolves({ Errors: [{ Code: 'InternalError' }], }); @@ -432,7 +410,7 @@ describe('cleanupS3Bucket', () => { }); it('should fail when a delete objects batch returns an access denied partial failure', async () => { - deleteObjectsStub.resolves({ + s3SendStub.resolves({ Errors: [{ Code: 'AccessDenied' }], }); @@ -450,10 +428,8 @@ describe('cleanupS3Bucket', () => { const deploymentDirectory = '151224711231-2016-08-18T15:42:00'; const firstKey = `${s3Key}/${deploymentDirectory}/artifact.zip`; const secondKey = `${s3Key}/${deploymentDirectory}/compiled-cloudformation-template.json`; - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; - requestStub - .withArgs('S3', 'listObjectsV2') + s3SendStub .onFirstCall() .resolves({ Contents: [{ Key: firstKey }], @@ -461,75 +437,50 @@ describe('cleanupS3Bucket', () => { }) .onSecondCall() .resolves({ Contents: [{ Key: secondKey }] }); - requestStub.withArgs('S3', 'deleteObjects').resolves(); + s3SendStub.onThirdCall().resolves(); - try { - await awsDeploy.cleanupArtifactsForEmptyChangeSet(); + await awsDeploy.cleanupArtifactsForEmptyChangeSet(); - expect(requestStub.firstCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${s3Key}/${deploymentDirectory}/`, - }, - ]); - expect(requestStub.secondCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${s3Key}/${deploymentDirectory}/`, - ContinuationToken: 'next-page', - }, - ]); - const deleteCall = requestStub - .getCalls() - 
.find((call) => call.args[0] === 'S3' && call.args[1] === 'deleteObjects'); - expect(deleteCall.args[2]).to.deep.equal({ - Bucket: awsDeploy.bucketName, - Delete: { - Objects: [{ Key: firstKey }, { Key: secondKey }], - }, - }); - } finally { - awsDeploy.provider.request.restore(); - } + expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${s3Key}/${deploymentDirectory}/`, + }); + expectListObjectsCall(s3SendStub.secondCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${s3Key}/${deploymentDirectory}/`, + ContinuationToken: 'next-page', + }); + expectDeleteObjectsCall(s3SendStub.thirdCall, { + Bucket: awsDeploy.bucketName, + Delete: { + Objects: [{ Key: firstKey }, { Key: secondKey }], + }, + }); }); it('should list only the selected deployment directory', async () => { const deploymentDirectory = '151224711231-2016-08-18T15:42:00'; const artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; const artifactKey = `${artifactDirectoryName}/artifact.zip`; - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = artifactDirectoryName; - requestStub.withArgs('S3', 'listObjectsV2').resolves({ + s3SendStub.onFirstCall().resolves({ Contents: [{ Key: artifactKey }], }); - requestStub.withArgs('S3', 'deleteObjects').resolves(); + s3SendStub.onSecondCall().resolves(); - try { - await awsDeploy.cleanupArtifactsForEmptyChangeSet(); + await awsDeploy.cleanupArtifactsForEmptyChangeSet(); - expect(requestStub.firstCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${artifactDirectoryName}/`, - }, - ]); - } finally { - awsDeploy.provider.request.restore(); - } + expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${artifactDirectoryName}/`, + }); }); it('should not rewrite specific empty-changeset S3 list authentication failures', async () => { const deploymentDirectory = 
'151224711231-2016-08-18T15:42:00'; const listError = createSignatureMismatchListError(); - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; - requestStub.withArgs('S3', 'listObjectsV2').rejects(listError); + s3SendStub.rejects(listError); try { await awsDeploy.cleanupArtifactsForEmptyChangeSet(); @@ -537,17 +488,15 @@ describe('cleanupS3Bucket', () => { } catch (error) { expect(error).to.equal(listError); } finally { - expect(requestStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); it('should rewrite status-only empty-changeset S3 list access denied failures', async () => { const deploymentDirectory = '151224711231-2016-08-18T15:42:00'; const listError = createStatusOnlyListError(); - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; - requestStub.withArgs('S3', 'listObjectsV2').rejects(listError); + s3SendStub.rejects(listError); try { await expect( @@ -557,17 +506,15 @@ describe('cleanupS3Bucket', () => { 'AWS_S3_LIST_OBJECTS_V2_ACCESS_DENIED' ); } finally { - expect(requestStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); it('should rewrite wrapped status-only empty-changeset S3 list access denied failures', async () => { const deploymentDirectory = '151224711231-2016-08-18T15:42:00'; const listError = createWrappedStatusOnlyListError(); - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; - requestStub.withArgs('S3', 'listObjectsV2').rejects(listError); + s3SendStub.rejects(listError); try { await expect( @@ -577,8 +524,7 @@ describe('cleanupS3Bucket', () => { 
'AWS_S3_LIST_OBJECTS_V2_ACCESS_DENIED' ); } finally { - expect(requestStub).to.have.been.calledOnce; - awsDeploy.provider.request.restore(); + expect(s3SendStub).to.have.been.calledOnce; } }); @@ -601,20 +547,15 @@ describe('cleanupS3Bucket', () => { }, ]) { it(`should reject when ${description}`, async () => { - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = resolveArtifactDirectoryName(); - try { - await expect( - awsDeploy.cleanupArtifactsForEmptyChangeSet() - ).to.be.eventually.rejected.and.have.property( - 'code', - 'INVALID_EMPTY_CHANGE_SET_ARTIFACT_DIRECTORY' - ); - expect(requestStub).to.not.have.been.called; - } finally { - awsDeploy.provider.request.restore(); - } + await expect( + awsDeploy.cleanupArtifactsForEmptyChangeSet() + ).to.be.eventually.rejected.and.have.property( + 'code', + 'INVALID_EMPTY_CHANGE_SET_ARTIFACT_DIRECTORY' + ); + expect(s3SendStub).to.not.have.been.called; }); } @@ -622,27 +563,18 @@ describe('cleanupS3Bucket', () => { const deploymentDirectory = '151224711231-2016-08-18T15:42:00'; const artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; const artifactKey = `${artifactDirectoryName}/artifact.zip`; - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${artifactDirectoryName}/`; - requestStub.withArgs('S3', 'listObjectsV2').resolves({ + s3SendStub.onFirstCall().resolves({ Contents: [{ Key: artifactKey }], }); - requestStub.withArgs('S3', 'deleteObjects').resolves(); + s3SendStub.onSecondCall().resolves(); - try { - await awsDeploy.cleanupArtifactsForEmptyChangeSet(); + await awsDeploy.cleanupArtifactsForEmptyChangeSet(); - expect(requestStub.firstCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsDeploy.bucketName, - Prefix: `${artifactDirectoryName}/`, - }, - ]); - } finally { - awsDeploy.provider.request.restore(); - } + 
expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsDeploy.bucketName, + Prefix: `${artifactDirectoryName}/`, + }); }); it('should not rewrite delete failures as list failures', async () => { @@ -650,20 +582,17 @@ describe('cleanupS3Bucket', () => { const artifactKey = `${s3Key}/${deploymentDirectory}/artifact.zip`; const deleteError = new Error('delete denied'); deleteError.statusCode = 403; - const requestStub = sinon.stub(awsDeploy.provider, 'request'); awsDeploy.serverless.service.package.artifactDirectoryName = `${s3Key}/${deploymentDirectory}`; - requestStub.withArgs('S3', 'listObjectsV2').resolves({ + s3SendStub.onFirstCall().resolves({ Contents: [{ Key: artifactKey }], }); - requestStub.withArgs('S3', 'deleteObjects').rejects(deleteError); + s3SendStub.onSecondCall().rejects(deleteError); try { await awsDeploy.cleanupArtifactsForEmptyChangeSet(); throw new Error('Expected cleanupArtifactsForEmptyChangeSet to reject'); } catch (error) { expect(error).to.equal(deleteError); - } finally { - awsDeploy.provider.request.restore(); } }); }); diff --git a/test/unit/lib/plugins/aws/deploy/lib/upload-artifacts.test.js b/test/unit/lib/plugins/aws/deploy/lib/upload-artifacts.test.js index 62fad5ac3..3e9e922d2 100644 --- a/test/unit/lib/plugins/aws/deploy/lib/upload-artifacts.test.js +++ b/test/unit/lib/plugins/aws/deploy/lib/upload-artifacts.test.js @@ -6,6 +6,7 @@ const fse = require('fs-extra'); const path = require('path'); const crypto = require('crypto'); const chai = require('chai'); +const { Upload } = require('@aws-sdk/lib-storage'); const normalizeFiles = require('../../../../../../../lib/plugins/aws/lib/normalize-files'); const AwsProvider = require('../../../../../../../lib/plugins/aws/provider'); const AwsDeploy = require('../../../../../../../lib/plugins/aws/deploy/index'); @@ -54,6 +55,51 @@ describe('uploadArtifacts', () => { afterEach(() => sinon.restore()); + function getUploadParams(uploadStub) { + return uploadStub.firstCall.thisValue.params; 
+ } + + function createAwsError(name) { + return Object.assign(new Error('access denied'), { + name, + $metadata: { httpStatusCode: 403 }, + }); + } + + async function expectNormalizedUploadError(promise, providerError) { + try { + await promise; + } catch (error) { + expect(error.code).to.equal('AWS_S3_UPLOAD_ACCESS_DENIED'); + expect(error.providerError).to.equal(providerError); + return; + } + + throw new Error('Expected upload to reject'); + } + + function writeStateFile(state) { + const serviceDirPath = createTmpDir(); + const stateFileName = awsDeploy.provider.naming.getServiceStateFileName(); + const stateObject = { + ...state, + service: { + provider: {}, + ...(state.service || {}), + }, + package: state.package || {}, + }; + const stateFileContent = JSON.stringify(stateObject); + + serverless.serviceDir = serviceDirPath; + serverless.utils.writeFileSync( + path.join(serviceDirPath, '.serverless', stateFileName), + stateFileContent + ); + + return stateFileContent; + } + describe('#uploadCloudFormationFile()', () => { let normalizeCloudFormationTemplateStub; let uploadStub; @@ -62,7 +108,7 @@ describe('uploadArtifacts', () => { normalizeCloudFormationTemplateStub = sinon .stub(normalizeFiles, 'normalizeCloudFormationTemplate') .returns(); - uploadStub = sinon.stub(awsDeploy.provider, 'request').resolves(); + uploadStub = sinon.stub(Upload.prototype, 'done').resolves(); }); afterEach(() => { @@ -76,19 +122,34 @@ describe('uploadArtifacts', () => { return awsDeploy.uploadCloudFormationFile().then(() => { expect(normalizeCloudFormationTemplateStub).to.have.been.calledOnce; expect(uploadStub).to.have.been.calledOnce; - expect(uploadStub).to.have.been.calledWithExactly('S3', 'upload', { + expect(getUploadParams(uploadStub)).to.deep.include({ Bucket: awsDeploy.bucketName, Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/compiled-cloudformation-template.json`, Body: JSON.stringify({ foo: 'bar' }), ContentType: 'application/json', - Metadata: { - 
filesha256: 'local-hash-cf-template', - }, + }); + expect(getUploadParams(uploadStub).Metadata).to.deep.equal({ + filesha256: 'local-hash-cf-template', + }); + expect(uploadStub.firstCall.thisValue).to.include({ + queueSize: 6, + partSize: 5 * 1024 * 1024, + leavePartsOnError: false, }); expect(normalizeCloudFormationTemplateStub).to.have.been.calledWithExactly({ foo: 'bar' }); }); }); + it('should configure S3 transfer acceleration for CloudFormation file uploads', async () => { + crypto.createHash().update().digest.onCall(0).returns('local-hash-cf-template'); + awsDeploy.provider.options['aws-s3-accelerate'] = true; + + return awsDeploy.uploadCloudFormationFile().then(() => { + expect(uploadStub).to.have.been.calledOnce; + expect(uploadStub.firstCall.thisValue.client.config.useAccelerateEndpoint).to.equal(true); + }); + }); + it('should upload the CloudFormation file to a bucket with SSE bucket policy', async () => { crypto.createHash().update().digest.onCall(0).returns('local-hash-cf-template'); awsDeploy.serverless.service.provider.deploymentBucketObject = { @@ -98,19 +159,147 @@ describe('uploadArtifacts', () => { return awsDeploy.uploadCloudFormationFile().then(() => { expect(normalizeCloudFormationTemplateStub).to.have.been.calledOnce; expect(uploadStub).to.have.been.calledOnce; - expect(uploadStub).to.have.been.calledWithExactly('S3', 'upload', { + expect(getUploadParams(uploadStub)).to.deep.include({ Bucket: awsDeploy.bucketName, Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/compiled-cloudformation-template.json`, Body: JSON.stringify({ foo: 'bar' }), ContentType: 'application/json', - ServerSideEncryption: 'AES256', Metadata: { filesha256: 'local-hash-cf-template', }, + ServerSideEncryption: 'AES256', }); expect(normalizeCloudFormationTemplateStub).to.have.been.calledWithExactly({ foo: 'bar' }); }); }); + + it('should upload the CloudFormation file with KMS encryption options', async () => { + 
crypto.createHash().update().digest.onCall(0).returns('local-hash-cf-template'); + awsDeploy.serverless.service.provider.deploymentBucketObject = { + serverSideEncryption: 'aws:kms', + sseKMSKeyId: 'kms-key-id', + }; + + await awsDeploy.uploadCloudFormationFile(); + + expect(uploadStub).to.have.been.calledOnce; + expect(getUploadParams(uploadStub)).to.deep.equal({ + Bucket: awsDeploy.bucketName, + Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/compiled-cloudformation-template.json`, + Body: JSON.stringify({ foo: 'bar' }), + ContentType: 'application/json', + Metadata: { + filesha256: 'local-hash-cf-template', + }, + ServerSideEncryption: 'aws:kms', + SSEKMSKeyId: 'kms-key-id', + }); + }); + + it('should normalize CloudFormation upload errors', async () => { + const error = createAwsError('AccessDenied'); + crypto.createHash().update().digest.onCall(0).returns('local-hash-cf-template'); + uploadStub.rejects(error); + + await expectNormalizedUploadError(awsDeploy.uploadCloudFormationFile(), error); + }); + + it('should normalize status-only CloudFormation upload errors', async () => { + const error = Object.assign(new Error('forbidden'), { + $metadata: { httpStatusCode: 403 }, + }); + crypto.createHash().update().digest.onCall(0).returns('local-hash-cf-template'); + uploadStub.rejects(error); + + try { + await awsDeploy.uploadCloudFormationFile(); + } catch (caughtError) { + expect(caughtError.code).to.equal('AWS_S3_UPLOAD_HTTP_403_ERROR'); + expect(caughtError.providerError).to.equal(error); + return; + } + + throw new Error('Expected upload to reject'); + }); + }); + + describe('#uploadStateFile()', () => { + let uploadStub; + + beforeEach(() => { + uploadStub = sinon.stub(Upload.prototype, 'done').resolves(); + }); + + afterEach(() => { + uploadStub.restore(); + }); + + it('should upload the state file to the S3 bucket', async () => { + const stateFileContent = writeStateFile({ service: { service: 'new-service' } }); + 
crypto.createHash().update().digest.onCall(0).returns('local-hash-state-file'); + + await awsDeploy.uploadStateFile(); + + expect(uploadStub).to.have.been.calledOnce; + expect(getUploadParams(uploadStub)).to.deep.include({ + Bucket: awsDeploy.bucketName, + Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/serverless-state.json`, + Body: stateFileContent, + ContentType: 'application/json', + }); + expect(getUploadParams(uploadStub).Metadata).to.deep.equal({ + filesha256: 'local-hash-state-file', + }); + expect(uploadStub.firstCall.thisValue).to.include({ + queueSize: 6, + partSize: 5 * 1024 * 1024, + leavePartsOnError: false, + }); + }); + + it('should configure S3 transfer acceleration for state file uploads', async () => { + writeStateFile({ service: { service: 'new-service' } }); + crypto.createHash().update().digest.onCall(0).returns('local-hash-state-file'); + awsDeploy.provider.options['aws-s3-accelerate'] = true; + + await awsDeploy.uploadStateFile(); + + expect(uploadStub).to.have.been.calledOnce; + expect(uploadStub.firstCall.thisValue.client.config.useAccelerateEndpoint).to.equal(true); + }); + + it('should upload the state file with KMS encryption options', async () => { + const stateFileContent = writeStateFile({ service: { service: 'new-service' } }); + crypto.createHash().update().digest.onCall(0).returns('local-hash-state-file'); + awsDeploy.serverless.service.provider.deploymentBucketObject = { + serverSideEncryption: 'aws:kms', + sseKMSKeyId: 'kms-key-id', + }; + + await awsDeploy.uploadStateFile(); + + expect(uploadStub).to.have.been.calledOnce; + expect(getUploadParams(uploadStub)).to.deep.equal({ + Bucket: awsDeploy.bucketName, + Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/serverless-state.json`, + Body: stateFileContent, + ContentType: 'application/json', + Metadata: { + filesha256: 'local-hash-state-file', + }, + ServerSideEncryption: 'aws:kms', + SSEKMSKeyId: 'kms-key-id', + }); + }); + + it('should 
normalize state file upload errors', async () => { + const error = createAwsError('AccessDenied'); + writeStateFile({ service: { service: 'new-service' } }); + crypto.createHash().update().digest.onCall(0).returns('local-hash-state-file'); + uploadStub.rejects(error); + + await expectNormalizedUploadError(awsDeploy.uploadStateFile(), error); + }); }); describe('#uploadZipFile()', () => { @@ -119,7 +308,7 @@ describe('uploadArtifacts', () => { beforeEach(() => { readFileSyncStub = sinon.stub(fs, 'readFileSync').returns(); - uploadStub = sinon.stub(awsDeploy.provider, 'request').resolves(); + uploadStub = sinon.stub(Upload.prototype, 'done').resolves(); }); afterEach(() => { @@ -145,19 +334,61 @@ describe('uploadArtifacts', () => { }) .then(() => { expect(uploadStub).to.have.been.calledOnce; - expect(uploadStub).to.have.been.calledWithExactly('S3', 'upload', { + expect(getUploadParams(uploadStub)).to.deep.include({ Bucket: awsDeploy.bucketName, Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/artifact.zip`, - Body: sinon.match.object.and(sinon.match.has('path', artifactFilePath)), ContentType: 'application/zip', - Metadata: { - filesha256: 'local-hash-zip-file', - }, + }); + expect(getUploadParams(uploadStub).Body.path).to.equal(artifactFilePath); + expect(getUploadParams(uploadStub).Metadata).to.deep.equal({ + filesha256: 'local-hash-zip-file', + }); + expect(uploadStub.firstCall.thisValue).to.include({ + queueSize: 6, + partSize: 5 * 1024 * 1024, + leavePartsOnError: false, }); expect(readFileSyncStub).to.have.been.calledWithExactly(artifactFilePath); }); }); + it('should configure S3 transfer acceleration for .zip file uploads', async () => { + crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + awsDeploy.provider.options['aws-s3-accelerate'] = true; + + const tmpDirPath = getTmpDirPath(); + const artifactFilePath = path.join(tmpDirPath, 'artifact.zip'); + serverless.utils.writeFileSync(artifactFilePath, 'artifact.zip 
file content'); + + await awsDeploy.uploadZipFile({ + filename: artifactFilePath, + s3KeyDirname: awsDeploy.serverless.service.package.artifactDirectoryName, + }); + + expect(uploadStub).to.have.been.calledOnce; + expect(uploadStub.firstCall.thisValue.client.config.useAccelerateEndpoint).to.equal(true); + }); + + it('should throw observed stream errors after upload completes', async () => { + const streamError = new Error('stream failed'); + crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + const artifactFilePath = path.join(getTmpDirPath(), 'artifact.zip'); + sinon.stub(fs, 'createReadStream').returns({ + path: artifactFilePath, + on(eventName, listener) { + if (eventName === 'error') listener(streamError); + return this; + }, + }); + + await expect( + awsDeploy.uploadZipFile({ + filename: artifactFilePath, + s3KeyDirname: awsDeploy.serverless.service.package.artifactDirectoryName, + }) + ).to.be.rejectedWith(streamError); + }); + it('should upload the .zip file to a bucket with SSE bucket policy', async () => { crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); @@ -176,19 +407,72 @@ describe('uploadArtifacts', () => { .then(() => { expect(uploadStub).to.have.been.calledOnce; expect(readFileSyncStub).to.have.been.calledOnce; - expect(uploadStub).to.have.been.calledWithExactly('S3', 'upload', { + expect(getUploadParams(uploadStub)).to.deep.include({ Bucket: awsDeploy.bucketName, Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/artifact.zip`, - Body: sinon.match.object.and(sinon.match.has('path', artifactFilePath)), ContentType: 'application/zip', ServerSideEncryption: 'AES256', Metadata: { filesha256: 'local-hash-zip-file', }, }); + expect(getUploadParams(uploadStub).Body.path).to.equal(artifactFilePath); expect(readFileSyncStub).to.have.been.calledWithExactly(artifactFilePath); }); }); + + it('should upload the .zip file with SSE-C encryption options', async () => { + 
crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + + const tmpDirPath = getTmpDirPath(); + const artifactFilePath = path.join(tmpDirPath, 'artifact.zip'); + serverless.utils.writeFileSync(artifactFilePath, 'artifact.zip file content'); + awsDeploy.serverless.service.provider.deploymentBucketObject = { + sseCustomerAlgorithim: 'AES256', + sseCustomerKey: 'customer-key', + sseCustomerKeyMD5: 'customer-key-md5', + }; + + await awsDeploy.uploadZipFile({ + filename: artifactFilePath, + s3KeyDirname: awsDeploy.serverless.service.package.artifactDirectoryName, + }); + + const uploadParams = getUploadParams(uploadStub); + expect({ + ...uploadParams, + Body: uploadParams.Body.path, + }).to.deep.equal({ + Bucket: awsDeploy.bucketName, + Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/artifact.zip`, + Body: artifactFilePath, + ContentType: 'application/zip', + Metadata: { + filesha256: 'local-hash-zip-file', + }, + SSECustomerAlgorithm: 'AES256', + SSECustomerKey: 'customer-key', + SSECustomerKeyMD5: 'customer-key-md5', + }); + }); + + it('should normalize zip upload errors', async () => { + const error = createAwsError('AccessDenied'); + crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + + const tmpDirPath = getTmpDirPath(); + const artifactFilePath = path.join(tmpDirPath, 'artifact.zip'); + serverless.utils.writeFileSync(artifactFilePath, 'artifact.zip file content'); + uploadStub.rejects(error); + + await expectNormalizedUploadError( + awsDeploy.uploadZipFile({ + filename: artifactFilePath, + s3KeyDirname: awsDeploy.serverless.service.package.artifactDirectoryName, + }), + error + ); + }); }); describe('#uploadFunctionsAndLayers()', () => { @@ -294,7 +578,7 @@ describe('uploadArtifacts', () => { let customResourcesFilePath; beforeEach(() => { - uploadStub = sinon.stub(awsDeploy.provider, 'request').resolves(); + uploadStub = sinon.stub(Upload.prototype, 'done').resolves(); serviceDirPath = 
createTmpDir(); customResourcesFilePath = path.join(serviceDirPath, '.serverless', 'custom-resources.zip'); // Ensure no file stream is created, as by having provider.request mocked it'll be not consumed. @@ -325,16 +609,49 @@ describe('uploadArtifacts', () => { return expect(awsDeploy.uploadCustomResources()).to.eventually.be.fulfilled.then(() => { expect(uploadStub).to.have.been.calledOnce; - expect(uploadStub).to.have.been.calledWithExactly('S3', 'upload', { + expect(getUploadParams(uploadStub)).to.deep.include({ Bucket: awsDeploy.bucketName, Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/custom-resources.zip`, - Body: sinon.match.object.and(sinon.match.has('path', customResourcesFilePath)), ContentType: 'application/zip', Metadata: { filesha256: 'local-hash-zip-file', }, }); + expect(getUploadParams(uploadStub).Body.path).to.equal(customResourcesFilePath); + }); + }); + + it('should configure S3 transfer acceleration for custom resources uploads', async () => { + fse.ensureFileSync(customResourcesFilePath); + crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + awsDeploy.provider.options['aws-s3-accelerate'] = true; + + await awsDeploy.uploadCustomResources(); + + expect(uploadStub).to.have.been.calledOnce; + expect(uploadStub.firstCall.thisValue.client.config.useAccelerateEndpoint).to.equal(true); + }); + + it('should upload custom resources with SSE bucket policy', async () => { + fse.ensureFileSync(customResourcesFilePath); + crypto.createHash().update().digest.onCall(0).returns('local-hash-zip-file'); + awsDeploy.serverless.service.provider.deploymentBucketObject = { + serverSideEncryption: 'AES256', + }; + + await awsDeploy.uploadCustomResources(); + + expect(uploadStub).to.have.been.calledOnce; + expect(getUploadParams(uploadStub)).to.deep.include({ + Bucket: awsDeploy.bucketName, + Key: `${awsDeploy.serverless.service.package.artifactDirectoryName}/custom-resources.zip`, + ContentType: 'application/zip', + 
ServerSideEncryption: 'AES256', + Metadata: { + filesha256: 'local-hash-zip-file', + }, }); + expect(getUploadParams(uploadStub).Body.path).to.equal(customResourcesFilePath); }); }); }); diff --git a/test/unit/lib/plugins/aws/package/compile/functions.test.js b/test/unit/lib/plugins/aws/package/compile/functions.test.js index 0d68739ee..fb8ea51bc 100644 --- a/test/unit/lib/plugins/aws/package/compile/functions.test.js +++ b/test/unit/lib/plugins/aws/package/compile/functions.test.js @@ -1,9 +1,10 @@ 'use strict'; -const AWS = require('aws-sdk'); const fse = require('fs-extra'); const fsp = require('fs').promises; const path = require('path'); +const { Readable } = require('node:stream'); +const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3'); const chai = require('chai'); const sinon = require('sinon'); const AwsProvider = require('../../../../../../../lib/plugins/aws/provider'); @@ -72,24 +73,22 @@ describe('AwsCompileFunctions', () => { }); describe('#downloadPackageArtifacts()', () => { - let requestStub; + let sendStub; let testFilePath; const s3BucketName = 'test-bucket'; const s3ArtifactName = 's3-hosted-artifact.zip'; + const downloadedArtifactContent = 'downloaded artifact content'; beforeEach(() => { testFilePath = createTmpFile('dummy-artifact'); - requestStub = sinon.stub(AWS, 'S3').returns({ - getObject: () => ({ - createReadStream() { - return fse.createReadStream(testFilePath); - }, - }), + fse.writeFileSync(testFilePath, downloadedArtifactContent); + sendStub = sinon.stub(S3Client.prototype, 'send').resolves({ + Body: fse.createReadStream(testFilePath), }); }); afterEach(() => { - AWS.S3.restore(); + S3Client.prototype.send.restore(); }); it('should download the file and replace the artifact path for function packages', async () => { @@ -98,14 +97,20 @@ describe('AwsCompileFunctions', () => { `https://s3.amazonaws.com/${s3BucketName}/${s3ArtifactName}`; return 
expect(awsCompileFunctions.downloadPackageArtifacts()).to.be.fulfilled.then(() => { - const artifactFileName = awsCompileFunctions.serverless.service.functions[ - functionName - ].package.artifact - .split(path.sep) - .pop(); - - expect(requestStub.callCount).to.equal(1); + const artifactFilePath = + awsCompileFunctions.serverless.service.functions[functionName].package.artifact; + const artifactFileName = artifactFilePath.split(path.sep).pop(); + + expect(sendStub.callCount).to.equal(1); + expect(sendStub.firstCall.args[0]).to.be.instanceOf(GetObjectCommand); + expect(sendStub.firstCall.args[0].input).to.deep.equal({ + Bucket: s3BucketName, + Key: s3ArtifactName, + }); expect(artifactFileName).to.equal(s3ArtifactName); + return expect(fsp.readFile(artifactFilePath, 'utf8')).to.eventually.equal( + downloadedArtifactContent + ); }); }); @@ -115,28 +120,72 @@ describe('AwsCompileFunctions', () => { awsCompileFunctions.serverless.service.package.artifact = `https://s3.amazonaws.com/${s3BucketName}/${s3ArtifactName}`; return expect(awsCompileFunctions.downloadPackageArtifacts()).to.be.fulfilled.then(() => { - const artifactFileName = awsCompileFunctions.serverless.service.package.artifact - .split(path.sep) - .pop(); - - expect(requestStub.callCount).to.equal(1); + const artifactFilePath = awsCompileFunctions.serverless.service.package.artifact; + const artifactFileName = artifactFilePath.split(path.sep).pop(); + + expect(sendStub.callCount).to.equal(1); + expect(sendStub.firstCall.args[0]).to.be.instanceOf(GetObjectCommand); + expect(sendStub.firstCall.args[0].input).to.deep.equal({ + Bucket: s3BucketName, + Key: s3ArtifactName, + }); expect(artifactFileName).to.equal(s3ArtifactName); + return expect(fsp.readFile(artifactFilePath, 'utf8')).to.eventually.equal( + downloadedArtifactContent + ); }); }); - it('should not access AWS.S3 if URL is not an S3 URl', async () => { - AWS.S3.restore(); - const myRequestStub = sinon.stub(AWS, 'S3').returns({ - getObject: () => { - 
throw new Error('should not be invoked'); - }, + it('should strip query parameters when downloading presigned S3 artifact URLs', async () => { + awsCompileFunctions.serverless.service.package.individually = true; + awsCompileFunctions.serverless.service.functions[functionName].package.artifact = + `https://s3.amazonaws.com/${s3BucketName}/path/to/${s3ArtifactName}?X-Amz-Signature=secret`; + + await expect(awsCompileFunctions.downloadPackageArtifacts()).to.be.fulfilled; + + const artifactFilePath = + awsCompileFunctions.serverless.service.functions[functionName].package.artifact; + + expect(sendStub.callCount).to.equal(1); + expect(sendStub.firstCall.args[0]).to.be.instanceOf(GetObjectCommand); + expect(sendStub.firstCall.args[0].input).to.deep.equal({ + Bucket: s3BucketName, + Key: `path/to/${s3ArtifactName}`, }); + expect(path.basename(artifactFilePath)).to.equal(s3ArtifactName); + expect(artifactFilePath).to.not.include('?'); + expect(artifactFilePath).to.not.include('X-Amz'); + }); + + it('should not access S3 if URL is not an S3 URL', async () => { awsCompileFunctions.serverless.service.functions[functionName].package.artifact = 'https://s33amazonaws.com/this/that'; return expect(awsCompileFunctions.downloadPackageArtifacts()).to.be.fulfilled.then(() => { - expect(myRequestStub.callCount).to.equal(1); + expect(sendStub).to.not.have.been.called; }); }); + + it('should reject if the downloaded artifact stream errors', async () => { + const streamError = new Error('stream failed'); + const errorStream = new Readable({ + read() { + this.destroy(streamError); + }, + }); + sendStub.resolves({ + Body: errorStream, + }); + awsCompileFunctions.serverless.service.package.individually = true; + awsCompileFunctions.serverless.service.functions[functionName].package.artifact = + `https://s3.amazonaws.com/${s3BucketName}/${s3ArtifactName}`; + const originalArtifact = + awsCompileFunctions.serverless.service.functions[functionName].package.artifact; + + await 
expect(awsCompileFunctions.downloadPackageArtifacts()).to.be.rejectedWith(streamError); + expect( + awsCompileFunctions.serverless.service.functions[functionName].package.artifact + ).to.equal(originalArtifact); + }); }); describe('#compileFunctions()', () => { diff --git a/test/unit/lib/plugins/aws/remove/index.test.js b/test/unit/lib/plugins/aws/remove/index.test.js index fc2eb7b11..649eae25b 100644 --- a/test/unit/lib/plugins/aws/remove/index.test.js +++ b/test/unit/lib/plugins/aws/remove/index.test.js @@ -96,7 +96,8 @@ describe('test/unit/lib/plugins/aws/remove/index.test.js', () => { awsRequestStubMap, }); - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'resource-id', Delete: { Objects: [{ Key: 'first' }, { Key: 'second' }], @@ -120,7 +121,8 @@ describe('test/unit/lib/plugins/aws/remove/index.test.js', () => { awsRequestStubMap, }); - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'resource-id', Delete: { Objects: [{ Key: 'first' }, { Key: 'second' }], @@ -159,7 +161,8 @@ describe('test/unit/lib/plugins/aws/remove/index.test.js', () => { awsRequestStubMap, }); - expect(deleteObjectsStub).to.be.calledWithExactly({ + expect(deleteObjectsStub).to.be.calledOnce; + expect(deleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'resource-id', Delete: { Objects: [{ Key: 'first' }, { Key: 'second' }], @@ -396,7 +399,7 @@ describe('test/unit/lib/plugins/aws/remove/index.test.js', () => { }, }); - expect(listObjectVersionsStub).to.be.calledWithExactly({ + expect(listObjectVersionsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'bucket', Prefix: `serverless/${serverless.service.service}/dev/`, }); @@ -441,12 +444,13 @@ describe('test/unit/lib/plugins/aws/remove/index.test.js', () => { }, }); - 
expect(listObjectVersionsStub).to.be.calledWithExactly({ + expect(listObjectVersionsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'bucket', Prefix: `serverless/${serverless.service.service}/dev/`, }); - expect(innerDeleteObjectsStub).to.be.calledWithExactly({ + expect(innerDeleteObjectsStub).to.be.calledOnce; + expect(innerDeleteObjectsStub.firstCall.args[0]).to.deep.equal({ Bucket: 'bucket', Delete: { Objects: [ diff --git a/test/unit/lib/plugins/aws/rollback.test.js b/test/unit/lib/plugins/aws/rollback.test.js index 16907090a..cd91f7210 100644 --- a/test/unit/lib/plugins/aws/rollback.test.js +++ b/test/unit/lib/plugins/aws/rollback.test.js @@ -7,6 +7,8 @@ const Serverless = require('../../../../../lib/serverless'); const chai = require('chai'); const assert = require('chai').assert; const sinon = require('sinon'); +const { S3Client, ListObjectsV2Command, GetObjectCommand } = require('@aws-sdk/client-s3'); +const { Readable } = require('stream'); const expect = chai.expect; @@ -16,10 +18,14 @@ describe('AwsRollback', () => { let spawnStub; let serverless; let provider; - const selectedDeploymentKey = - 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z/compiled-cloudformation-template.json'; + let s3SendStub; + const selectedDeploymentDirectory = '1476779096930-2016-10-18T08:24:56.930Z'; const createInstance = (options) => { + if (serverless && serverless.pluginManager.spawn.restore) { + serverless.pluginManager.spawn.restore(); + } + if (S3Client.prototype.send.restore) S3Client.prototype.send.restore(); serverless = new Serverless({ commands: [], options: {} }); provider = new AwsProvider(serverless, options); serverless.setProvider('aws', provider); @@ -31,6 +37,7 @@ describe('AwsRollback', () => { awsRollback.serverless.cli = new serverless.classes.CLI(); const prefix = provider.getDeploymentPrefix(); s3Key = `${prefix}/${serverless.service.service}/${provider.getStage()}`; + s3SendStub = sinon.stub(S3Client.prototype, 'send'); }; beforeEach(() 
=> @@ -43,47 +50,66 @@ describe('AwsRollback', () => { afterEach(() => { if (provider.request.restore) provider.request.restore(); + if (S3Client.prototype.send.restore) S3Client.prototype.send.restore(); serverless.pluginManager.spawn.restore(); }); - function stubSelectedDeploymentList(requestStub) { - requestStub.withArgs('S3', 'listObjectsV2').resolves({ - Contents: [{ Key: selectedDeploymentKey }], + function expectListObjectsCall(call, input) { + expect(call.args[0]).to.be.instanceOf(ListObjectsV2Command); + expect(call.args[0].input).to.include(input); + } + + function stubSelectedDeploymentList() { + s3SendStub.onFirstCall().resolves({ + Contents: [ + { + Key: `${s3Key}/${selectedDeploymentDirectory}/compiled-cloudformation-template.json`, + }, + ], }); } - function expectGetStateFileCall(requestStub) { - expect(requestStub).to.have.been.calledWithExactly('S3', 'getObject', { + function expectGetStateFileCall(call) { + expect(call.args[0]).to.be.instanceOf(GetObjectCommand); + expect(call.args[0].input).to.deep.equal({ Bucket: awsRollback.bucketName, Key: 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z/serverless-state.json', }); } + function readableState(state) { + return Readable.from([JSON.stringify(state)]); + } + + async function expectSetStackToUpdateToRejectWith(expectedError) { + try { + await awsRollback.setStackToUpdate(); + } catch (error) { + expect(error).to.equal(expectedError); + return; + } + + throw new Error('Expected setStackToUpdate to reject'); + } + const createSignatureMismatchListError = () => { - const error = new Error('signature mismatch'); - error.providerError = { - code: 'SignatureDoesNotMatch', - statusCode: 403, - }; - return error; + return Object.assign(new Error('signature mismatch'), { + name: 'SignatureDoesNotMatch', + $metadata: { httpStatusCode: 403 }, + }); }; const createAccessDeniedListError = () => { - const error = new Error('access denied'); - error.providerError = { - code: 'AccessDenied', - 
statusCode: 403, - }; - return error; + return Object.assign(new Error('access denied'), { + name: 'AccessDenied', + $metadata: { httpStatusCode: 403 }, + }); }; - const createWrappedStatusOnlyListError = () => { - const error = new Error('forbidden'); - error.code = 'AWS_S3_LIST_OBJECTS_V2_ERROR'; - error.providerError = { - statusCode: 403, - }; - return error; + const createStatusOnlyListError = () => { + return Object.assign(new Error('forbidden'), { + $metadata: { httpStatusCode: 403 }, + }); }; describe('#constructor()', () => { @@ -144,16 +170,12 @@ describe('AwsRollback', () => { Contents: s3Objects, }; - const requestStub = sinon.stub(awsRollback.provider, 'request'); - requestStub.withArgs('S3', 'listObjectsV2').resolves(s3Response); - requestStub.withArgs('S3', 'getObject').resolves({ Body: '{}' }); + s3SendStub.onFirstCall().resolves(s3Response).onSecondCall().resolves({ Body: '{}' }); return awsRollback.setStackToUpdate().then(() => { expect(awsRollback.serverless.service.package.artifactDirectoryName).to.be.equal( 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z' ); - - awsRollback.provider.request.restore(); }); }); @@ -161,9 +183,7 @@ describe('AwsRollback', () => { const s3Response = { Contents: [], }; - const listObjectsStub = sinon.stub(awsRollback.provider, 'request'); - listObjectsStub.withArgs('S3', 'listObjectsV2').resolves(s3Response); - listObjectsStub.withArgs('S3', 'getObject').resolves({ Body: '{}' }); + s3SendStub.resolves(s3Response); return awsRollback .setStackToUpdate() @@ -172,14 +192,11 @@ describe('AwsRollback', () => { }) .catch((error) => { expect(error.code).to.equal('ROLLBACK_DEPLOYMENTS_NOT_FOUND'); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect( - listObjectsStub.calledWithExactly('S3', 'listObjectsV2', { - Bucket: awsRollback.bucketName, - Prefix: `${s3Key}/`, - }) - ).to.be.equal(true); - awsRollback.provider.request.restore(); + expect(s3SendStub.calledOnce).to.be.equal(true); + 
expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsRollback.bucketName, + Prefix: `${s3Key}/`, + }); }); }); @@ -196,9 +213,7 @@ describe('AwsRollback', () => { Contents: s3Objects, }; - const listObjectsStub = sinon.stub(awsRollback.provider, 'request'); - listObjectsStub.withArgs('S3', 'listObjectsV2').resolves(s3Response); - listObjectsStub.withArgs('S3', 'getObject').resolves({ Body: '{}' }); + s3SendStub.resolves(s3Response); return awsRollback .setStackToUpdate() @@ -207,14 +222,11 @@ describe('AwsRollback', () => { }) .catch((error) => { expect(error.code).to.equal('ROLLBACK_DEPLOYMENT_NOT_FOUND'); - expect(listObjectsStub.calledOnce).to.be.equal(true); - expect( - listObjectsStub.calledWithExactly('S3', 'listObjectsV2', { - Bucket: awsRollback.bucketName, - Prefix: `${s3Key}/`, - }) - ).to.be.equal(true); - awsRollback.provider.request.restore(); + expect(s3SendStub.calledOnce).to.be.equal(true); + expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsRollback.bucketName, + Prefix: `${s3Key}/`, + }); }); }); @@ -231,28 +243,21 @@ describe('AwsRollback', () => { Contents: s3Objects, }; - const listObjectsStub = sinon.stub(awsRollback.provider, 'request'); - listObjectsStub.withArgs('S3', 'listObjectsV2').resolves(s3Response); - listObjectsStub.withArgs('S3', 'getObject').resolves({ Body: '{}' }); + s3SendStub.onFirstCall().resolves(s3Response).onSecondCall().resolves({ Body: '{}' }); return awsRollback.setStackToUpdate().then(() => { expect(awsRollback.serverless.service.package.artifactDirectoryName).to.be.equal( 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z' ); - expect( - listObjectsStub.calledWithExactly('S3', 'listObjectsV2', { - Bucket: awsRollback.bucketName, - Prefix: `${s3Key}/`, - }) - ).to.be.equal(true); - awsRollback.provider.request.restore(); + expectListObjectsCall(s3SendStub.firstCall, { + Bucket: awsRollback.bucketName, + Prefix: `${s3Key}/`, + }); }); }); it('should resolve when the target deployment is found 
on a later S3 page', async () => { - const requestStub = sinon.stub(awsRollback.provider, 'request'); - requestStub - .withArgs('S3', 'listObjectsV2') + s3SendStub .onFirstCall() .resolves({ Contents: [], @@ -266,30 +271,23 @@ describe('AwsRollback', () => { }, ], }); - requestStub.withArgs('S3', 'getObject').resolves({ Body: '{}' }); + s3SendStub.onThirdCall().resolves({ Body: '{}' }); await awsRollback.setStackToUpdate(); expect(awsRollback.serverless.service.package.artifactDirectoryName).to.equal( 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z' ); - expect(requestStub.secondCall.args).to.deep.equal([ - 'S3', - 'listObjectsV2', - { - Bucket: awsRollback.bucketName, - Prefix: `${s3Key}/`, - ContinuationToken: 'next-page', - }, - ]); + expectListObjectsCall(s3SendStub.secondCall, { + Bucket: awsRollback.bucketName, + Prefix: `${s3Key}/`, + ContinuationToken: 'next-page', + }); }); it('should not rewrite specific S3 list authentication failures', async () => { const listError = createSignatureMismatchListError(); - sinon - .stub(awsRollback.provider, 'request') - .withArgs('S3', 'listObjectsV2') - .rejects(listError); + s3SendStub.rejects(listError); try { await awsRollback.setStackToUpdate(); @@ -301,10 +299,7 @@ describe('AwsRollback', () => { it('should rewrite explicit S3 list access denied failures', async () => { const listError = createAccessDeniedListError(); - sinon - .stub(awsRollback.provider, 'request') - .withArgs('S3', 'listObjectsV2') - .rejects(listError); + s3SendStub.rejects(listError); await expect(awsRollback.setStackToUpdate()).to.be.eventually.rejected.and.have.property( 'code', @@ -312,12 +307,9 @@ describe('AwsRollback', () => { ); }); - it('should rewrite wrapped status-only S3 list access denied failures', async () => { - const listError = createWrappedStatusOnlyListError(); - sinon - .stub(awsRollback.provider, 'request') - .withArgs('S3', 'listObjectsV2') - .rejects(listError); + it('should rewrite status-only S3 list 
access denied failures', async () => { + const listError = createStatusOnlyListError(); + s3SendStub.rejects(listError); await expect(awsRollback.setStackToUpdate()).to.be.eventually.rejected.and.have.property( 'code', @@ -326,76 +318,121 @@ describe('AwsRollback', () => { }); it('should read the state file for the selected deployment', async () => { - const requestStub = sinon.stub(awsRollback.provider, 'request'); - requestStub.withArgs('S3', 'listObjectsV2').resolves({ - Contents: [ - { - Key: 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z/compiled-cloudformation-template.json', - }, - ], - }); - requestStub.withArgs('S3', 'getObject').resolves({ - Body: JSON.stringify({ service: { service: 'rollback' } }), + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ + Body: readableState({ service: { service: 'rollback' } }), }); await awsRollback.setStackToUpdate(); - expect(requestStub).to.have.been.calledWithExactly('S3', 'getObject', { + expect(s3SendStub.secondCall.args[0]).to.be.instanceOf(GetObjectCommand); + expect(s3SendStub.secondCall.args[0].input).to.deep.equal({ Bucket: awsRollback.bucketName, Key: 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z/serverless-state.json', }); }); it('should continue when the selected deployment has no state file', async () => { - const requestStub = sinon.stub(awsRollback.provider, 'request'); - stubSelectedDeploymentList(requestStub); - requestStub.withArgs('S3', 'getObject').rejects({ - code: 'AWS_S3_GET_OBJECT_NO_SUCH_KEY', - }); + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().rejects( + Object.assign(new Error('The specified key does not exist.'), { + name: 'NoSuchKey', + $metadata: { httpStatusCode: 404 }, + }) + ); await expect(awsRollback.setStackToUpdate()).to.eventually.be.fulfilled; - expectGetStateFileCall(requestStub); + expectGetStateFileCall(s3SendStub.secondCall); + }); + + it('should rethrow missing bucket errors when reading the deployment state 
file', async () => { + const error = Object.assign(new Error('The specified bucket does not exist.'), { + name: 'NoSuchBucket', + $metadata: { httpStatusCode: 404 }, + }); + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().rejects(error); + + await expectSetStackToUpdateToRejectWith(error); + }); + + it('should rethrow status-only 404 errors when reading the deployment state file', async () => { + const error = Object.assign(new Error('not found'), { + $metadata: { httpStatusCode: 404 }, + }); + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().rejects(error); + + await expectSetStackToUpdateToRejectWith(error); + }); + + it('should rethrow access denied errors when reading the deployment state file', async () => { + const error = Object.assign(new Error('access denied'), { + name: 'AccessDenied', + $metadata: { httpStatusCode: 403 }, + }); + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().rejects(error); + + await expectSetStackToUpdateToRejectWith(error); + }); + + it('should reject rollback for unsupported console deployment state', async () => { + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ + Body: readableState({ console: true }), + }); + + await expect(awsRollback.setStackToUpdate()).to.be.eventually.rejected.and.have.property( + 'code', + 'CONSOLE_ACTIVATION_MISMATCH_ROLLBACK' + ); }); it('should reject malformed deployment state JSON', async () => { - const requestStub = sinon.stub(awsRollback.provider, 'request'); - stubSelectedDeploymentList(requestStub); - requestStub.withArgs('S3', 'getObject').resolves({ - Body: '{not-json', + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ + Body: Readable.from(['{not-json']), }); await expect(awsRollback.setStackToUpdate()).to.be.rejectedWith(SyntaxError); - expectGetStateFileCall(requestStub); + expectGetStateFileCall(s3SendStub.secondCall); }); it('should reject empty deployment state JSON', async () => { - const requestStub = 
sinon.stub(awsRollback.provider, 'request'); - stubSelectedDeploymentList(requestStub); - requestStub.withArgs('S3', 'getObject').resolves({ + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ + Body: Readable.from([]), + }); + + await expect(awsRollback.setStackToUpdate()).to.be.rejectedWith(SyntaxError); + expectGetStateFileCall(s3SendStub.secondCall); + }); + + it('should reject empty string deployment state JSON', async () => { + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ Body: '', }); await expect(awsRollback.setStackToUpdate()).to.be.rejectedWith(SyntaxError); - expectGetStateFileCall(requestStub); + expectGetStateFileCall(s3SendStub.secondCall); }); - it('should reject rollback for unsupported console deployment state', async () => { - const requestStub = sinon.stub(awsRollback.provider, 'request'); - requestStub.withArgs('S3', 'listObjectsV2').resolves({ - Contents: [ - { - Key: 'serverless/rollback/dev/1476779096930-2016-10-18T08:24:56.930Z/compiled-cloudformation-template.json', - }, - ], + it('should not update the stack if deployment state validation fails', async () => { + sinon.stub(awsRollback, 'setBucketName').callsFake(async () => { + awsRollback.bucketName = 'deployment-bucket'; }); - requestStub.withArgs('S3', 'getObject').resolves({ - Body: JSON.stringify({ console: true }), + const updateStackStub = sinon.stub(awsRollback, 'updateStack').resolves(); + stubSelectedDeploymentList(); + s3SendStub.onSecondCall().resolves({ + Body: readableState({ console: true }), }); - await expect(awsRollback.setStackToUpdate()).to.be.eventually.rejected.and.have.property( - 'code', - 'CONSOLE_ACTIVATION_MISMATCH_ROLLBACK' - ); + await expect( + awsRollback.hooks['rollback:rollback']() + ).to.be.eventually.rejected.and.have.property('code', 'CONSOLE_ACTIVATION_MISMATCH_ROLLBACK'); + expect(updateStackStub).not.to.be.called; }); }); }); diff --git a/test/unit/lib/plugins/aws/utils/aws-sdk-v3-error.test.js 
b/test/unit/lib/plugins/aws/utils/aws-sdk-v3-error.test.js index 68a8630c5..b89efc81a 100644 --- a/test/unit/lib/plugins/aws/utils/aws-sdk-v3-error.test.js +++ b/test/unit/lib/plugins/aws/utils/aws-sdk-v3-error.test.js @@ -128,6 +128,42 @@ describe('test/unit/lib/plugins/aws/utils/aws-sdk-v3-error.test.js', () => { ).to.equal(true); }); + it('matches only explicit S3 GetObject missing-key shapes', () => { + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ + code: 'AWS_S3_GET_OBJECT_NO_SUCH_KEY', + }) + ).to.equal(true); + expect(awsSdkV3Error.isS3GetObjectNoSuchKeyError({ name: 'NoSuchKey' })).to.equal(true); + expect(awsSdkV3Error.isS3GetObjectNoSuchKeyError({ Code: 'NoSuchKey' })).to.equal(true); + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ + providerError: { code: 'NoSuchKey', statusCode: 404 }, + }) + ).to.equal(true); + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ $metadata: { httpStatusCode: 404 } }) + ).to.equal(false); + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ + name: 'NoSuchBucket', + $metadata: { httpStatusCode: 404 }, + }) + ).to.equal(false); + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ + name: 'NotFound', + $metadata: { httpStatusCode: 404 }, + }) + ).to.equal(false); + expect( + awsSdkV3Error.isS3GetObjectNoSuchKeyError({ + name: 'AccessDenied', + $metadata: { httpStatusCode: 403 }, + }) + ).to.equal(false); + }); + it('matches CloudFormation, Lambda, and ECR shapes', () => { expect(awsSdkV3Error.isCloudFormationValidationError({ name: 'ValidationError' })).to.equal( true diff --git a/test/unit/test-lib/configure-aws-sdk-v3-stub.test.js b/test/unit/test-lib/configure-aws-sdk-v3-stub.test.js index 4ff13cf07..641f4e69a 100644 --- a/test/unit/test-lib/configure-aws-sdk-v3-stub.test.js +++ b/test/unit/test-lib/configure-aws-sdk-v3-stub.test.js @@ -66,6 +66,106 @@ describe('test/lib/configure-aws-sdk-v3-stub.test.js', () => { }); }); + it('stubs additional S3 data-plane commands', async () => { + const 
awsSdkV3Stub = configureAwsSdkV3Stub({ + S3: { + getObject: { Body: 'body' }, + deleteObjects: { Deleted: [{ Key: 'key' }] }, + listObjectVersions: { Versions: [{ Key: 'key', VersionId: 'version' }] }, + }, + }); + const { S3Client, GetObjectCommand, DeleteObjectsCommand, ListObjectVersionsCommand } = + awsSdkV3Stub.modulesCacheStub['@aws-sdk/client-s3']; + const client = new S3Client({ region: 'us-east-1' }); + + await expect( + client.send(new GetObjectCommand({ Bucket: 'bucket', Key: 'key' })) + ).to.eventually.deep.equal({ Body: 'body' }); + await expect( + client.send(new DeleteObjectsCommand({ Bucket: 'bucket', Delete: { Objects: [] } })) + ).to.eventually.deep.equal({ Deleted: [{ Key: 'key' }] }); + await expect( + client.send(new ListObjectVersionsCommand({ Bucket: 'bucket' })) + ).to.eventually.deep.equal({ Versions: [{ Key: 'key', VersionId: 'version' }] }); + + expect(awsSdkV3Stub.sends.map((send) => send.method)).to.deep.equal([ + 'getObject', + 'deleteObjects', + 'listObjectVersions', + ]); + }); + + it('stubs lib-storage Upload and records upload context', async () => { + const awsSdkV3Stub = configureAwsSdkV3Stub({ + S3: { + upload: { Location: 's3://bucket/key' }, + }, + }); + const { S3Client } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/client-s3']; + const { Upload } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/lib-storage']; + const client = new S3Client({ region: 'us-east-1' }); + const params = { Bucket: 'bucket', Key: 'key', Body: 'body' }; + const upload = new Upload({ client, params, queueSize: 6 }); + + expect(upload.on('httpUploadProgress', () => {})).to.equal(upload); + await expect(upload.done()).to.eventually.deep.equal({ Location: 's3://bucket/key' }); + + expect(awsSdkV3Stub.sends).to.have.length(1); + expect(awsSdkV3Stub.sends[0]).to.include({ + service: 'S3', + method: 'upload', + commandName: 'Upload', + client, + upload, + }); + expect(awsSdkV3Stub.sends[0].input).to.equal(params); + 
expect(awsSdkV3Stub.sends[0].clientConfig).to.equal(client.config); + expect(awsSdkV3Stub.sends[0].options).to.equal(upload.options); + expect(awsSdkV3Stub.sends[0].options).to.include({ queueSize: 6 }); + }); + + it('stubs lib-storage Upload even when S3.upload is not configured', async () => { + const awsSdkV3Stub = configureAwsSdkV3Stub({ S3: {} }); + const { S3Client } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/client-s3']; + const { Upload } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/lib-storage']; + const client = new S3Client({}); + const upload = new Upload({ + client, + params: { Bucket: 'bucket', Key: 'key', Body: 'body' }, + }); + + await expect(upload.done()).to.be.rejectedWith( + 'Missing AWS SDK v3 stub configuration for S3.upload' + ); + }); + + it('propagates configured Upload rejections', async () => { + const error = new Error('upload failed'); + const awsSdkV3Stub = configureAwsSdkV3Stub({ + S3: { + upload: () => { + throw error; + }, + }, + }); + const { S3Client } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/client-s3']; + const { Upload } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/lib-storage']; + const client = new S3Client({}); + const upload = new Upload({ + client, + params: { Bucket: 'bucket', Key: 'key', Body: 'body' }, + }); + + try { + await upload.done(); + } catch (caughtError) { + expect(caughtError).to.equal(error); + return; + } + + throw new Error('Expected upload to reject'); + }); + it('throws a clear error for missing method stubs', async () => { const awsSdkV3Stub = configureAwsSdkV3Stub({ S3: {} }); const { S3Client, HeadBucketCommand } = awsSdkV3Stub.modulesCacheStub['@aws-sdk/client-s3'];