diff --git a/assets/images/posts/aws-cdk/construct-hub.png b/assets/images/posts/aws-cdk/construct-hub.png new file mode 100644 index 000000000..b3d318618 Binary files /dev/null and b/assets/images/posts/aws-cdk/construct-hub.png differ diff --git a/assets/images/posts/aws-sqs/cloudwatch-log.png b/assets/images/posts/aws-sqs/cloudwatch-log.png new file mode 100644 index 000000000..00383aae4 Binary files /dev/null and b/assets/images/posts/aws-sqs/cloudwatch-log.png differ diff --git a/assets/images/posts/aws-sqs/lambda-trigger.png b/assets/images/posts/aws-sqs/lambda-trigger.png new file mode 100644 index 000000000..bab05bc62 Binary files /dev/null and b/assets/images/posts/aws-sqs/lambda-trigger.png differ diff --git a/assets/images/posts/aws-sqs/sqs-queue.png b/assets/images/posts/aws-sqs/sqs-queue.png new file mode 100644 index 000000000..82996c3a6 Binary files /dev/null and b/assets/images/posts/aws-sqs/sqs-queue.png differ diff --git a/content/.DS_Store b/content/.DS_Store new file mode 100644 index 000000000..4eae0835e Binary files /dev/null and b/content/.DS_Store differ diff --git a/content/blog/2021/2021-05-10-spring-cloud-aws-sqs.md b/content/blog/2021/2021-05-10-spring-cloud-aws-sqs.md index 0615dff2f..1ffe13b06 100644 --- a/content/blog/2021/2021-05-10-spring-cloud-aws-sqs.md +++ b/content/blog/2021/2021-05-10-spring-cloud-aws-sqs.md @@ -134,10 +134,12 @@ With the `QueueMessageChannel`, we first create an instance of this class to rep ```java @Service public class MessageSender { - private static final Logger logger = LoggerFactory.getLogger(MessageSender.class); + private static final Logger logger + = LoggerFactory.getLogger(MessageSender.class); // Replace XXXXX with AWS account ID. - private static final String QUEUE_NAME = "https://sqs.us-east-1.amazonaws.com/XXXXXXX/testQueue"; + private static final String QUEUE_NAME + = "https://sqs.us-east-1.amazonaws.com/XXXXXXX/testQueue"; @Autowired private final AmazonSQSAsync amazonSqs; @@ -148,7 +150,8 @@ public class MessageSender { } public boolean send(final String messagePayload) { - MessageChannel messageChannel = new QueueMessageChannel(amazonSqs, QUEUE_NAME); + MessageChannel messageChannel + = new QueueMessageChannel(amazonSqs, QUEUE_NAME); Message msg = MessageBuilder.withPayload(messagePayload) .setHeader("sender", "app1") @@ -213,7 +216,10 @@ public class MessageSenderWithTemplate { @Autowired private QueueMessagingTemplate messagingTemplate; - public void sendToFifoQueue(final String messagePayload, final String messageGroupID, final String messageDedupID) { + public void sendToFifoQueue( + final String messagePayload, + final String messageGroupID, + final String messageDedupID) { Message msg = MessageBuilder.withPayload(messagePayload) .setHeader("message-group-id", messageGroupID) @@ -294,7 +300,7 @@ for GenericMessage / "email":"jackie.chan@gmail.com"}, headers={ ... ... -```text +``` We can see a `MessageConversionException` here since the default converter `SimpleMessageConverter` can only convert between `String` and SQS messages. 
For complex objects like `SignupEvent` in our example, a custom converter needs to be configured like this: ```java @@ -316,14 +322,15 @@ public class CustomSqsConfiguration { new QueueMessageHandlerFactory(); queueHandlerFactory.setAmazonSqs(amazonSQSAsync); queueHandlerFactory.setArgumentResolvers(Collections.singletonList( - new PayloadMethodArgumentResolver(jackson2MessageConverter(mapper)) + new PayloadMethodArgumentResolver(jackson2MessageConverter(mapper)) )); return queueHandlerFactory; } private MessageConverter jackson2MessageConverter(final ObjectMapper mapper){ - final MappingJackson2MessageConverter converter = new MappingJackson2MessageConverter(); + final MappingJackson2MessageConverter + converter = new MappingJackson2MessageConverter(); converter.setObjectMapper(mapper); return converter; } @@ -353,11 +360,14 @@ public class CustomSqsConfiguration { ... ... - private MessageConverter jackson2MessageConverter(final ObjectMapper mapper) { + private MessageConverter jackson2MessageConverter( + final ObjectMapper mapper) { - final MappingJackson2MessageConverter converter = new MappingJackson2MessageConverter(); + final MappingJackson2MessageConverter + converter = new MappingJackson2MessageConverter(); - // set strict content type match to false to enable the listener to handle AWS events + // set strict content type match to false + // to enable the listener to handle AWS events converter.setStrictContentTypeMatch(false); converter.setObjectMapper(mapper); return converter; @@ -380,9 +390,11 @@ Our class `S3EventListener` containing the listener method which will receive th @Service public class S3EventListener { - @SqsListener(value = "testS3Queue", deletionPolicy = SqsMessageDeletionPolicy.ON_SUCCESS) + @SqsListener(value = "testS3Queue", + deletionPolicy = SqsMessageDeletionPolicy.ON_SUCCESS) public void receive(S3EventNotification s3EventNotificationRecord) { - S3EventNotification.S3Entity s3Entity = s3EventNotificationRecord.getRecords().get(0).getS3(); + S3EventNotification.S3Entity s3Entity + = s3EventNotificationRecord.getRecords().get(0).getS3(); String objectKey = s3Entity.getObject().getKey(); log.info("objectKey:: {}",objectKey); } diff --git a/content/blog/2022/2022-01-05-getting-started-with-amazon-CDK.md b/content/blog/2022/2022-01-05-getting-started-with-amazon-CDK.md new file mode 100644 index 000000000..ebe8ad9e1 --- /dev/null +++ b/content/blog/2022/2022-01-05-getting-started-with-amazon-CDK.md @@ -0,0 +1,465 @@ +--- +authors: [pratikdas] +title: "Getting Started with AWS CDK" +categories: ["aws"] +date: 2022-01-20 06:00:00 +1000 +modified: 2022-01-20 06:00:00 +1000 +excerpt: "AWS Cloud Development Kit (CDK) is a framework for defining cloud infrastructure in code and provisioning it through AWS CloudFormation. It helps us to build applications in the cloud with the expressive power of a programming language. In this article, we will introduce AWS CDK, understand its core concepts and work through some examples." +image: images/stock/0115-2021-1200x628-branded.jpg +url: getting-started-with-aws-cdk +--- + +Infrastructure as Code (IaC) is the managing and provisioning of infrastructure through code instead of through manual processes. + +AWS provides native support for IaC thru the CloudFormation service. With CloudFormation, teams can define declarative templates that specify the infrastructure required to deploy their solutions. 
+ +AWS Cloud Development Kit (CDK) is a framework for defining cloud infrastructure with the expressive power of a programming language and provisioning it through AWS CloudFormation. + +In this article, we will introduce AWS CDK, understand its core concepts and work through some examples. + +{% include github-project.html url="https://github.com/thombergs/code-examples/tree/master/aws/cdkv2" %} + +## What is AWS CDK +The AWS Cloud Development Kit (AWS CDK) is an open-source framework for defining cloud infrastructure as code with a set of supported programming languages. It is designed to support multiple programming languages. The core of the system is written in [TypeScript](https://www.typescriptlang.org), and bindings for other languages can be added. + +AWS CDK comes with a Command Line Interface (CLI) to interact with CDK applications for performing different tasks like : + - listing the infrastructure stacks defined in the CDK app + - synthesizing the stacks into CloudFormation templates + - determining the differences between running stack instances and the stacks defined in our CDK code, + and deploying stacks to any public AWS Region + + + +## Primer on CloudFormation - the Engine underneath CDK +The CDK is built over AWS CloudFormation service and uses it as the engine for provisioning AWS resources. So it is very important to have a good understanding of CloudFormation when working with CDK. + +**AWS CloudFormation is an infrastructure as code (IaC) service for modeling, provisioning, and managing AWS and third-party resources.** + +We work with templates and stacks when using AWS CloudFormation. **We create templates in YAML or JSON format to describe our AWS resources with their properties.** A sample template for hosting a web application looks like this: + +```yaml +Resources: + WebServer: + Type: 'AWS::EC2::Instance' + Properties: + SecurityGroups: + - !Ref WebServerSecurityGroup + KeyName: mykey + ImageId: 'ami-08e4e35cccc6189f4' + + Database: + Type: AWS::RDS::DBInstance + Properties: + AllocatedStorage: 20 + ... + Engine: 'mysql' + + WebServerSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + SecurityGroupIngress: + - CidrIp: 0.0.0.0/0 + FromPort: 80 + IpProtocol: tcp + +``` + +This template specifies the resources that we want for hosting a website: +1. an Amazon EC2 instance +2. an RDS MySQL database for storage +3. An Amazon EC2 security group to control firewall settings for the Amazon EC2 instance. + +**A CloudFormation stack is a collection of AWS resources that we can create, update, or delete as a single unit.** The stack in our example includes all the resources required to run the web application: such as a web server, a database, and firewall rules. + +When creating a stack, CloudFormation provisions the resources that are described in our template by making underlying service calls to AWS. + +AWS CDK allows us to define our infrastructure in our favorite programming language instead of using a declarative language like JSON or YAML as in CloudFormation. + +## Setting up the Prerequisites for CDK + +To work through some examples, let us first set up our development environment for writing AWS CDK apps. +We need to complete the following activities for working with CDK: + +1. **Configure Programmatic Access to an AWS Account**: We will need access to an AWS account where our infrastructure will be created. We need access keys to make [programmatic calls to AWS](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html). 
We can create access keys from the [AWS IAM console](https://console.aws.amazon.com/iam/) and set that up in our credentials file. + +2. **Install CDK Toolkit**: The AWS CDK Toolkit is the primary tool for interacting with the AWS CDK app through the CLI command `cdk`. It is an open-source project in [GitHub](https://github.com/aws/aws-cdk). Among its capabilities are producing and deploying the AWS CloudFormation templates generated by the AWS CDK. + +We can install the AWS CDK globally with [npm](https://www.npmjs.com): +```shell +npm install -g aws-cdk +``` +This will install the latest version of the CDK toolkit in our environment which we can verify with: + +```shell +cdk --version +``` +3. **Set up Language-Specific Prerequisites**: CDK supports multiple languages. We will be using Java in our examples here. We can create AWS CDK applications in Java using the language's familiar tools like the JDK (Oracle's, or an OpenJDK distribution such as Amazon Corretto) and Apache Maven. Prerequisites for other languages can be found in the official documentation. + + +## Creating a new CDK Project +Let us create a new CDK project using the CDK CLI using the `cdk init` command: +```shell + mkdir cdk-app + cd cdk-app + cdk init --language java +``` +Here we have created an empty directory `cdk-app` and used the `cdk init` command to create a [Maven](https://maven.apache.org/) based CDK project in Java language. + +Running the `cdk init` command also displays the important CDK commands as shown here: +```shell +Applying project template app for java +# Welcome to your CDK Java project! +... +... + +## Useful commands + + * `mvn package` compile and run tests + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + +``` + +The following files are generated by the CDK toolkit arranged in this folder structure: +```shell +├── README.md +├── cdk.json +├── pom.xml +├── src +│   ├── main +│   │   └── java +│   │   └── com +│   │   └── myorg +│   │   ├── CdkAppApp.java +│   │   └── CdkAppStack.java +│   └── test +│   └── java +│   └── com +│   └── myorg +│   └── CdkAppTest.java + +``` +We can see two Java classes with names ending with `App` and `Stack` are generated along with a test class in a Maven project. The class with its name ending in `App` contains the `main()` method and is the entry point of the application. + +We will understand more about the function of the `App` and the `Stack` classes and build on this further to define our infrastructure resources in the following sections. + + +## Introducing Constructs - the Basic Building Block +Before working any further with the files generated in our project, we need to understand the concept of constructs which are the basic building blocks of an AWS CDK application. + +Constructs are reusable components in which we bundle a bunch of infrastructure resources that can be further composed together for building more complex pieces of infrastructure. + +A construct can represent a single AWS resource, such as an Amazon Simple Storage Service (Amazon S3) bucket, or it can be a higher-level abstraction consisting of multiple AWS-related resources. As such constructs are represented as a tree starting with a root construct and multiple child constructs arranged in a hierarchy. 
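To make this tree concrete, here is a minimal sketch in Java (the identifiers are illustrative; the classes come from the AWS Construct Library) showing how the scope argument passed to each construct builds up the hierarchy:

```java
import software.amazon.awscdk.App;
import software.amazon.awscdk.Stack;
import software.amazon.awscdk.services.s3.Bucket;

public class ConstructTreeSketch {
    public static void main(String[] args) {
        // Every construct receives its parent scope and an id:
        // App (root) -> Stack (child of the app) -> Bucket (child of the stack)
        App app = new App();
        Stack stack = new Stack(app, "MyStack");
        new Bucket(stack, "MyBucket");

        app.synth();
    }
}
```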
+ +In all CDK-supported languages, a construct is represented as a base class from which all other types of constructs inherit. + + +## Structure of an CDK Application +A CDK project is composed of an `App` construct and one or more constructs of type `Stack`. When we generated the project by running `cdk init`, one `App` and one `Stack` construct were generated. + +### The App Construct - the CDK Application +The `App` is a construct that represents an entire CDK app. This construct is normally the root of the construct tree. We define an `App` instance as the entry point of our CDK application and then define the constructs where the `App` is used as the parent scope. + +We use the `App` construct to define one or more stacks within the scope of an application as shown in this code snippet: +```java +public class MyCdkApp { + public static void main(final String[] args) { + App app = new App(); + + new MyFirstStack(app, "myStack", StackProps.builder() + + .env(Environment.builder() + .account("********") + .region("us-east-1") + .build()) + + .build()); + + app.synth(); + } +} + +``` +In this example, the `App` instantiates a stack named `myStack` and sets the AWS account and region where the resources will be provisioned. + +### The Stack Construct - Unit of Deployment +A stack is the unit of deployment in the AWS CDK. All AWS resources defined within the scope of a stack are provisioned as a single unit. +We can define any number of stacks within a CDK app. + +For example, the following code defines an AWS CDK app with two stacks: + +```java +public class MyCdkApp { + public static void main(final String[] args) { + App app = new App(); + + new MyFirstStack(app, "stack1"); + new MySecondStack(app, "stack2"); + + app.synth(); + } +} + +``` +Here we are defining two stacks named `stack1` and `stack2` and calling the `synth()` method on the `app` instance to generate the CloudFormation template. + +## Defining the Infrastructure with CDK +After understanding the `App` and the `Stack` constructs, let us return to the project we generated earlier for creating our infrastructure resources. + +We will first change the `App` class in our project to specify the stack properties: AWS account, and the region where we want to create our infrastructure. We do this by specifying these values in an environment object as shown here: + +```java +public class CdkAppApp { + public static void main(final String[] args) { + App app = new App(); + + new CdkAppStack(app, "CdkAppStack", StackProps.builder() + + .env(Environment.builder() + .account("**********") + .region("us-east-1") + .build()) + + .build()); + + app.synth(); + } +} + +``` +We have defined the region as `us-east-1` along with our AWS account in the `env()` method. 
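A note before we get to deployment: CDK v2 generally expects the target account and region to be bootstrapped once with the CDK toolkit resources. If this environment has not been bootstrapped before, we can do that with the CDK CLI (the account ID and profile name are placeholders):

```shell
cdk bootstrap aws://<aws-account-id>/us-east-1 --profile <profile-name>
```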
+ +Next, we will modify our stack class to define an infrastructure resource: [AWS EC2](https://aws.amazon.com/ec2/) with a [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) in a default [VPC](https://aws.amazon.com/vpc/): + +```java +public class CdkAppStack extends Stack { + public CdkAppStack(final Construct scope, final String id) { + this(scope, id, null); + } + + public CdkAppStack( + final Construct scope, + final String id, + final StackProps props) { + super(scope, id, props); + + // Look up the default VPC + IVpc vpc = Vpc.fromLookup( + this, + "vpc", + VpcLookupOptions + .builder() + .isDefault(true) + .build()); + + // Create a SecurityGroup which will allow all outbound traffic + SecurityGroup securityGroup = SecurityGroup + .Builder + .create(this, "sg") + .vpc(vpc) + .allowAllOutbound(true) + .build(); + + // Create EC2 instance of type T2.micro + Instance.Builder.create(this, "Instance") + .vpc(vpc) + .instanceType(InstanceType.of( + InstanceClass.BURSTABLE2, + InstanceSize.MICRO)) + .machineImage(MachineImage.latestAmazonLinux()) + .blockDevices(List.of( + BlockDevice.builder() + .deviceName("/dev/sda1") + .volume(BlockDeviceVolume.ebs(50)) + .build(), + BlockDevice.builder() + .deviceName("/dev/sdm") + .volume(BlockDeviceVolume.ebs(100)) + .build())) + .securityGroup(securityGroup) + .build(); + + } +} + +``` + +In this code snippet, we are first looking up the default VPC in our AWS account. After that, we are creating a security group in this VPC that will allow all outbound traffic. Finally, we are creating the EC2 instance with properties: `instanceType`, `machineImage`, `blockDevices`, and `securityGroup`. + +## Synthesize Cloudformation Template +Synthesizing is the process of executing our CDK app to generate the equivalent of our CDK code as a CloudFormation template. We do this by running the `synth` command as follows: +```shell +cdk synth +``` + +If our app contained more than one `Stack`, we need to specify which `Stack(s)` to synthesize. We don't have to specify the `Stack` if it only contains only one `Stack`. + + +The `cdk synth` command executes our app, which causes the resources defined in it to be translated into an AWS CloudFormation template. The output of `cdk synth` is a YAML-format template. The beginning of our app's output is shown below: + +```shell +> cdk synth +Resources: + sg29196201: + Type: AWS::EC2::SecurityGroup + Properties: + ... + ... + InstanceC1063A87: + Type: AWS::EC2::Instance + Properties: + AvailabilityZone: us-east-1a + BlockDeviceMappings: + - DeviceName: /dev/sda1 + ... + ... + InstanceType: t2.micro + SecurityGroupIds: + ... + ... +``` +The output is the CloudFormation template containing the resources defined in the stack under our CDK app. +## Deploy Cloudformation Template +At last, we proceed to deploy the CDK app with the `deploy` command when the actual resources are provisioned in AWS. Let us run the `deploy` command by specifying our AWS credentials stored under a profile created in our environment: + +```shell +(base) Pratiks-MacBook-Pro:cdk-app pratikdas$ cdk deploy --profile pratikpoc +``` + +The output of the deploy command looks like this: +```shell +✨ Synthesis time: 8.18s + +This deployment will make potentially sensitive changes according to your current security approval level (--require-approval broadening). 
+Please confirm you intend to make the following modifications: + +IAM Statement Changes +┌───┬──────────────────────────────┬────────┬────────────────┬───────────────────────────┬───────────┐ +│ │ Resource │ Effect │ Action │ Principal │ Condition │ +├───┼──────────────────────────────┼────────┼────────────────┼───────────────────────────┼───────────┤ +│ + │ ${Instance/InstanceRole.Arn} │ Allow │ sts:AssumeRole │ Service:ec2.amazonaws.com │ │ +└───┴──────────────────────────────┴────────┴────────────────┴───────────────────────────┴───────────┘ +Security Group Changes +┌───┬───────────────┬─────┬────────────┬─────────────────┐ +│ │ Group │ Dir │ Protocol │ Peer │ +├───┼───────────────┼─────┼────────────┼─────────────────┤ +│ + │ ${sg.GroupId} │ Out │ Everything │ Everyone (IPv4) │ +└───┴───────────────┴─────┴────────────┴─────────────────┘ +(NOTE: There may be security-related changes not in this list. See https://github.com/aws/aws-cdk/issues/1299) + +Do you wish to deploy these changes (y/n)? y +CdkAppStack: deploying... +[0%] start: Publishing 7815fc615f7d50b22e75cf1d134480a5d44b5b8b995b780207e963a44f27e61b:675153449441-us-east-1 +[100%] success: Published 7815fc615f7d50b22e75cf1d134480a5d44b5b8b995b780207e963a44f27e61b:675153449441-us-east-1 +CdkAppStack: creating CloudFormation changeset... + + + + + + + ✅ CdkAppStack + +✨ Deployment time: 253.98s + +Stack ARN: +arn:aws:cloudformation:us-east-1:675153449441:stack/CdkAppStack/b9ab5740-7919-11ec-9cad-0a05d9e5c641 + +✨ Total time: 262.16s + + +``` +As part of deploy, first, a changeset of the resources is generated which we need to confirm for them to be provisioned. + +## Destroying the Infrastructure +When we no longer need the infrastructure, we can dispose of all the provisioned resources by running the `destroy` command: + +```shell +> cdk destroy --profile pratikpoc +Are you sure you want to delete: CdkAppStack (y/n)? y +CdkAppStack: destroying... + + + + ✅ CdkAppStack: destroyed + + +``` +As a result of running the `destroy` command, all the resources under the stack are destroyed as a single unit. + + + +## Construct Library and the Construct Hub +The AWS CDK contains the [AWS Construct Library](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-construct-library.html), which includes constructs that represent all the resources available on AWS. This library has three levels of constructs : + +- **Level 1 (L1) Constructs**: These are low-level constructs also called CFN Resources which directly represent all resources available in AWS CloudFormation. They are named CfnXyz, where Xyz is the name of the resource. We have to configure all the properties of the L1 constructs. For example, we will define an EC2 instance with CfnInstance class and configure all its properties. + +- **Level 2 (L2) Constructs**: These are slightly higher level constructs than the L1 constructs with some of the properties of the resources defined as defaults.AWS resources with a higher-level, intent-based API. The `Instance` class that we used in our example to provision an EC2 instance is an L2 construct and comes with default properties set. + +- **Level 3 (L3) Constructs**: These constructs are also called patterns, these constructs are designed to help us complete common tasks in AWS, often involving multiple kinds of resources. 
For example, the [aws-ecs-patterns](https://docs.aws.amazon.com/cdk/api/v2//docs/aws-cdk-lib.aws_ecs_patterns-readme.html) provides higher-level Amazon ECS constructs which follow common architectural patterns for application and network Load Balanced Services, Queue Processing Services, and Scheduled Tasks (cron jobs). + +Similarly, the Construct Hub is a resource to help us discover additional constructs from AWS, third parties, and the open-source CDK community. + +## Writing Our Own Curated Constructs + +We can also write our own constructs by extending the `Construct` base class as shown here: + +```java +public class MyStorageBucket extends Construct{ + + public MyStorageBucket(final Construct scope, final String id) { + super(scope, id); + Bucket bucket = new Bucket(this, "mybucket"); + + LifecycleRule lifecycleRule = LifecycleRule.builder() + .abortIncompleteMultipartUploadAfter(Duration.minutes(30)) + .enabled(false) + .expiration(Duration.minutes(30)) + .expiredObjectDeleteMarker(false) + .id("myrule") + .build(); + + bucket.addLifecycleRule(lifecycleRule); + + } + +} + +``` +This construct can be used for creating an S3 bucket construct with a lifecycle rule attached. + +We can also create constructs by the composition of lower-level constructs. This way we can define reusable components and share them with other teams like any other code. + +For example, in an organization setup, a team can define a construct to enforce security best practices for an AWS resource like EC2 or S3 and share it with other teams in the organization. Other teams can now use this construct when building different applications without provisioning their AWS resources without any risk of non-compliance with organizations' security policies. + + + +## Conclusion + +Here is a list of the major points for a quick reference: +1. AWS Cloud Development Kit (CDK) is a framework for defining cloud infrastructure in code and provisioning it through AWS CloudFormation. +2. Multiple programming languages are supported by CDK. +2. Constructs are the basic building blocks of CDK. +3. The App construct represents the CDK application. +4. We define the resources which we want to provision in the Stack construct. +5. There are three levels of constructs: L1, L2, and L3 in the Construct library. +6. The Construct Hub is a resource to help us discover additional constructs from AWS, third parties, and the open-source CDK community +7. We can curate our constructs usually by the composition of lower-level constructs. This way we can define reusable components and share them with other teams like any other code. +8. As with all frameworks, AWS CDK has recommended [best practices](https://docs.aws.amazon.com/cdk/v2/guide/best-practices.html) which should be followed for building CDK applications. +9. Important cdk commands: +```shell + +cdk init app --language java // Generate the CDK project +cdk synth // Generate the CloudFormation Template +cdk diff // Finding the difference between deployed resources and new resources +cdk deploy // Deploy the app to provision the resources +cdk destroy // Dispose of the infrastructure + +``` + + +You can refer to all the source code used in the article on [Github](https://github.com/thombergs/code-examples/tree/master/aws/cdkv2). 
+ diff --git a/content/blog/2022/2022-01-22-getting-started-with-amazon-SQS.md b/content/blog/2022/2022-01-22-getting-started-with-amazon-SQS.md new file mode 100644 index 000000000..29e94d204 --- /dev/null +++ b/content/blog/2022/2022-01-22-getting-started-with-amazon-SQS.md @@ -0,0 +1,901 @@ +--- +authors: [pratikdas] +title: "Getting Started with AWS SQS" +categories: ["aws"] +date: 2022-01-20T00:00:00 +excerpt: "Amazon Simple Queue Service (SQS) is a fully managed message queuing service. We can send, store, and receive messages at any volume, without losing messages or requiring other systems to be available. In this article, we will introduce Amazon SQS, understand its core concepts and work through some examples." +image: images/stock/0115-2021-1200x628-branded.jpg +url: getting-started-with-aws-sqs +--- + +Amazon Simple Queue Service (SQS) is a fully managed message queuing service that enables decoupling and communication between the components of a distributed system. We can send, store, and receive messages at any volume, without losing messages or requiring other systems to be available. + +Being fully managed, Amazon SQS also eliminates the additional overhead associated with managing and operating message-oriented middleware thereby empowering developers to focus on application development instead of managing infrastructure. + +In this article, we will introduce Amazon SQS, understand its core concepts of the queue and sending and receiving messages and work through some examples. + +{{% github "https://github.com/thombergs/code-examples/tree/master/aws/sqs" %}} + +## What is Message Queueing + +Message Queueing is an asynchronous style of communication between two or more processes. + +Messages and queues are the basic components of a message queuing system. + +Programs communicate with each other by sending data in the form of messages which are placed in a storage called a queue, instead of calling each other directly. The receiver programs retrieve the message from the queue and do the processing without any knowledge of the producer programs. + +This allows the communicating programs to run independently of each other, at different speeds and times, in different processes, and without having a direct connection between them. + +## Core Concepts of Amazon SQS + +The Amazon Simple Queue Service (SQS) is a fully managed distributed Message Queueing System. The queue provided by the SQS service redundantly stores the messages across multiple Amazon SQS servers. Let us look at some of its core concepts: + +### Standard Queues vs FIFO Queues + +Amazon SQS provides two types of message queues: + +**Standard queues**: They provide maximum throughput, best-effort ordering, and at-least-once delivery. The standard queue is the default queue type in SQS. When using standard queues, we should design our applications to be idempotent so that there is no negative impact when processing the same message more than once. + +**FIFO queues**: FIFO (First-In-First-Out) queues are used for messaging when the order of operations and events exchanged between applications is important, or in situations where we want to avoid processing duplicate messages. FIFO queues guarantee that messages are processed exactly once, in the exact order that they are sent. + +### Ordering and Deduplication (Exactly-Once Delivery) in FIFO Queues + +A FIFO queue preserves the order in which messages are sent and received and a message is delivered exactly once. 
The messages are ordered based on message group ID. If multiple hosts send messages with the same message group ID to a FIFO queue, Amazon SQS stores the messages in the order in which they arrive for processing.

To make sure that Amazon SQS preserves the order in which messages are sent and received, each producer should use a unique message group ID to send all its messages.

Messages that belong to the same message group are always processed one by one, in a strict order relative to the message group.

FIFO queues also help us to avoid sending duplicate messages to a queue. If we send the same message within the 5-minute deduplication interval, it is not added to the queue. We can configure deduplication in two ways:

- **Enabling Content-Based Deduplication**: When this property is enabled for a queue, SQS uses a SHA-256 hash to generate the message deduplication ID from the contents of the message body.

- **Providing the Message Deduplication ID**: When a message with a particular message deduplication ID is sent, any messages subsequently sent with the same message deduplication ID are accepted successfully but are not delivered during the 5-minute deduplication interval.


### Queue Configurations

After creating the queue, we need to configure it with specific attributes based on our message processing requirements. Let us look at some of the properties that we can configure:

**Dead-letter Queue**: A dead-letter queue is a queue that one or more source queues can use for messages that are not consumed successfully. It is useful for debugging our applications or messaging system because it lets us isolate unconsumed messages to determine why their processing did not succeed.

**Dead-letter Queue Redrive**: We use this configuration to define the time after which unconsumed messages are moved out of an existing dead-letter queue back to their source queues.

**Visibility Timeout**: The visibility timeout is a period of time during which a message received from a queue by one consumer is not visible to the other message consumers. Amazon SQS prevents other consumers from receiving and processing the message during the visibility timeout period.

**Message Retention Period**: The amount of time for which a message remains in the queue. Messages should be received and processed before this period elapses; they are automatically deleted from the queue once the message retention period has expired.

**DelaySeconds**: The length of time for which the delivery of all messages in the queue is delayed.

**MaximumMessageSize**: The limit on the size of a message in bytes that can be sent to SQS before being rejected.

**ReceiveMessageWaitTimeSeconds**: The length of time for which a message receiver waits for a message to arrive. This value defaults to `0` and can take any value from `0` to `20` seconds.

**Short and Long Polling**: Amazon SQS uses short polling and long polling mechanisms to receive messages from a queue. Short polling returns immediately, even if the message queue being polled is empty, while long polling does not return a response until a message arrives in the message queue, or the long polling period expires. Queues use short polling by default. Long polling is preferable to short polling in most cases.


## Creating a Standard SQS Queue

We can use the [Amazon SQS console](https://console.aws.amazon.com/sqs/#/create-queue) to create standard queues and FIFO queues.
The console provides default values for all settings except for the queue name. + +However, for our examples, we will use [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/home.html) to create our queues and send and receive messages. + +So we will use the AWS Java SDK and add it as a Maven dependency. The AWS SDK for Java simplifies the use of AWS Services by providing a set of libraries that are based on common design patterns familiar to Java developers. + +Let us first add the following Maven dependency in our `pom.xml`: + +```xml + + software.amazon.awssdk + sqs + + + + + software.amazon.awssdk + bom + 2.17.116 + pom + import + + + + +``` + +We will next create our queue in the `ResourceHelper` class with the Java SDK: + +```java +public class ResourceHelper { + + private static Logger logger + = Logger.getLogger(ResourceHelper.class.getName()); + + public static void main(String[] args) { + createStandardQueue(); + } + + public static void createStandardQueue() { + SqsClient sqsClient = getSQSClient(); + + // Define the request for creating a + // standard queue with default parameters + CreateQueueRequest createQueueRequest + = CreateQueueRequest.builder() + .queueName("myqueue") + .build(); + // Create the queue + sqsClient.createQueue(createQueueRequest); + } + + + private static SqsClient getSQSClient() { + AwsCredentialsProvider credentialsProvider = + ProfileCredentialsProvider.create(""); + + SqsClient sqsClient = SqsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1).build(); + + return sqsClient; + } + +} + +``` + +Here we are first establishing a connection with the AWS SQS service using the `SqsClient` class. After that, the message to be sent is constructed with the `SendMessageRequest` class by specifying the URL of the queue and the message body. + +Then the message is sent by invoking the `sendMessage()` method on the `SqsClient` instance. + +When we run this program we can see the `message ID` in the output: + +```shell +INFO: message id: fa5fd857-59b4-4a9a-ba54-a5ab98ee82f9 + +``` +This message ID returned in the `sendMessage()` response is assigned by SQS and is useful for identifying messages. + +We can also send multiple messages in a single request using the `sendMessageBatch()` method of the `SqsClient` class. + +## Creating a First-In-First-Out (FIFO) SQS Queue + +Let us now create a FIFO queue that we can use for sending non-duplicate messages in a fixed sequence. 
We will do this in the `createFifoQueue()` method as shown here: + +```java +public class ResourceHelper { + private static Logger logger + = Logger.getLogger(ResourceHelper.class.getName()); + + public static void main(String[] args) { + createFifoQueue(); + } + + + public static void createFifoQueue() { + SqsClient sqsClient = getSQSClient(); + + + // Define attributes of FIFO queue in an attribute map + Map attributeMap + = new HashMap(); + + // FIFO_QUEUE attribute is set to true mark the queue as FIFO + attributeMap.put( + QueueAttributeName.FIFO_QUEUE, "true"); + + // Scope of DEDUPLICATION is set to messageGroup + attributeMap.put( + QueueAttributeName.DEDUPLICATION_SCOPE, "messageGroup"); + + // CONTENT_BASED_DEDUPLICATION is disabled + attributeMap.put( + QueueAttributeName.CONTENT_BASED_DEDUPLICATION, "false"); + + // Prepare the queue creation request and end the name of the queue with fifo + CreateQueueRequest createQueueRequest + = CreateQueueRequest.builder() + .queueName("myfifoqueue.fifo") + .attributes(attributeMap ) + .build(); + + // Create the FIFO queue + CreateQueueResponse createQueueResponse + = sqsClient.createQueue(createQueueRequest); + + // URL of the queue is returned in the response + logger.info("url "+createQueueResponse.queueUrl()); + } + + private static SqsClient getSQSClient() { + AwsCredentialsProvider credentialsProvider + = ProfileCredentialsProvider.create(""); + + SqsClient sqsClient = SqsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1).build(); + return sqsClient; + } + + private static String getQueueArn( + final String queueName, + final String region) { + return "arn:aws:sqs:"+region + ":" + + AppConfig.ACCOUNT_NO + ":" + + queueName; + } + +} + +``` +As we can see, we have defined a queue with the name `myfifoqueue.fifo`. The name of FIFO queues must end with `.fifo`. We have set the property: `contentBasedDeduplication` to `false` which means that we need to explicitly send `messageDeduplicationId` with the message so that SQS can identify them as duplicates. + +Further, the `deduplicationScope` property of the queue is set to `MESSAGE_GROUP` which indicates the message group as the scope for identifying duplicate messages. The `deduplicationScope` property can alternately be set to `QUEUE`. + +## Sending Message to FIFO Queue + +As explained earlier, a FIFO queue preserves the order in which messages are sent and received. + +To check this behavior, let us send five messages to the FIFO queue, we created earlier : + +```java +public class MessageSender { + + private static Logger logger + = Logger.getLogger(MessageSender.class.getName()); + + public static void sendMessageToFifo() { + SqsClient sqsClient = getSQSClient(); + + Map messageAttributes + = new HashMap(); + ... + ... + + final String queueURL = "https://sqs.us-east-1.amazonaws.com/" + +AppConfig.ACCOUNT_NO + + "/myfifoqueue.fifo"; + + // List of deduplicate IDs to be sent with different messages + List dedupIds = List.of("dedupid1", + "dedupid2", + "dedupid3", + "dedupid2", + "dedupid1"); + + String messageGroupId = "signup"; + + // List of messages to be sent. 2 of them are duplicates + List messages = List.of( + "My fifo message1", + "My fifo message2", + "My fifo message3", + "My fifo message2", // Duplicate message + "My fifo message1"); // Duplicate message + + short loop = 0; + + // sending the above messages in sequence. + // Duplicate messages will be sent but will not be received. 
+ for (String message : messages) { + + // message is identified as duplicate + // if deduplication id is already used + SendMessageRequest sendMessageRequest + = SendMessageRequest.builder() + .queueUrl(queueURL) + .messageBody(message) + .messageAttributes(messageAttributes) + .messageDeduplicationId(dedupIds.get(loop)) + .messageGroupId(messageGroupId) + .build(); + + SendMessageResponse sendMessageResponse + = sqsClient + .sendMessage(sendMessageRequest); + + logger.info("message id: "+ sendMessageResponse.messageId()); + + loop+=1; + } + + + sqsClient.close(); + } + +``` +A sample of the output generated by running this program is shown: + +```shell + +message id and sequence no.: 9529ddac-8946-4fee-a2dc-7be428666b63 | 18867399222923248640 + +message id and sequence no.: 2ba4d7dd-877c-4982-b41e-817c99633fc4 | 18867399223023088896 + +message id and sequence no.: ad354de3-3a89-4400-83b8-89a892c30526 | 18867399223104239872 + +message id and sequence no.: 2ba4d7dd-877c-4982-b41e-817c99633fc4 | 18867399223023088896 + +message id and sequence no.: 9529ddac-8946-4fee-a2dc-7be428666b63 | 18867399222923248640 + + +``` +When SQS accepts the message, it returns a sequence number along with a message identifier. The Sequence number as we can see is a large, non-consecutive number that Amazon SQS assigns to each message. + +We are sending five messages with two of them being duplicates. Since we had set the `contentBasedDeduplication` property to `true`, SQS determines duplicate messages by the `messageDeduplicationId`. The messages: "My fifo message1" and "My fifo message2" are each sent twice with the same `messageDeduplicationId` while "My fifo message3" is sent once. + +Although we have sent five messages, we will only receive three unique messages in the same order when we consume the messages from the queue. We will look at how to consume messages from SQS in the next section. + +## Consuming Messages from a Queue + +Now let us read the message we sent to the queue from a different consumer program. As explained earlier, in keeping with the asynchronous programming model, the consumer program is independent of the sender program. The sender program does not wait for the consumer program to read the message before completion. + +We retrieve messages that are currently in the queue by calling the AmazonSQS client’s `receiveMessage()` method of the `SqsClient` class as shown here: + +```java +public class MessageReceiver { + + public static void receiveMessage() { + SqsClient sqsClient = getSQSClient(); + + final String queueURL = "https://sqs.us-east-1.amazonaws.com/" + +AppConfig.ACCOUNT_NO + + "/myqueue"; + + + // long polling and wait for waitTimeSeconds before timing out + ReceiveMessageRequest receiveMessageRequest = + ReceiveMessageRequest + .builder() + .queueUrl(queueURL) + .waitTimeSeconds(20) + .messageAttributeNames("trace-id") + .build(); + + List messages + = sqsClient + .receiveMessage(receiveMessageRequest) + .messages(); + } + + private static SqsClient getSQSClient() { + AwsCredentialsProvider credentialsProvider = + ProfileCredentialsProvider.create(""); + + SqsClient sqsClient = SqsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1).build(); + return sqsClient; + } + +} +``` +Here we have enabled long polling for receiving the SQS messages by setting the wait time as `20` seconds on the `ReceiveMessageRequest` which we have supplied to the `receiveMessage()` method of the `SqsClient` class. 
+ +The `receiveMessage()` returns the messages from the queue as a list of `Message` objects. + + +## Deleting Messages from a Queue with the ReceiptHandle + +We get a `receiptHandle` when we receive a message from SQS. + +We use this `receiptHandle` to delete a message from a queue as shown in this example, otherwise, the messages left in a queue are deleted automatically after the expiry of the retention period configured for the queue: + +```java +public class MessageReceiver { + + + public static void receiveFifoMessage() throws InterruptedException { + SqsClient sqsClient = getSQSClient(); + + final String queueURL = "https://sqs.us-east-1.amazonaws.com/" + +AppConfig.ACCOUNT_NO + + "/myfifoqueue.fifo"; + + ... + ... + + while(true) { + + Thread.sleep(20000l); + List messages + = sqsClient.receiveMessage(receiveMessageRequest) + .messages(); + + messages.stream().forEach(msg->{ + + // Get the receipt handle of the message received + String receiptHandle = msg.receiptHandle(); + + // Create the delete request with the receipt handle + DeleteMessageRequest deleteMessageRequest + = DeleteMessageRequest + .builder() + .queueUrl(queueURL) + .receiptHandle(receiptHandle) + .build(); + + // Delete the message + DeleteMessageResponse deleteMessageResponse + = sqsClient.deleteMessage(deleteMessageRequest ); + + }); + + } + + } + + private static SqsClient getSQSClient() { + + AwsCredentialsProvider credentialsProvider + = ProfileCredentialsProvider.create(""); + + SqsClient sqsClient = SqsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1) + .build(); + return sqsClient; + } + +} + +``` +In this `receiveFifoMessage()`, we get the `receiptHandle` of the message received from SQS and use this to delete the queue. + +The `receiptHandle` is associated with a specific instance of receiving a message. It is different each time we receive the message in case we receive the message more than once. So we must use the most recently received `receiptHandle` for the message for sending deletion requests. + +For standard queues, it is possible to receive a message even after we have deleted it because of the distributed nature of the underlying storage. We should ensure that our application is idempotent to handle this scenario. + +## Handling Messaging Failures with SQS Dead Letter Queue (DLQ) +Sometimes, messages cannot be processed because of many erroneous conditions within the producer or consumer application. We can isolate the messages which failed processing by moving them to a separate queue called Dead Letter Queue (DLQ). + +After we have fixed the consumer application or when the consumer application is available to consume the message, we can move the messages back to the source queue using the dead-letter queue redrive capability. + +A dead-letter queue is a queue that one or more source queues can use for messages that are not consumed successfully. + +Amazon SQS does not create the dead-letter queue automatically. We must first create the queue before using it as a dead-letter queue. 
With this understanding, let us update the queue creation method that we defined earlier using AWS SDK: + + +```java +public class ResourceHelper { + private static Logger logger + = Logger.getLogger(ResourceHelper.class.getName()); + + public static void main(String[] args) { + createStandardQueue(); + } + + public static void createStandardQueue() { + SqsClient sqsClient = getSQSClient(); + + String dlqName = "mydlq"; + CreateQueueRequest createQueueRequest + = CreateQueueRequest.builder() + .queueName(dlqName) + .build(); + + + // Create dead letter queue + CreateQueueResponse createQueueResponse + = sqsClient.createQueue(createQueueRequest); + + + String dlqArn = getQueueArn(dlqName,"us-east-1"); + + Map attributeMap + = new HashMap(); + + attributeMap.put(QueueAttributeName.REDRIVE_POLICY, + "{\"maxReceiveCount\":10,\"deadLetterTargetArn\":\""+dlqArn+"\"}"); + + // Prepare request for creating the standard queue + createQueueRequest = CreateQueueRequest.builder() + .queueName("myqueue") + .attributes(attributeMap) + .build(); + + // create the queue + createQueueResponse = sqsClient.createQueue(createQueueRequest); + + logger.info("Queue URL " + createQueueResponse.queueUrl()); + } + + private static String getQueueArn( + final String queueName, + final String region) { + + return "arn:aws:sqs:" + + region + + ":" + AppConfig.ACCOUNT_NO+ ":" + queueName; + } + +} + +``` +Here we have first defined a standard queue named `mydlq` for using it as the dead-letter queue. + +The redrive policy of an SQS queue is used to specify the source queue, the dead-letter queue, and the conditions under which Amazon SQS will move messages if the consumer of the source queue fails to process a message a specified number of times. The `maxReceiveCount` is the number of times a consumer tries to receive a message from a queue without deleting it before being moved to the dead-letter queue. + +Accordingly, we have defined the `Redrive policy` in the attribute map when creating the source queue with `maxReceiveCount` value of `10` and Amazon Resource Names (ARN) of the dead-letter queue. + + +## Trigger AWS Lambda Function by Incoming Messages in the Queue + +AWS Lambda is a serverless, event-driven compute service which we can use to run code for any type of application or backend service without provisioning or managing servers. + +We can trigger the Lambda function from many AWS services and only pay for what we use. + +We can attach an SQS standard and FIFO queues to an AWS Lambda function as an event source. The lambda function will get triggered whenever messages are put in the queue. The function will read and process messages in the queue. + +The Lambda function will poll the queue and invoke the Lambda function by passing an event parameter that contains the messages in the queue. + +Lambda function supports many language runtimes like Node.js, Python, C#, and Java. + +Let us attach the following lambda function to our standard queue created earlier to process SQS messages: + +```js +exports.handler = async function(event, context) { + event.Records.forEach(record => { + const { body } = record; + console.log(body); + }); + return {}; +} + +``` +This function is written in Javascript and uses the Node.js runtime during execution in AWS Lambda. A handler function named `handler()` is exported that takes an `event` object and a `context` object as parameters and prints the message received from the SQS queue in the console. The handler function in Lambda is the method that processes events. 
Lambda runs the handler method when the function is invoked. + +We will also need to create an execution role with lambda with the following IAM policy attached: + +```xml +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "sqs:DeleteMessage", + "sqs:ReceiveMessage", + "sqs:GetQueueAttributes" + ], + "Resource": [ + "arn:aws:sqs:us-east-1::myqueue" + ] + } + ] +} + +``` +For processing messages from the queue, the lambda function needs permissions for `DeleteMessage`, `ReceiveMessage`, `GetQueueAttributes` on our SQS queue and an AWS managed policy: `AWSLambdaBasicExecutionRole` for permission for writing to CloudWatch logs. + +Let us create this lambda function from the AWS console as shown here: + +{{% image alt="Lambda Trigger for SQS queue" src="images/posts/aws-sqs/lambda-trigger.png" %}} + + +Let us run our `sendMessage()` method to send a message to the queue where the lambda function is attached. Since the lambda function is attached to be triggered by messages in the queue, we can see the message sent by the `sendMessage()` method in the CloudWatch console: + +{{% image alt="Lambda Trigger for SQS queue" src="images/posts/aws-sqs/cloudwatch-log.png" %}} + +We can see the message: `Test message` which was sent to the SQS queue, printed by the lambda receiver function in the CloudWatch console. + +We can also specify a queue to act as a dead-letter queue for messages that our Lambda function fails to process. + +## Sending Message Metadata with Message Attributes + +Message attributes are structured metadata that can be attached and sent together with the message to SQS. + +Message Metadata are of two kinds : + +- **Message Attributes**: These are custom metadata usually added and extracted by our applications for general-purpose use cases. Each message can have up to 10 attributes. + +- **Message System Attributes**: These are used to store metadata for other AWS services like AWS X-Ray. 
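Before looking at the sending side, note that a consumer can read these attributes back from each received `Message` object; the `MessageReceiver` example earlier already requested the `trace-id` attribute via `messageAttributeNames()`. A minimal sketch, assuming `msg` is one of the received messages:

```java
// 'msg' is a software.amazon.awssdk.services.sqs.model.Message returned by
// receiveMessage(); the receive request must have asked for the attribute
// (e.g. messageAttributeNames("trace-id")) for it to be populated.
MessageAttributeValue traceIdAttribute = msg.messageAttributes().get("trace-id");
if (traceIdAttribute != null) {
    logger.info("trace-id: " + traceIdAttribute.stringValue());
}
```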
+ +Let us modify our earlier example of sending a message by adding a message attribute to be sent with the message: + +```java +public class MessageSender { + + private static final String TRACE_ID_NAME = "trace-id"; + private static Logger logger + = Logger.getLogger(MessageSender.class.getName()); + + public static void main(String[] args) { + sendMessage(); + } + + public static void sendMessage() { + SqsClient sqsClient = getSQSClient(); + + Map messageAttributes = + new HashMap(); + + // generates a UUID as the traceId + String traceId = UUID.randomUUID().toString(); + + // add traceId as a message attribute + messageAttributes.put(TRACE_ID_NAME, + MessageAttributeValue.builder() + .dataType("String") + .stringValue(traceId) + .build()); + + final String queueURL + = "https://sqs.us-east-1.amazonaws.com/" + +AppConfig.ACCOUNT_NO + "/myqueue"; + + SendMessageRequest sendMessageRequest = SendMessageRequest + .builder() + .queueUrl(queueURL) + .messageBody("Test message") + .messageAttributes(messageAttributes) + .build(); + + SendMessageResponse sendMessageResponse + = sqsClient + .sendMessage(sendMessageRequest); + + logger.info("message id: "+ sendMessageResponse.messageId()); + + sqsClient.close(); + } + + + + private static SqsClient getSQSClient() { + AwsCredentialsProvider credentialsProvider + = ProfileCredentialsProvider.create(""); + + SqsClient sqsClient = SqsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1).build(); + return sqsClient; + } + +} + +``` +In this example, we have added a message attribute named `traceId` which will be of `String` type. + + +## Defining SQS Queue as an SNS Topic Subscriber + +Amazon Simple Notification Service (SNS) is a fully managed publish/subscribe messaging service that allows us to fan out messages from a logical access point called Topic to multiple recipients at the same time. + +SNS topics support different subscription types like SQS queues, AWS Lambda functions, HTTP endpoints, email addresses, SMS, and mobile push where we can publish messages. + +We can subscribe multiple Amazon SQS queues to an Amazon Simple Notification Service (Amazon SNS) topic. When we publish a message to a topic, Amazon SNS sends the message to each of the subscribed queues. 
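One point to keep in mind, not shown in the code that follows: for SNS to be able to deliver messages into the queue, the queue's access policy generally needs to allow the topic to call `sqs:SendMessage`. A sketch of such a policy statement, with placeholder ARNs:

```json
{
  "Effect": "Allow",
  "Principal": { "Service": "sns.amazonaws.com" },
  "Action": "sqs:SendMessage",
  "Resource": "arn:aws:sqs:us-east-1:<account-id>:myqueue",
  "Condition": {
    "ArnEquals": { "aws:SourceArn": "arn:aws:sns:us-east-1:<account-id>:mytopic" }
  }
}
```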
+ +Let us update our `ResourceHelper` class by adding a method to create an SNS topic along with a subscription to the SQS Standard Queue created earlier: + +```java +public class ResourceHelper { + private static Logger logger + = Logger.getLogger(ResourceHelper.class.getName()); + + public static void main(String[] args) { + createSNSTopicWithSubscription(); + } + + public static void createSNSTopicWithSubscription() { + SnsClient snsClient = getSNSClient(); + + // Prepare the request for creating SNS topic + CreateTopicRequest createTopicRequest + = CreateTopicRequest + .builder() + .name("mytopic") + .build(); + + // Create the topic + CreateTopicResponse createTopicResponse + = snsClient.createTopic(createTopicRequest ); + + String topicArn = createTopicResponse.topicArn(); + + String queueArn= getQueueArn("myqueue","us-east-1"); + + // Prepare the SubscribeRequest for subscribing + // endpoint of protocol sqs to the topic of topicArn + SubscribeRequest subscribeRequest = SubscribeRequest.builder() + .protocol("sqs") + .topicArn(topicArn) + .endpoint(queueArn) + .build(); + + SubscribeResponse subscribeResponse + = snsClient.subscribe( subscribeRequest ); + + + logger.info("subscriptionArn " + + subscribeResponse.subscriptionArn()); + } + +} + +``` +Here we have first created an SNS topic of the name `mytopic`. Then we have created a subscription by adding the SQS queue as a subscriber to the topic. + +Let us now publish a message to this SNS topic using AWS Java SDK as shown below: + +```java +public class MessageSender { + private static Logger logger + = Logger.getLogger(MessageSender.class.getName()); + + + public static void main(String[] args) { + + sendMessageToSnsTopic(); + } + + public static void sendMessageToSnsTopic() { + SnsClient snsClient = getSNSClient(); + + final String topicArn + = "arn:aws:sns:us-east-1:" + AppConfig.ACCOUNT_NO + ":mytopic"; + + // Build the publish request with the + // SNS Topic Arn and the message body + PublishRequest publishRequest = PublishRequest + .builder() + .topicArn(topicArn) + .message("Test message published to topic") + .build(); + + // Publish the message to the SNS topic + PublishResponse publishResponse + = snsClient.publish(publishRequest); + + logger.info("message id: "+ publishResponse.messageId()); + + snsClient.close(); + } + + private static SnsClient getSNSClient() { + AwsCredentialsProvider credentialsProvider + = ProfileCredentialsProvider.create(""); + + // Construct the SnsClient with AWS account credentials + SnsClient snsClient = SnsClient + .builder() + .credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1).build(); + + return snsClient; + } + +} + +``` + +Here we have set up the SNS client using our AWS account credentials and invoked the publish method on the `SnsClient` instance to publish a message to the topic. The SQS queue being a subscriber to the queue receives the message from the topic. + +## Security and Access Control +SQS comes with many security features designed for least privilege access and protecting data integrity. +It requires three types of roles for access to the different components of the producer-consumer model: + +**Administrators**: Administrators need access to control queue policies and to create, modify, and delete queues. +**Producers**: They need access to send messages to queues. +**Consumers**: They need access to receive and delete messages from queues. + +We should define IAM roles to grant these three types of access to SQS to applications or services. 
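For example, the role used by a producer application could be restricted to sending only; a sketch of such a policy (the queue ARN is illustrative):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sqs:SendMessage",
      "Resource": "arn:aws:sqs:us-east-1:<account-id>:myqueue"
    }
  ]
}
```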
SQS also supports encryption at rest for encrypting messages stored in the queue:

1. **SSE-KMS**: Server-side encryption with encryption keys managed in AWS Key Management Service
2. **SSE-SQS**: Server-side encryption with encryption keys managed by SQS

SSE encrypts the body of the message as soon as it is received by SQS. The message is stored in encrypted form, and SQS decrypts it only when it is sent to an authorized consumer.

## Conclusion

Here is a list of the major points for a quick reference:

1. Message Queueing is an asynchronous style of communication between two or more processes.

2. Messages and queues are the basic components of a message queuing system.

3. Amazon Simple Queue Service (SQS) is a fully managed message queuing service that we can use to send, store, and receive messages to enable asynchronous communication between decoupled systems.

4. SQS provides two types of queues: the Standard Queue and the First-In-First-Out (FIFO) Queue.

5. Standard queues are more performant but do not preserve message ordering.

6. FIFO queues preserve the order of the messages that are sent with the same message group identifier and also prevent the delivery of duplicate messages.

7. We used the AWS Java SDK to build queues, topics, and subscriptions and to send and receive messages. Apart from the SDK, we can use Infrastructure as Code (IaC) services of AWS like CloudFormation or the AWS Cloud Development Kit (CDK) with a supported language to build queues and topics.

8. We define a dead-letter queue (DLQ) to receive messages that have failed processing due to an erroneous condition in the producer or consumer program.

9. We also defined a Lambda function that gets triggered by messages arriving in the queue.

10. Finally, we defined an SQS queue as a subscription endpoint to an SNS topic to implement a publish-subscribe pattern of asynchronous communication.


You can refer to all the source code used in the article on [Github](https://github.com/thombergs/code-examples/tree/master/aws/sqs).

diff --git a/content/blog/2022/2022-01-28-getting-started-with-amazon-kinesis.md b/content/blog/2022/2022-01-28-getting-started-with-amazon-kinesis.md
new file mode 100644
index 000000000..0be54089f
--- /dev/null
+++ b/content/blog/2022/2022-01-28-getting-started-with-amazon-kinesis.md
@@ -0,0 +1,389 @@
---
authors: [pratikdas]
title: "Getting Started with AWS Kinesis"
categories: ["aws"]
date: 2022-01-20T00:00:00
excerpt: "Amazon Kinesis is a fully managed service for collecting and processing streaming data in real-time. Examples of streaming data are data collected from website click-streams, marketing and financial information, social media feeds, IoT sensors, and monitoring and operational logs. In this article, we will introduce Amazon Kinesis, understand its core concepts of creating data streams, sending data to and receiving data from streams, and deriving analytical insights using its different service variants: Kinesis Data Streams, Data Firehose, Data Analytics, and Video Streams."
image: images/stock/0115-2021-1200x628-branded.jpg
url: getting-started-with-aws-kinesis
---

Amazon Kinesis is a fully managed service for collecting and processing streaming data in real-time. Examples of streaming data are data collected from website click-streams, marketing and financial information, social media feeds, IoT sensors, and monitoring and operational logs.
In this article, we will introduce Amazon Kinesis, understand its core concepts of creating data streams, sending data to and receiving data from streams, and deriving analytical insights using its different service variants: Kinesis Data Streams, Data Firehose, Data Analytics, and Video Streams.

{{% github "https://github.com/thombergs/code-examples/tree/master/aws/kinesis" %}}

## What is Streaming Data?

Streaming data is generated continuously (in a stream) by multiple data sources, which typically send the data records simultaneously and in small batches, in the order of kilobytes.

Streaming data includes a wide variety of data such as log files generated by customers using mobile or web applications, e-commerce purchases, in-game player activity, information from social networks, financial trading floors, or geospatial services, and telemetry from connected devices or instrumentation in data centers.

Streaming data is processed sequentially and incrementally on a record-by-record basis or in batches aggregated over sliding time windows, and is used for a wide variety of analytics including correlations, aggregations, filtering, and sampling.

## What is Amazon Kinesis?

Amazon Kinesis is a fully managed platform for processing streaming data. It helps us to set up streams where producers can pump in data and receivers can consume it. Kinesis provides four specialized variants of services based on the type of stream processing we want to perform for our use case:

- **Kinesis Data Streams**: Amazon Kinesis Data Streams is a serverless streaming data service that we can use to build custom applications to process or analyze streaming data for specialized needs. We can add various types of data such as clickstreams, application logs, and social media to a Kinesis data stream from hundreds of thousands of sources.

- **Kinesis Data Firehose**: With Kinesis Data Firehose, we don't need to write applications or manage resources. We configure data producers to send data to Kinesis Data Firehose, and it automatically delivers the data to the specified destination. We can also configure Kinesis Data Firehose to transform the data before delivering it.

- **Kinesis Data Analytics**: With Amazon Kinesis Data Analytics for SQL Applications, we can process and analyze streaming data using standard SQL. The service enables us to quickly author and run powerful SQL code against streaming sources to perform time series analytics, feed real-time dashboards, and create real-time metrics.

- **Kinesis Video Streams**: Amazon Kinesis Video Streams is a fully managed AWS service that we can use to stream live video from devices to the AWS Cloud, or build applications for real-time video processing or batch-oriented video analytics.

Let us understand these services in the next sections. In each section, we will first introduce the key concepts of the service and then work through some examples.

## Kinesis Data Streams

Kinesis Data Streams is used to send data from data producers as soon as it is produced (in real-time) and then continuously process that data. The processing can include transforming the data before emitting it to another data store, or running real-time metrics and analytics.
When using Kinesis Data Streams, we first set up a data stream and then build producer applications which write data to the data stream and consumer applications that read data from the data stream:

// TODO diagram

As we can see in this diagram, the data stream is composed of multiple shards. Each shard contains a sequence of data records. When we send a record, we specify a partition key, which Kinesis Data Streams uses to determine the shard that the record is written to. Each data record stored in a shard gets a sequence number that is assigned by Kinesis Data Streams.

### Creating a Kinesis Data Stream

Let us first create the data stream to which we can send our data. We can create a data stream either using the AWS Kinesis console or using the AWS SDK:

```java
public class DataStreamResourceHelper {

  public static void main(String[] args) {
    createDataStream();
  }

  public static void createDataStream() {
    KinesisClient kinesisClient = getKinesisClient();

    CreateStreamRequest createStreamRequest
        = CreateStreamRequest
            .builder()
            .streamName(Constants.MY_DATA_STREAM)
            .streamModeDetails(
                StreamModeDetails
                    .builder()
                    .streamMode(StreamMode.ON_DEMAND)
                    .build())
            .build();

    // create the data stream in on-demand capacity mode
    CreateStreamResponse createStreamResponse
        = kinesisClient.createStream(createStreamRequest);

    DescribeStreamSummaryRequest describeStreamSummaryRequest
        = DescribeStreamSummaryRequest
            .builder()
            .streamName(Constants.MY_DATA_STREAM)
            .build();

    // poll the stream status every 20 seconds for at most 10 minutes
    // until the stream becomes ACTIVE
    long endTime = System.currentTimeMillis() + (10 * 60 * 1000);
    while (System.currentTimeMillis() < endTime) {
      try {
        Thread.sleep(20 * 1000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }

      try {
        DescribeStreamSummaryResponse describeStreamSummaryResponse
            = kinesisClient
                .describeStreamSummary(describeStreamSummaryRequest);

        StreamDescriptionSummary streamDescriptionSummary
            = describeStreamSummaryResponse.streamDescriptionSummary();

        if (streamDescriptionSummary.streamStatus() == StreamStatus.ACTIVE) {
          break;
        }
      } catch (ResourceNotFoundException e) {
        // the stream is not visible yet, keep polling
      }
    }
  }

  private static KinesisClient getKinesisClient() {
    AwsCredentialsProvider credentialsProvider =
        ProfileCredentialsProvider.create(Constants.AWS_PROFILE_NAME);

    KinesisClient kinesisClient = KinesisClient
        .builder()
        .credentialsProvider(credentialsProvider)
        .region(Region.US_EAST_1).build();
    return kinesisClient;
  }

}
```

Here we create a data stream in on-demand capacity mode and then poll the stream summary until the stream becomes active, because we can send data to the stream only after it reaches the `ACTIVE` state.

### Data Ingestion - Writing Data to Kinesis Data Streams

We can add data to a Kinesis data stream in different ways:
1. **AWS SDK**: with the `PutRecord` and `PutRecords` operations
2. **Kinesis Producer Library (KPL)**: The KPL is a library for writing data to a Kinesis data stream. Its core is written in C++ and runs as a child process of the main user process.
3. **Amazon Kinesis Agent**: a standalone application that monitors a set of files and continuously sends new data to the stream.

Let us use the AWS Java SDK to add records to the Kinesis data stream created in the previous section.
We need to first configure the Kinesis module of the AWS SDK as a Maven dependency in our `pom.xml` as shown below:

```xml
<dependencies>
  <dependency>
    <groupId>software.amazon.awssdk</groupId>
    <artifactId>kinesis</artifactId>
  </dependency>
</dependencies>

<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>software.amazon.awssdk</groupId>
      <artifactId>bom</artifactId>
      <version>2.17.116</version>
      <type>pom</type>
      <scope>import</scope>
    </dependency>
  </dependencies>
</dependencyManagement>
```

Here is the code for adding a single event to the Kinesis data stream:

```java
public class EventSender {

  private static final Logger logger = Logger
      .getLogger(EventSender.class.getName());

  public static void main(String[] args) {
    sendEvent();
  }

  public static void sendEvent() {
    KinesisClient kinesisClient = getKinesisClient();

    String partitionKey = String.format("partitionKey-%d", 1);
    String sequenceNumberForOrdering = "1";
    SdkBytes data
        = SdkBytes.fromByteBuffer(
            ByteBuffer.wrap("Test data".getBytes()));

    PutRecordRequest putRecordRequest
        = PutRecordRequest
            .builder()
            .streamName(Constants.MY_DATA_STREAM)
            .partitionKey(partitionKey)
            .sequenceNumberForOrdering(sequenceNumberForOrdering)
            .data(data)
            .build();

    PutRecordResponse putRecordResponse
        = kinesisClient.putRecord(putRecordRequest);

    logger.info("Put Result: " + putRecordResponse);
    kinesisClient.close();
  }

  private static KinesisClient getKinesisClient() {
    AwsCredentialsProvider credentialsProvider =
        ProfileCredentialsProvider
            .create(Constants.AWS_PROFILE_NAME);

    KinesisClient kinesisClient = KinesisClient
        .builder()
        .credentialsProvider(credentialsProvider)
        .region(Region.US_EAST_1).build();
    return kinesisClient;
  }
}
```

```shell
INFO: Put Result: PutRecordResponse(ShardId=shardId-000000000001, SequenceNumber=49626569155656830268862440193769593466823195675894743058)
```

Let us next add multiple events to the Kinesis data stream. We will do this by using the `putRecords()` method as shown below:

```java
public class EventSender {

  private static final Logger logger
      = Logger.getLogger(EventSender.class.getName());

  public static void main(String[] args) {
    sendEvents();
  }

  public static void sendEvents() {
    KinesisClient kinesisClient = getKinesisClient();

    String partitionKey = String.format("partitionKey-%d", 1);

    List<PutRecordsRequestEntry> putRecordsRequestEntryList
        = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      SdkBytes data = SdkBytes
          .fromByteBuffer(ByteBuffer.wrap(("Test data " + i).getBytes()));

      PutRecordsRequestEntry putRecordsRequestEntry
          = PutRecordsRequestEntry.builder()
              .data(data)
              .partitionKey(partitionKey)
              .build();

      putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }

    PutRecordsRequest putRecordsRequest
        = PutRecordsRequest
            .builder()
            .streamName(Constants.MY_DATA_STREAM)
            .records(putRecordsRequestEntryList)
            .build();

    PutRecordsResponse putRecordsResult = kinesisClient
        .putRecords(putRecordsRequest);

    logger.info("Put Result: " + putRecordsResult);
    kinesisClient.close();
  }

  private static KinesisClient getKinesisClient() {
    ...
    ...
  }
}
```

```shell
...Put Result: PutRecordsResponse(FailedRecordCount=0,
  Records=[
    PutRecordsResultEntry(SequenceNumber=49626569155656830268862440193770802392642928158972051474, ShardId=shardId-000000000001),
    PutRecordsResultEntry(SequenceNumber=49626569155656830268862440193772011318462542788146757650, ShardId=shardId-000000000001),
    PutRecordsResultEntry(SequenceNumber=49626569155656830268862440193773220244282157417321463826, ShardId=shardId-000000000001),
    PutRecordsResultEntry(SequenceNumber=49626569155656830268862440193774429170101772046496170002, ShardId=shardId-000000000001),
    PutRecordsResultEntry(SequenceNumber=49626569155656830268862440193775638095921386675670876178, ShardId=shardId-000000000001)])
```

### Data Consumption - Reading Data from Kinesis Data Streams

We need to build consumer applications to process the data from a Kinesis data stream. A consumer application can get its own 2 MB/sec allotment of read throughput when it uses enhanced fan-out. This allows multiple consumers to read data from the same stream in parallel, without contending for read throughput with other consumers.

We can read data from a Kinesis data stream in different ways:
1. **AWS SDK**: with the `GetShardIterator` and `GetRecords` operations
2. **Kinesis Client Library (KCL)**: The KCL is a library that helps us build consumer applications by taking care of load balancing across shards, checkpointing processed records, and reacting to resharding.
3. **Other AWS services**: such as AWS Lambda, Kinesis Data Firehose, and Kinesis Data Analytics, which can consume records directly from a data stream.

To read records with the AWS Java SDK, we first obtain a shard iterator with the `getShardIterator()` operation and then call `getRecords()` repeatedly to fetch the data records from the shard.

## Kinesis Data Firehose

Kinesis Data Firehose is a fully managed service that is built around the concept of a delivery stream. The delivery stream receives data from a data producer and optionally applies a transformation to the data before delivering it to a destination.

The delivery stream buffers the incoming streaming data received from the data producer until it reaches a particular size or a certain time interval elapses, and then delivers the data to the destination.

An example of a data producer is a web server that sends log data to a delivery stream. We can also configure the delivery stream to read data from a Kinesis data stream.

A delivery stream can send data to the following destinations:

1. Amazon Simple Storage Service (Amazon S3)
2. Amazon Redshift
3. Amazon OpenSearch Service
4. Splunk
5. Any custom HTTP endpoint or HTTP endpoints owned by supported third-party service providers like Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic

### Creating a Delivery Stream

We can create a Kinesis Data Firehose delivery stream using the AWS Management Console or an AWS SDK. We need to provide a source of data along with a destination when creating a Firehose delivery stream.

The source of a Kinesis Data Firehose delivery stream can be:
1. A Kinesis data stream
2. A data producer writing directly to the delivery stream (Direct PUT)

### Sending Data to a Delivery Stream using Kinesis Data Streams

### Sending Data to a Delivery Stream using AWS SDK

### Sending Data to a Delivery Stream using Firehose Agent

### Data Transformation

Kinesis Data Firehose can invoke a Lambda function to transform incoming source data and deliver the transformed data to destinations. We can enable Kinesis Data Firehose data transformation when we create the delivery stream.
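As an illustration of such a transformation, here is a minimal sketch of a transformation Lambda handler; it is not part of the example application and assumes the `KinesisFirehoseEvent` type from the `aws-lambda-java-events` library. Firehose expects each returned record to contain the original `recordId`, a `result` status, and the transformed `data` encoded in Base64; the upper-casing of the payload is only a placeholder transformation:

```java
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.KinesisFirehoseEvent;

public class FirehoseTransformer
    implements RequestHandler<KinesisFirehoseEvent, Map<String, Object>> {

  @Override
  public Map<String, Object> handleRequest(
      KinesisFirehoseEvent firehoseEvent, Context context) {

    List<Map<String, Object>> transformedRecords = new ArrayList<>();

    for (KinesisFirehoseEvent.Record firehoseRecord
        : firehoseEvent.getRecords()) {

      // decode the incoming record payload
      String payload = StandardCharsets.UTF_8
          .decode(firehoseRecord.getData())
          .toString();

      // placeholder transformation: convert the payload to upper case
      String transformedPayload = payload.toUpperCase();

      // each result record carries the original recordId, a result status,
      // and the transformed data encoded in Base64
      Map<String, Object> resultRecord = new HashMap<>();
      resultRecord.put("recordId", firehoseRecord.getRecordId());
      resultRecord.put("result", "Ok");
      resultRecord.put("data", Base64.getEncoder()
          .encodeToString(
              transformedPayload.getBytes(StandardCharsets.UTF_8)));

      transformedRecords.add(resultRecord);
    }

    return Map.of("records", transformedRecords);
  }
}
```

We would then reference this handler when enabling data transformation on the delivery stream.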
### Dynamic Partitioning

## Amazon Kinesis Data Analytics

Amazon Kinesis Data Analytics is the easiest way to analyze streaming data in real time. Using templates and built-in operators, we can quickly build queries and real-time streaming applications. Amazon Kinesis Data Analytics sets up the resources to run our applications and scales automatically to handle any volume of incoming data.

### Key Concepts

A Kinesis Data Analytics application has the following components:

- **Runtime properties**: We can use runtime properties to configure our application without recompiling the application code.
- **Source**: The application consumes data by using a source. A source connector reads data from a Kinesis data stream, an Amazon S3 bucket, etc.
- **Operators**: The application processes data by using one or more operators. An operator can transform, enrich, or aggregate data.
- **Sink**: The application produces data to external destinations by using sinks. A sink connector writes data to a Kinesis data stream, a Kinesis Data Firehose delivery stream, an Amazon S3 bucket, etc.

### Creating a Streaming Application

## Amazon Kinesis Video Streams

Amazon Kinesis Video Streams is a fully managed AWS service that we can use to stream live video from devices to the AWS Cloud, or build applications for real-time video processing or batch-oriented video analytics.

### Key Concepts

- **Producer**: Any source that puts data into a Kinesis video stream. A producer can be any video-generating device, such as a security camera, a body-worn camera, a smartphone camera, or a dashboard camera. A producer can also send non-video data, such as audio feeds, images, or RADAR data.

- **Kinesis video stream**: A resource that enables us to transport live video data, optionally store it, and make the data available for consumption both in real time and on a batch or ad hoc basis.

- **Consumer**: Gets data, such as fragments and frames, from a Kinesis video stream to view, process, or analyze it. Generally these consumers are called Kinesis Video Streams applications.

### Creating a Kinesis Video Stream

### Sending Data to a Kinesis Video Stream

### Consuming Media Data

We can consume media data by either viewing it in the console or by creating an application that reads media data from a stream using HLS.

## Conclusion

Here is a list of the major points for a quick reference:

1. Amazon Kinesis is a fully managed platform for collecting and processing streaming data in real-time.

2. Kinesis provides four service variants: Kinesis Data Streams, Kinesis Data Firehose, Kinesis Data Analytics, and Kinesis Video Streams.

3. A Kinesis data stream is composed of shards. Each record we send carries a partition key that determines the shard it is written to, and every record stored in a shard gets a sequence number.

4. We used the AWS Java SDK to create a data stream and to write single and multiple records with the `PutRecord` and `PutRecords` operations. We can read records back with the SDK, the Kinesis Client Library, or other AWS services.

5. Kinesis Data Firehose is built around the concept of a delivery stream that buffers incoming data and delivers it to destinations like Amazon S3, Amazon Redshift, Amazon OpenSearch Service, and Splunk, optionally transforming the data with a Lambda function on the way.

6. Kinesis Data Analytics lets us analyze streaming data using applications composed of sources, operators, and sinks.

7. Kinesis Video Streams lets us stream live video from devices to the AWS Cloud for real-time or batch-oriented video processing.


You can refer to all the source code used in the article on [Github](https://github.com/thombergs/code-examples/tree/master/aws/kinesis).
diff --git a/content/blog/2022/2022-02-12-12-factor-app-with-Node.js.md b/content/blog/2022/2022-02-12-12-factor-app-with-Node.js.md
new file mode 100644
index 000000000..e53955dc8
--- /dev/null
+++ b/content/blog/2022/2022-02-12-12-factor-app-with-Node.js.md
@@ -0,0 +1,131 @@
---
authors: [pratikdas]
title: "12 Factor app with Node.js"
categories: ["node"]
date: 2022-01-20T00:00:00
excerpt: "The twelve-factor methodology is a set of principles for building applications that are portable across environments and ready for the cloud. In this article, we will walk through the twelve factors and look at how we can apply each of them to a Node.js application."
image: images/stock/0115-2021-1200x628-branded.jpg
url: 12-factor-app-with-nodejs
---

The twelve-factor methodology is not specific to Node.js, and many of its principles are general enough to apply to any application.

In this article, we will walk through the twelve-factor methodology with specific examples of how we would implement each factor in Node.js. The example application that demonstrates these principles is not implemented yet; once it is, we will link to the relevant parts of its code so that we can see the practices in action.

{{% github "https://github.com/thombergs/code-examples/tree/master/aws/kinesis" %}}

## What is 12-Factor

## I. Codebase - Single Codebase Under Version Control for All Environments

One codebase tracked in revision control, many deploys.

This helps to establish clear ownership of an application with a single individual or group. The application has a single codebase that evolves with new features, defect fixes, and upgrades to existing features. The application owners are accountable for building different versions and deploying them to multiple environments like test, stage, and production during the lifetime of the application.

This principle advocates having a single codebase that can be built and deployed to multiple environments. Each environment has specific resource configurations like the database, configuration data, and API URLs. To achieve this, we need to separate all the environment dependencies into a form that can be specified during the build and run phases of the application.

This helps to achieve the first two goals of the Twelve-Factor App: maximizing portability across environments using declarative formats.

Following this principle, we'll have a single Git repository containing the source code of our Node.js application. This code is packaged and then deployed to one or more environments.

We configure the application for a specific environment at runtime using environment variables and environment-specific configuration instead of changing the source code.

We're breaking this rule if we have to change the source code to configure the application for a specific environment or if we have separate repositories for different environments like development and production.

**How we do it:**

The code is tracked on GitHub. Git flow can be used to manage branches for releases.

## II. Dependencies

Explicitly declare and isolate dependencies.

**How we do it:**

`package.json` declares our dependencies and locks them to specific versions.
npm installs modules to a local `node_modules` directory so that each application's dependencies are isolated from the rest of the system.

## III. Config

Store config in the environment.

**How we do it:**

Configuration is stored in environment variables and supplied through the `manifest.yml`.

Secrets are also stored in environment variables but are supplied through a Cloud Foundry User Provided Service. When setting up the app, they are created with a one-time command like `cf create-user-provided-service tfn-secrets -p '{"SECRET_KEY": "your-secret-key"}'`.

Connection configuration for Cloud Foundry services, like our database, is provided through the `VCAP_SERVICES` environment variable.

## IV. Backing Services

Treat backing services as attached resources.

**How we do it:**

We connect to the database via a connection URL provided by the `VCAP_SERVICES` environment variable. If we needed to set up a new database, we would simply create one with `cf create-service` and bind it to our application. After restaging with `cf restage`, the `VCAP_SERVICES` environment variable is updated with the new connection URL and our app talks to the new database.

We use a library that handles the database connection and abstracts away the differences between SQL-based databases. This makes it easier to migrate from one database provider to another.

We expect to be using a database hosted on Cloud Foundry, but with this strategy we could also store the connection URL in a separate environment variable pointing to a database outside of the Cloud Foundry environment, and the approach would still work.

Of course, how we handle migrating our data from one database to another can be complicated and is out of scope with regard to the twelve-factor app.

## V. Build, Release, Run

Strictly separate build and run stages.

**How we do it:**

`package.json` allows us to configure `scripts` so that we can codify various tasks. `npm run build` is used to build this application and produces minified JavaScript and CSS files to be served as static assets.

`npm start` is used to start the application. The `nodejs_buildpack` runs this command by default to start the application.

## VI. Processes

Execute the app as one or more stateless processes.

**How we do it:**

We listen to `SIGTERM` and `SIGINT` to know when it is time to shut down. The platform is constantly being updated even if our application is not: machines die, security patches cause reboots, and server resources get exhausted. Any of these things could cause the platform to kill our application. Cloud Foundry makes sure to start a new process on the freshly patched host before killing the old process.

By listening to process signals, we know when to stop serving requests, flush database connections, and close any open resources.

## VII. Port Binding

Export services via port binding.

**How we do it:**

Cloud Foundry assigns each application instance a port on the host machine and exposes it through the `PORT` environment variable.

## XI. Logs

Treat logs as event streams.

**How we do it:**

We use `winston` as our logger. We use logging levels to provide feedback about how the application is working; some of this feedback could warrant a bug fix.

Warnings are conditions that are unexpected and might hint that a bug exists in the code.

## XII. Admin Processes

Run admin/management tasks as one-off processes.

**How we do it:**

Any one-off tasks are added as npm scripts.
The implementation of these tasks lives in the `tasks` directory. Some tasks take inputs, which can be specified when running the task with `npm run script -- arguments`. Note that by default, we avoid writing interactive scripts. If the configuration is complex, the task can accept a configuration file or read the configuration from `stdin`.

## Conclusion

Here is a list of the major points for a quick reference:

1. The twelve-factor methodology helps us build portable, cloud-ready applications, and its principles map naturally to Node.js.

2. We keep a single codebase under version control and deploy it to multiple environments, configuring each environment through environment variables rather than code changes.

3. Dependencies are declared and locked in `package.json` and isolated in the local `node_modules` directory.

4. Backing services like databases are treated as attached resources and reached through connection URLs supplied by the environment.

5. Build and run stages are separated with npm scripts, the application runs as stateless processes that shut down gracefully on `SIGTERM` and `SIGINT`, and it binds to the port supplied in the `PORT` environment variable.

6. Logs are treated as event streams and written with a logger like `winston`, and admin tasks are run as one-off npm scripts.


You can refer to all the source code used in the article on [Github](https://github.com/thombergs/code-examples/tree/master/aws/sqs).