Some sample testing snippets for AWS (for quick testing)

userdata.sh

#!/bin/bash
# Use this as EC2 User Data (runs as root on first boot, top to bottom)
# install httpd (Amazon Linux 2)
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
echo "<h1>Hello World from $(hostname -f)</h1>" > /var/www/html/index.html
#!/bin/bash
# alternative user data: serve the instance private IP instead of the hostname
yum update -y
yum install -y httpd.x86_64
systemctl start httpd
systemctl enable httpd
echo "$(curl http://169.254.169.254/latest/meta-data/local-ipv4)" > /var/www/html/index.html

List only running EC2 instances as a table using the AWS CLI:

aws ec2 describe-instances --query "Reservations[*].Instances[*].{PublicIP:PublicIpAddress,Name:Tags[?Key=='Name']|[0].Value,Status:State.Name}" --filters Name=instance-state-name,Values=running --output table

EC2 metadata curl:

metadata.sh

#!/bin/bash
# example
curl http://169.254.169.254/latest/meta-data/instance-id
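
# the command above uses IMDSv1; on instances that require IMDSv2, fetch a session token first
# (a sketch; adjust the token TTL as needed)
TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id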

Make an Amazon EBS volume available for use on Linux:

https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html
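
A minimal sketch of the steps from that guide, assuming the new volume is attached as /dev/xvdf and should be mounted at /data with an XFS filesystem:

lsblk                          # list block devices and find the new volume
sudo file -s /dev/xvdf         # "data" in the output means there is no filesystem yet
sudo mkfs -t xfs /dev/xvdf     # create a filesystem (skip if restoring from a snapshot with data)
sudo mkdir /data
sudo mount /dev/xvdf /data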

Stress test:

sudo amazon-linux-extras install epel -y
sudo yum install stress -y

sudo stress --cpu 8 --vm-bytes $(awk '/MemAvailable/{printf "%d\n", $2 * 0.9;}' < /proc/meminfo)k --vm-keep -m 1
  • --cpu: spawns 8 CPU workers spinning on a square root task (sqrt(x))
  • --vm-bytes: uses 90% of the available memory from /proc/meminfo
  • --vm-keep: re-dirties memory instead of freeing and reallocating
  • -m 1: spawns 1 worker spinning on malloc()/free()
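
To confirm the load shows up in CloudWatch, one option is the default EC2 CPUUtilization metric (a sketch: run it on the stressed instance so the instance ID can be read from the metadata service; basic monitoring only emits a data point every 5 minutes):

INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
aws cloudwatch get-metric-statistics --namespace AWS/EC2 --metric-name CPUUtilization \
  --dimensions Name=InstanceId,Value=$INSTANCE_ID \
  --statistics Average --period 300 \
  --start-time $(date -u -d '1 hour ago' +%Y-%m-%dT%H:%M:%SZ) \
  --end-time $(date -u +%Y-%m-%dT%H:%M:%SZ)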

Stress test using FIS:

https://docs.aws.amazon.com/fis/latest/userguide/fis-tutorial-run-cpu-stress.html
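
Once an experiment template has been created per the tutorial, it can be started from the CLI (a sketch; the template ID is a placeholder):

aws fis start-experiment --experiment-template-id <your-template-id>
aws fis list-experiments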

Lambda sample python:

import json

def lambda_handler(event, context):
    body = "Hello from Lambda!"
    statusCode = 200
    return {
        "statusCode": statusCode,
        "body": json.dumps(body),
        "headers": {
            "Content-Type": "application/json"
        }
    }
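
To test the function from the CLI (a sketch; the function name my-sample-function is a placeholder):

aws lambda invoke --function-name my-sample-function --cli-binary-format raw-in-base64-out --payload '{}' response.json
cat response.json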

efs.sh

sudo yum install -y amazon-efs-utils
sudo mkdir /efs
sudo mount -t efs fs-yourid:/ /efs
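
# optional: persist the mount across reboots via the EFS mount helper (a sketch; fs-yourid is a placeholder)
echo "fs-yourid:/ /efs efs defaults,_netdev 0 0" | sudo tee -a /etc/fstab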

kinesis-data-streams.sh

#!/bin/bash

# get the AWS CLI version
aws --version

# PRODUCER

# CLI v2
aws kinesis put-record --stream-name test --partition-key user1 --data "user signup" --cli-binary-format raw-in-base64-out

# CLI v1
aws kinesis put-record --stream-name test --partition-key user1 --data "user signup"


# CONSUMER 

# describe the stream
aws kinesis describe-stream --stream-name test

# Consume some data
aws kinesis get-shard-iterator --stream-name test --shard-id shardId-000000000000 --shard-iterator-type TRIM_HORIZON

aws kinesis get-records --shard-iterator <>
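
# a sketch that chains the two consumer calls by capturing the shard iterator
SHARD_ITERATOR=$(aws kinesis get-shard-iterator --stream-name test --shard-id shardId-000000000000 --shard-iterator-type TRIM_HORIZON --query 'ShardIterator' --output text)
aws kinesis get-records --shard-iterator "$SHARD_ITERATOR"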

CORS_CONFIG.json

[
    {
        "AllowedHeaders": [
            "Authorization"
        ],
        "AllowedMethods": [
            "GET"
        ],
        "AllowedOrigins": [
            "<url of first bucket with http://...without slash at the end>"
        ],
        "ExposeHeaders": [],
        "MaxAgeSeconds": 3000
    }
]
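
If the configuration is applied through the CLI rather than the console, put-bucket-cors expects the rules wrapped in a "CORSRules" key (a sketch; the bucket name is a placeholder):

aws s3api put-bucket-cors --bucket <bucket-to-configure> --cors-configuration '{"CORSRules": '"$(cat CORS_CONFIG.json)"'}'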

athena-s3-access-logs.sql

create database s3_access_logs_db;

CREATE EXTERNAL TABLE IF NOT EXISTS s3_access_logs_db.mybucket_logs(
         BucketOwner STRING,
         Bucket STRING,
         RequestDateTime STRING,
         RemoteIP STRING,
         Requester STRING,
         RequestID STRING,
         Operation STRING,
         Key STRING,
         RequestURI_operation STRING,
         RequestURI_key STRING,
         RequestURI_httpProtoversion STRING,
         HTTPstatus STRING,
         ErrorCode STRING,
         BytesSent BIGINT,
         ObjectSize BIGINT,
         TotalTime STRING,
         TurnAroundTime STRING,
         Referrer STRING,
         UserAgent STRING,
         VersionId STRING,
         HostId STRING,
         SigV STRING,
         CipherSuite STRING,
         AuthType STRING,
         EndPoint STRING,
         TLSVersion STRING
) 
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.RegexSerDe'
WITH SERDEPROPERTIES (
         'serialization.format' = '1', 'input.regex' = '([^ ]*) ([^ ]*) \\[(.*?)\\] ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) \\\"([^ ]*) ([^ ]*) (- |[^ ]*)\\\" (-|[0-9]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) (\"[^\"]*\") ([^ ]*)(?: ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*))?.*$' )
LOCATION 's3://target-bucket-name/prefix/';


SELECT requesturi_operation, httpstatus, count(*) FROM "s3_access_logs_db"."mybucket_logs" 
GROUP BY requesturi_operation, httpstatus;

SELECT * FROM "s3_access_logs_db"."mybucket_logs"
where httpstatus='403';

mfa-delete.sh

# configure a CLI profile with root access keys (create the keys in the IAM console first)
aws configure --profile root-mfa-delete-demo

# enable mfa delete
aws s3api put-bucket-versioning --bucket mfa-demo-stephane --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa "arn-of-mfa-device mfa-code" --profile root-mfa-delete-demo

# disable mfa delete
aws s3api put-bucket-versioning --bucket mfa-demo-stephane --versioning-configuration Status=Enabled,MFADelete=Disabled --mfa "arn-of-mfa-device mfa-code" --profile root-mfa-delete-demo

# delete the root credentials in the IAM console!!!

pre-signed-url.sh

# do not forget the --region parameter! (make sure it matches the bucket's region)
aws s3 presign s3://mybucket/myobject --region my-region

# add a custom expiration time
aws s3 presign s3://mybucket/myobject  --expires-in 300 --region my-region


# IF YOU ARE GETTING ISSUES 

# set the proper signature version in order not to get issues when generating URLs for encrypted files
aws configure set default.s3.signature_version s3v4

sqs.sh

# get CLI help
aws sqs help

# list queues and specify the region
aws sqs list-queues --region us-east-1

# send a message
aws sqs send-message help
aws sqs send-message --queue-url https://queue.amazonaws.com/387124123361/MyFirstQueue --region us-east-1 --message-body hello-world

# receive a message
aws sqs receive-message help
aws sqs receive-message --region us-east-1  --queue-url https://queue.amazonaws.com/387124123361/MyFirstQueue --max-number-of-messages 10 --visibility-timeout 30 --wait-time-seconds 20

# delete a message (receive it first to obtain its receipt handle)
aws sqs delete-message help
aws sqs receive-message --region us-east-1  --queue-url https://queue.amazonaws.com/387124123361/MyFirstQueue --max-number-of-messages 10 --visibility-timeout 30 --wait-time-seconds 20
aws sqs delete-message --receipt-handle AQEBB+moMioWDaeaCZguaiMPXEqDe6n4JlGiUj/T0yUCLEKkL/tT1+68xyiZMe/ip7HBvgzSZJ6Gys8CCY8QO5qPypqZ9HSKdhl6sluJVl90x1igUHwz0gSEq/UbiLB8tNvFOKF90Dj4aH87mW3K7LLNUtv839z2Uu1Aeqd4kQDVB7SSqPzqCeaYFcLGquz+XIvT69vTAYP5HIsIjmwECx0faEiQF2JZ/KiVHq5n/ZEcG5UbIPMFmP+bg1n4ql8+2dUK+6G+gnIkMRPraZ4aweT9vUZmD5AXHDU5lnJBJNKj1QGuTbxtCjp/pzJvsul/uwsspUUWdRGP92ZpTlTDTL+WiJft3E9AUdqVhksc8NhExYDpdebWEqx43SbvzJMyJlrC --queue-url https://queue.amazonaws.com/387124123361/MyFirstQueue --region us-east-1

cli.sh

# GET PARAMETERS
aws ssm get-parameters --names /my-app/dev/db-url /my-app/dev/db-password
# GET PARAMETERS WITH DECRYPTION
aws ssm get-parameters --names /my-app/dev/db-url /my-app/dev/db-password --with-decryption

# GET PARAMETERS BY PATH
aws ssm get-parameters-by-path --path /my-app/dev/
# GET PARAMETERS BY PATH RECURSIVE
aws ssm get-parameters-by-path --path /my-app/ --recursive
# GET PARAMETERS BY PATH WITH DECRYPTION
aws ssm get-parameters-by-path --path /my-app/ --recursive --with-decryption
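
# the parameters above must exist first; a sketch to create them (values are placeholders)
aws ssm put-parameter --name /my-app/dev/db-url --type String --value "dev.db.example.com:3306"
aws ssm put-parameter --name /my-app/dev/db-password --type SecureString --value "example-password"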

handler.py

import json
import boto3
import os

ssm = boto3.client('ssm', region_name="eu-west-3")
dev_or_prod = os.environ['DEV_OR_PROD']

def lambda_handler(event, context):
    db_url = ssm.get_parameters(Names=["/my-app/" + dev_or_prod + "/db-url"])
    print(db_url)   
    db_password = ssm.get_parameters(Names=["/my-app/" + dev_or_prod + "/db-password"], WithDecryption=True)
    print(db_password)
    return "worked!"

AWS services that publish CloudWatch metrics:

https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html
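
To see which metrics a service is actually publishing in your account (a sketch using the EC2 namespace):

aws cloudwatch list-metrics --namespace AWS/EC2 --max-items 10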

Some Terraform code to build sample:

https://github.com/shinchan79/fnx-intro-aws-labs

Public S3 bucket:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": [
        "s3:GetObject"
      ],
      "Resource": "arn:aws:s3:::YOUR_BUCKET_NAME/*"
    }
  ]
}
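
To attach the policy from the CLI (a sketch; Block Public Access must be disabled on the bucket, and the policy file name is a placeholder):

aws s3api put-bucket-policy --bucket YOUR_BUCKET_NAME --policy file://public-read-policy.json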

Grant public read access to objects with a specific tag:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*",
      "Condition": {
        "StringEquals": {
          "s3:ExistingObjectTag/public": "yes"
        }
      }
    }
  ]
}
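
With that policy attached, an object becomes publicly readable only once it carries the public=yes tag (a sketch; bucket and key are placeholders):

aws s3api put-object-tagging --bucket DOC-EXAMPLE-BUCKET --key myobject --tagging 'TagSet=[{Key=public,Value=yes}]'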

Grant public read access to a specific prefix:

{
  "Version":"2012-10-17",
  "Statement":[
    {
      "Sid":"AddPerm",
      "Effect":"Allow",
      "Principal": "*",
      "Action":["s3:GetObject"],
      "Resource":["arn:aws:s3:::DOC-EXAMPLE-BUCKET/publicprefix/*"]
    }
  ]
}

Count objects in a folder of an S3 bucket with the AWS CLI:

aws s3 ls s3://YOUR_BUCKET/folder/ --recursive --human-readable  --summarize

Some boto3 examples:

https://boto3.amazonaws.com/v1/documentation/api/latest/guide/examples.html

Terraform tutorials:

https://developer.hashicorp.com/terraform/tutorials

Packer tutorials:

https://developer.hashicorp.com/packer/tutorials

Bicep samples:

https://learn.microsoft.com/en-us/azure/app-service/samples-bicep

https://github.com/Azure/azure-docs-bicep-samples