Skip to main content

aws

query with JMESPath

projection

$ aws ec2 describe-vpcs --query 'Vpcs[].VpcId'
$ aws ec2 describe-vpcs --query 'Vpcs[].{VpcId:VpcId, IsDefault:IsDefault}'

selection (if the target command has a filters option, it would be faster than query.)

$ aws ec2 describe-vpcs --query 'Vpcs[?IsDefault == `true`]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=true'

$ aws ec2 describe-vpcs --query 'Vpcs[?Tags[?Key == `Name` && Value == `test`]]'
$ aws ec2 describe-vpcs --filters 'Name=tag:Name,Values=test'

function

$ aws ec2 describe-vpcs --query 'Vpcs[?contains(VpcId, `vpc`)].VpcId'

$ aws ec2 describe-vpcs --query 'sort_by(Vpcs[?contains(VpcId, `vpc`)].VpcId, &VpcId)'
$ aws ec2 describe-images --filters "Name=owner-id,Values=<id>" --query "sort_by(Images[].{Name:Name, ImageId:ImageId}, &Name)"

$ aws ec2 describe-vpcs --query 'length(Vpcs[?contains(VpcId, `vpc`)])'

generate-cli-skeleton output

$ aws ec2 describe-vpcs --generate-cli-skeleton output
$ aws ec2 describe-vpcs --generate-cli-skeleton output --query 'Vpcs[].{CidrBlock:CidrBlock, VpcId:VpcId}'
$ aws ec2 describe-vpcs --query 'Vpcs[].{CidrBlock:CidrBlock, VpcId:VpcId}'

generate-cli-skeleton input

$ aws ec2 describe-vpcs --generate-cli-skeleton input | tee describe-vpcs.json
$ vi describe-vpcs.json
$ jq . describe-vpcs.json
{
  "Filters": [
    {
      "Name": "tag:Name",
      "Values": [
        "test"
      ]
    }
  ]
}
$ aws ec2 describe-vpcs --cli-input-json file://describe-vpcs.json

samples of filter

$ aws ec2 describe-vpcs --filters '[{"Name":"isDefault","Values":["true"]}]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=true'

$ aws ec2 describe-vpcs --filters '[{"Name":"tag-key","Values":["aws:cloudformation:stack-name"]}]'
$ aws ec2 describe-vpcs --filters 'Name=tag-key,Values=aws:cloudformation:stack-name'

$ aws ec2 describe-vpcs --filters '[{"Name":"tag:Name","Values":["terraform_test","mystack-VPC"]}]'
$ aws ec2 describe-vpcs --filters 'Name=tag:Name,Values=terraform_test,mystack-VPC'

$ aws ec2 describe-vpcs --filters '[{"Name":"isDefault","Values":["false"]}, {"Name":"state","Values":["available"]}]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=false','Name=state,Values=available'

vpc

describe region and availability zone

$ aws ec2 describe-regions --filters 'Name=region-name,Values=ap-northeast-1'
{
    "Regions": [
        {
            "Endpoint": "ec2.ap-northeast-1.amazonaws.com",
            "RegionName": "ap-northeast-1",
            "OptInStatus": "opt-in-not-required"
        }
    ]
}
$ aws ec2 describe-availability-zones --filters 'Name=region-name,Values=ap-northeast-1'
{
    "AvailabilityZones": [
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1a",
            "ZoneId": "apne1-az4"
        },
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1c",
            "ZoneId": "apne1-az1"
        },
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1d",
            "ZoneId": "apne1-az2"
        }
    ]
}

vpc

$ aws ec2 describe-vpcs --filter "Name=cidr,Values=172.16.100.0/24"
$ aws ec2 create-vpc --cidr-block 172.16.100.0/24

$ aws ec2 describe-vpc-attribute --attribute enableDnsHostnames --vpc-id <vpc>
$ aws ec2 modify-vpc-attribute --enable-dns-hostnames --vpc-id <vpc>

Before removing the vpc, its subnets need to be removed.

$ aws ec2 delete-vpc --vpc-id <vpc>

tag

$ aws ec2 describe-vpcs --filters "Name=tag:Name,Values=test"
$ jq . tags.json
[
  {
    "Key": "Name",
    "Value": "test"
  }
]
$ aws ec2 create-tags --resources <vpc> --tags file://tags.json

$ aws ec2 create-tags --resources <vpc> --tags '{"Key":"Name", "Value":"test"}'

subnets

$ aws ec2 describe-subnets --filters "Name=cidr-block,Values=172.16.100.0/26"
$ aws ec2 create-subnet --vpc-id <vpc> --cidr-block 172.16.100.0/26
$ aws ec2 delete-subnet --subnet-id <subnet>

nacl

$ aws ec2 describe-network-acls --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-network-acl --vpc-id <vpc>
$ aws ec2 replace-network-acl-association --association-id <aclassoc> --network-acl-id <nacl>

$ aws ec2 delete-network-acl --network-acl-id <nacl>

nacl entry

protocol - all: -1 - icmp: 1 - tcp: 6 - udp: 17 - icmpv6: 58

$ aws ec2 describe-network-acls --filters "Name=network-acl-id,Values=<nacl>"
$ aws ec2 create-network-acl-entry --ingress --network-acl-id <nacl> --cidr-block 172.16.100.64/26 --protocol -1 --rule-action allow --rule-number 100
$ aws ec2 create-network-acl-entry --egress  --network-acl-id <nacl> --cidr-block 172.16.100.64/26 --protocol -1 --rule-action allow --rule-number 100

$ aws ec2 delete-network-acl-entry --ingress --network-acl-id <nacl> --rule-number 100

internet gateway

$ aws ec2 describe-internet-gateways --query 'InternetGateways[?Attachments[?VpcId == `<vpc>`]]'
$ aws ec2 create-internet-gateway
$ aws ec2 attach-internet-gateway --internet-gateway-id <internetgateway> --vpc-id <vpc>

$ aws ec2 detach-internet-gateway --internet-gateway-id <internetgateway> --vpc-id <vpc>
$ aws ec2 delete-internet-gateway --internet-gateway-id <internetgateway>

route table

$ aws ec2 describe-route-tables --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-route-table --vpc-id <vpc>
$ aws ec2 associate-route-table --route-table-id <routetable> --subnet-id <subnet>

$ aws ec2 describe-route-tables --filters "Name=route-table-id,Values=<routetable>"
$ aws ec2 create-route --destination-cidr-block 0.0.0.0/0 --gateway-id <internetgateway> --route-table-id <routetable>

$ aws ec2 delete-route --destination-cidr-block 0.0.0.0/0 --route-table-id <routetable>
$ aws ec2 disassociate-route-table --association-id <rtbassoc>
$ aws ec2 delete-route-table --route-table-id <routetable>

security group

$ aws ec2 describe-security-groups --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-security-group --description "<description>" --group-name "<name>" --vpc-id <vpc>

$ aws ec2 describe-security-groups --filters "Name=group-id,Values=<securitygroup>"
$ aws ec2 authorize-security-group-ingress --group-id <securitygroup> --ip-permissions '[{"IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, "IpRanges": [{"CidrIp": "xxx.xxx.xxx.xxx/32", "Description": "ssh incoming access"}]}]'

$ aws ec2 revoke-security-group-ingress --group-id <securitygroup> --ip-permissions '[{"IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, "IpRanges": [{"CidrIp": "xxx.xxx.xxx.xxx/32", "Description": "ssh incoming access"}]}]'
$ aws ec2 delete-security-group --group-id <securitygroup>

key pair

$ aws ec2 describe-key-pairs 
$ aws ec2 describe-key-pairs --key-names <keyname>
$ aws ec2 create-key-pair --key-name <keyname> | tee id_rsa.testkey.json

$ aws ec2 delete-key-pair --key-name <keyname>
$ aws ec2 import-key-pair --key-name <keyname> --public-key-material file://id_rsa.testkey.pub

save the private key

$ jq -r '.KeyMaterial' id_rsa.testkey.json > id_rsa.testkey.nopass
$ openssl rsa -aes256 -in id_rsa.testkey.nopass -out id_rsa.testkey
$ chmod 600 id_rsa.testkey
($ rm id_rsa.testkey.json id_rsa.testkey.nopass)

volume

$ aws ec2 describe-volumes
$ aws ec2 create-volume --volume-type gp2 --size <size> --availability-zone <az>
$ aws ec2 delete-volume --volume-id <volume>

attach a volume to an instance

$ aws ec2 describe-instances --filters Name=instance-id,Values=<instance> --query 'Reservations[].Instances[].BlockDeviceMappings[]'
$ aws ec2 attach-volume --volume-id <volume> --instance-id <instance> --device /dev/xvdb

after the volume is attached, make the partition table, partition, and file system on the OS side.

$ lsblk
$ sudo fdisk -l /dev/nvme1n1
$ sudo fdisk /dev/nvme1n1
$ sudo mkswap /dev/nvme1n1p1
$ sudo swapon /dev/nvme1n1p1
$ cat /proc/swaps
$ ls -l /dev/disk/by-uuid/
$ echo "UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx     none        swap   defaults          0   0" | sudo tee -a /etc/fstab

extend volume

$ aws ec2 describe-volumes --filters Name=volume-id,Values=<volume>
$ aws ec2 modify-volume --size <size> --volume-id <volume>

after that, extend partition and file system

$ df
$ sudo growpart /dev/nvme0n1 1
$ lsblk
$ sudo xfs_growfs -d /

ami

$ aws ec2 describe-images --filters "Name=owner-id,Values=<id>"
$ aws ec2 describe-images --filters "Name=image-id,Values=<ami>"
$ aws ec2 create-image --instance-id <instance> --name <name>
$ aws ec2 deregister-image --image-id <ami>

after deregister-image, need to delete snapshots and volumes

snapshot

$ aws ec2 describe-snapshots --filters Name=owner-id,Values=<id>
$ aws ec2 describe-snapshots --filters Name=snapshot-id,Values=<snapshot>
$ aws ec2 delete-snapshot --snapshot-id <snapshot>
$ aws ec2 create-snapshot --volume-id <volume>

launch template

$ aws ec2 describe-launch-templates
$ jq . template.json
{
  "ImageId": "<ami>",
  "InstanceType": "t3.nano",
  "CreditSpecification": {
    "CpuCredits": "standard"
  },
  "KeyName": "<keyname>",
  "InstanceInitiatedShutdownBehavior": "terminate"
}
$ aws ec2 create-launch-template --launch-template-name <name> --launch-template-data file://template.json
$ aws ec2 describe-launch-template-versions --launch-template-id <templateid> --versions <version>

$ aws ec2 create-launch-template-version \
 --launch-template-id <templateid> \
 --source-version <version> \
 --version-description "<description>" \
 --launch-template-data '{ "Monitoring": { "Enabled": true } }'
$ aws ec2 modify-launch-template --launch-template-id <templateid> --default-version <version>

$ aws ec2 delete-launch-template-versions --launch-template-id <templateid> --versions <version>
$ aws ec2 delete-launch-template --launch-template-id <templateid>

instance

$ aws ec2 describe-instances --filters "Name=tag:Name,Values=test"
$ cat userdata.sh 
sudo apt update
sudo apt install nginx
$ aws ec2 run-instances \
 --security-group-ids <securitygroup> \
 --subnet-id <subnet> \
 --associate-public-ip-address \
 --tag-specifications '{"ResourceType":"instance","Tags":[{"Key":"Name","Value":"test"}]}' \
 --launch-template LaunchTemplateName=<templatename> \
 --user-data file://userdata.sh
#--image-id <ami>
#--instance-type t3.nano
#--credit-specification standard
#--key-name <keyname>
#--instance-initiated-shutdown-behavior terminate

$ aws ec2 describe-instances --filters "Name=tag:Name,Values=test" --query 'Reservations[].Instances[].{InstanceId:InstanceId,State:State}'
$ aws ec2 stop-instances --instance-ids <instance>
$ aws ec2 start-instances --instance-ids <instance>
$ aws ec2 terminate-instances --instance-ids <instance>

instance meta data

curl http://169.254.169.254/latest/meta-data/instance-id
curl http://169.254.169.254/latest/user-data

iam

user

$ aws iam list-users --query 'Users[?UserName==`<username>`]'
$ aws iam get-user --user-name <username>

$ aws iam create-user --user-name <username>
$ aws iam tag-user --user-name <username> --tags '{"Key":"name","Value":"test"}'

$ aws iam delete-user --user-name <username>

login profile

$ aws iam get-login-profile --user-name <username>
$ aws iam create-login-profile --generate-cli-skeleton > create-login-profile.json

$ jq '.' create-login-profile.json
{
  "UserName": "<username>",
  "Password": "<initialpassword>",
  "PasswordResetRequired": true
}
$ aws iam create-login-profile --user-name <username> --cli-input-json file://create-login-profile.json

group

$ aws iam list-groups  --query 'Groups[?GroupName==`<groupname>`]'
$ aws iam create-group --group-name <groupname>

$ aws iam delete-group --group-name <groupname>

policy

$ aws iam list-policies --query 'Policies[?PolicyName==`<policyname>`]'
$ aws iam get-policy --policy-arn <policyarn>

$ jq . policy.json 
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:Get*",
        "s3:List*"
      ],
      "Resource": "*"
    }
  ]
}

$ aws iam create-policy --policy-name <policyname> --policy-document file://policy.json

$ aws iam list-policy-versions --policy-arn <policyarn>
$ aws iam get-policy-version --policy-arn <policyarn> --version-id v1
$ aws iam create-policy-version --policy-arn <policyarn> --policy-document file://policy_v2.json
$ aws iam set-default-policy-version --policy-arn <policyarn> --version-id v1

$ aws iam delete-policy-version --policy-arn <policyarn> --version-id v2
$ aws iam delete-policy --policy-arn <policyarn>

sample policy for allow change password

$ jq '.' policy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "iam:GetAccountPasswordPolicy",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "iam:ChangePassword",
      "Resource": "<userarn>"
    }
  ]
}

add user to group

$ aws iam list-groups-for-user --user-name <username>
$ aws iam add-user-to-group --group-name <groupname> --user-name <username>

$ aws iam remove-user-from-group --group-name <groupname> --user-name <username>

attach policy to user

$ aws iam list-attached-user-policies --user-name <username>
$ aws iam attach-user-policy --user-name <username> --policy-arn "<policyarn>"
$ aws iam detach-user-policy --user-name <username> --policy-arn "<policyarn>"

attach policy to group

$ aws iam list-attached-group-policies --group-name <groupname>
$ aws iam attach-group-policy --group-name <groupname> --policy-arn "<policyarn>"
$ aws iam detach-group-policy --group-name <groupname> --policy-arn "<policyarn>"

role

$ aws iam list-roles --query 'Roles[?RoleName==`<rolename>`]'
$ jq . assumepolicy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
$ aws iam create-role --role-name <rolename> --assume-role-policy-document file://assumepolicy.json

(Before you delete a role, you must remove the role from its instance profile.)

$ aws iam delete-role --role-name <rolename>

attach policy to role

$ aws iam list-attached-role-policies --role-name <rolename>
$ aws iam attach-role-policy --role-name <rolename> --policy-arn <policyarn>
$ aws iam detach-role-policy --role-name <rolename> --policy-arn <policyarn>

add role to instance profile

$ aws iam list-instance-profiles-for-role --role-name <rolename>
$ aws iam add-role-to-instance-profile --instance-profile-name <rolename> --role-name <rolename>
$ aws iam remove-role-from-instance-profile --instance-profile-name <rolename> --role-name <rolename>

associate iam instance profile

$ aws ec2 describe-iam-instance-profile-associations 
$ aws ec2 associate-iam-instance-profile --iam-instance-profile '{"Arn":"<rolearn>","Name":"<rolename>"}' --instance-id <instanceid>
$ aws ec2 disassociate-iam-instance-profile --association-id <associationid>

access key

$ aws iam list-access-keys --user-name <user-name>
$ aws iam get-access-key-last-used --access-key-id <access-key-id>

$ aws iam create-access-key --user-name <user-name>
$ aws iam update-access-key --user-name <user-name> --access-key-id <access-key-id> --status Inactive
$ aws iam delete-access-key --user-name <user-name> --access-key-id <access-key-id>

cloudformation sample template

    TestLambdaRole:
      Type: AWS::IAM::Role
      Properties:
        AssumeRolePolicyDocument:
          Version: 2012-10-17
          Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole
        Tags:
          - "Key": "Name"
            "Value": "test"
    AllowAccessDynamoDBTable:
      Type: "AWS::IAM::Policy"
      Properties:
        PolicyName: "AllowAccessDynamoDBTable"
        PolicyDocument:
          Version: "2012-10-17"
          Statement:
            - Effect: "Allow"
              Action:
                - dynamodb:GetItem
                - dynamodb:PutItem
                - dynamodb:DeleteItem
              Resource: !GetAtt TestDynamoDBTable.Arn
        Roles:
          - !Ref TestLambdaRole

sqs

create queue

aws sqs list-queues
aws sqs create-queue --queue-name testqueue

get queue url or attributes

aws sqs get-queue-url --queue-name testqueue
aws sqs get-queue-attributes --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --attribute-names All

handle message

aws sqs send-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --message-body '{"foo":"bar"}'
aws sqs receive-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>
{
    "Messages": [
        {
            "MessageId": "",
            "ReceiptHandle": "",
            "MD5OfBody": "",
            "Body": "{}"
        }
    ]
}
aws sqs delete-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --receipt-handle "<ReceiptHandle>"
aws sqs purge-queue --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>

delete queue

aws sqs delete-queue --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>

sample python script

send message

receive message

copy boot drive

make partition

parted /dev/sdc print
parted /dev/sdc rm 1
parted /dev/sdc mkpart primary ext4 1MiB 1GiB 
parted /dev/sdc mkpart primary ext4 1GiB 100% 
parted /dev/sdc set 1 boot on

make filesystem

mkfs -t ext4 -O ^has_journal /dev/sdc1
mkfs -t ext4 -O ^has_journal /dev/sdc2
dumpe2fs /dev/sdc1
dumpe2fs /dev/sdc2

copy files

apt-get install --no-install-recommends dump

mount -o noatime /dev/sdc2 /media/chroot
cd /media/chroot
dump -0 -f - /dev/vgsaturn/lvrootura | sudo restore -rf -

mount -o noatime /dev/sdc1 /media/chroot/boot
cd /media/chroot/boot
dump -0 -f - /dev/sdb1 | sudo restore -rf -

install grub

grub-install --boot-directory=/media/chroot/boot /dev/sdc
grub-mkconfig -o /media/chroot/boot/grub/grub.cfg

lvm

for test, create some image files

dd if=/dev/zero of=file1.img bs=1M count=0 seek=5120
dd if=/dev/zero of=file2.img bs=1M count=0 seek=5120
losetup /dev/loop1 file1.img
losetup /dev/loop2 file2.img

create and remove pv

pvcreate /dev/loop1
pvs
pvdisplay
pvdisplay -m
pvremove /dev/loop1

pvmove can move extents from one pv to another

pvmove /dev/loop1

create and remove vg

vgcreate vgname /dev/loop1
vgs
vgdisplay
vgchange -an vgname
(if you remove the disk and insert it into another system,
you can still find the volume group there unless you remove the volume group)
vgremove vgname

you can add/remove pv to vg

vgextend vgname /dev/loop2
vgreduce vgname /dev/loop1

create logical volume

lvcreate -L 1G -n lvname vgname
lvs
lvdisplay
mkfs -t ext4 /dev/vgname/lvname
mount /dev/vgname/lvname /mountpoint

mirror or stripe

lvcreate -L 128M -m 1 -n lvname vgname
lvcreate -L 128M -i 2 -n lvname vgname

snapshot and merge. if you don't want to merge, you can simply remove the snapshot lv

lvcreate -L 64M --snapshot /dev/vgname/lvname -n lvsnap
lvconvert --merge vgname/lvsnap

extend logical volume and filesystem

lvextend -L +512MB /dev/vgname/lvname
e2fsck -f /dev/vgname/lvname
resize2fs /dev/vgname/lvname

shrink filesystem and logical volume

e2fsck -f /dev/vgname/lvname
resize2fs /dev/vgname/lvname 500M
lvchange -an /dev/vgname/lvname
lvreduce -L -512MB /dev/vgname/lvname
lvchange -ay /dev/vgname/lvname
resize2fs /dev/vgname/lvname

remove logical volume

lvchange -an /dev/vgname/lvname
lvremove /dev/vgname/lvname

umount

fuser -vm /mountpoint
umount /mountpoint

extend pv

parted /dev/sda
parted /dev/sda print
parted /dev/sda rm 2
parted /dev/sda resizepart 1 100%

pvdisplay /dev/sda1
vgdisplay vgname
pvresize /dev/sda1

extend partition

parted /dev/sdx print
parted /dev/sdx resizepart [num] 100%
e2fsck -f /dev/sdx[num]
resize2fs /dev/sdx[num]

apigateway

create rest-api

$ aws apigateway get-rest-apis
$ aws apigateway create-rest-api --name <API Name>
$ aws apigateway get-rest-api --rest-api-id <API ID>

create resource

$ aws apigateway get-resources --rest-api-id <API ID>
$ aws apigateway create-resource --rest-api-id <API ID> --parent-id <Parent Resource ID> --path-part subpath
$ aws apigateway get-resource --rest-api-id <API ID> --resource-id <Resource ID>

put method

$ aws apigateway put-method --rest-api-id <API ID> --resource-id <Resource ID> --authorization-type none --http-method get 
$ aws apigateway get-method --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET

put integration response

$ aws apigateway put-integration-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200 --selection-pattern "" --response-templates '{"application/json":"{\"json\":\"template\"}"}'
$ aws apigateway get-integration-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200

put method response

$ aws apigateway put-method-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200 --response-models '{"text/plain":"Empty"}'
$ aws apigateway get-method-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200

update integration

$ jq '.' test.json
[
  {
    "op": "add",
    "path": "/requestTemplates/application~1json",
    "value": "{\"example\":\"json\"}"
  }
]
$ aws apigateway update-integration --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --patch-operations file://test.json

create deployment

$ aws apigateway get-deployments --rest-api-id <API ID>
$ aws apigateway create-deployment --rest-api-id <API ID> --stage-name <Stage Name>
$ aws apigateway get-deployment --rest-api-id <API ID> --deployment-id <Deployment ID>

create stage

$ aws apigateway get-stages --rest-api-id <API ID>
$ aws apigateway create-stage --rest-api-id <API ID> --stage-name <Stage Name> --deployment-id <Deployment ID>
$ aws apigateway get-stage --rest-api-id <API ID> --stage-name <Stage Name>

test invoke method

$ aws apigateway test-invoke-method --rest-api-id <API ID> --resource-id <Resource ID>
  --http-method [GET|POST|DELETE|...]
  --path-with-query-string "/"
  --body "bodystring"

create api key

$ aws apigateway get-api-keys
$ aws apigateway create-api-key --name testkey
$ aws apigateway get-api-key --api-key <Key ID>
$ aws apigateway get-api-key --api-key <Key ID> --include-value

update api key

$ aws apigateway update-api-key --api-key <Key ID> --patch-operations '{ "op":"replace", "path":"/enabled", "value":"true" }'

create usage plan

$ aws apigateway get-usage-plans
$ aws apigateway create-usage-plan --name myusageplan --api-stages '{"apiId":"<API ID>","stage":"<Stage Name>"}' --throttle '{ "burstLimit": 2, "rateLimit": 1.0 }' --quota '{ "limit": 320, "offset": 0, "period": "DAY" }'
$ aws apigateway get-usage-plan --usage-plan-id <Usage Plan ID>

associate usage-plan and key

$ aws apigateway get-usage-plan-keys --usage-plan-id <Usage Plan ID>
$ aws apigateway create-usage-plan-key --usage-plan-id <Usage Plan ID> --key-id <Key ID> --key-type API_KEY
$ aws apigateway get-usage-plan-key --usage-plan-id <Usage Plan ID> --key-id <Key ID>

update method

$ aws apigateway update-method --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --patch-operations op='replace',path='/apiKeyRequired',value='true'
$ aws apigateway get-method --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET

test run with api-key

$ curl https://<API ID>.execute-api.<Region Name>.amazonaws.com/<Stage Name> --header "x-api-key:<API Key Value>"

disassociate usage-plan and key

$ aws apigateway get-usage-plan-key --usage-plan-id <Usage Plan ID> --key-id <Key ID>
$ aws apigateway delete-usage-plan-key --usage-plan-id <Usage Plan ID> --key-id <Key ID>

delete stage

$ aws apigateway get-stage --rest-api-id <API ID> --stage-name <Stage Name>
$ aws apigateway delete-stage --rest-api-id <API ID> --stage-name <Stage Name>

delete deployment

$ aws apigateway get-deployment --rest-api-id <API ID> --deployment-id <Deployment ID>
$ aws apigateway delete-deployment --rest-api-id <API ID> --deployment-id <Deployment ID>

delete usage-plan

$ aws apigateway get-usage-plan --usage-plan-id <Usage Plan ID>
$ aws apigateway delete-usage-plan --usage-plan-id <Usage Plan ID>

delete api key

$ aws apigateway get-api-key --api-key <Key ID>
$ aws apigateway delete-api-key --api-key <Key ID>

delete integration response

$ aws apigateway get-integration-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200
$ aws apigateway delete-integration-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200

delete method response

$ aws apigateway get-method-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200
$ aws apigateway delete-method-response --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET --status-code 200

delete integration

$ aws apigateway get-integration --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET
$ aws apigateway delete-integration --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET

delete method

$ aws apigateway get-method --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET
$ aws apigateway delete-method --rest-api-id <API ID> --resource-id <Resource ID> --http-method GET

delete resource

$ aws apigateway get-resource --rest-api-id <API ID> --resource-id <Resource ID>
$ aws apigateway delete-resource --rest-api-id <API ID> --resource-id <Resource ID>

delete rest-api

$ aws apigateway get-rest-api --rest-api-id <API ID>
$ aws apigateway delete-rest-api --rest-api-id <API ID>

cloudformation sample template

    ApiGatewayRestApi:
      Type: AWS::ApiGateway::RestApi
      Properties:
        Name: "ApiGatewayRestApi"
        Tags:
          - "Key": "Name"
            "Value": "test"
    ApiGatewayResourceSubject:
      Type: AWS::ApiGateway::Resource
      Properties:
        ParentId: !GetAtt ApiGatewayRestApi.RootResourceId
        PathPart: "{subject}"
        RestApiId: !Ref ApiGatewayRestApi
    TestLambdaPermission:
      Type: AWS::Lambda::Permission
      Properties:
        FunctionName: !GetAtt TestLambda.Arn
        Action: lambda:InvokeFunction
        Principal: apigateway.amazonaws.com
    ApiGatewayGET:
      Type: AWS::ApiGateway::Method
      Properties:
        HttpMethod: "GET"
        RestApiId: !Ref ApiGatewayRestApi
        ResourceId: !Ref ApiGatewayResourceSubject
        RequestParameters:
          "method.request.path.string": true
          "method.request.path.year": true
        AuthorizationType: "NONE"
        MethodResponses:
            - StatusCode: 200
              ResponseModels:
                application/json;charset=UTF-8: Empty
        Integration:
          Type: "AWS_PROXY"
          IntegrationHttpMethod: "POST"
          RequestParameters:
            "integration.request.path.string": "method.request.path.string"
            "integration.request.path.year": "method.request.path.year"
          Uri: !Sub >-
            arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${TestLambda.Arn}/invocations
          RequestTemplates:
            application/json: "{ \"statusCode\" : 200 }"
    DeployDevel:
      DependsOn: ApiGatewayGET
      Type: 'AWS::ApiGateway::Deployment'
      Properties:
        RestApiId: !Ref ApiGatewayRestApi
        StageName: "devel"

sns

create topic

$ aws sns create-topic --name <Topic Name>
$ aws sns list-topics
$ aws sns get-topic-attributes --topic-arn <Topic ARN>

create subscription

$ aws lambda list-functions | jq '.Functions[].FunctionArn'

$ aws sns subscribe --topic-arn <Topic ARN> --protocol lambda --notification-endpoint <Lambda ARN>
$ aws sns list-subscriptions
$ aws sns get-subscription-attributes --subscription-arn <Subscription ARN>

publish

$ aws sns publish --topic-arn <Topic ARN> --subject "test subject" --message "this is test message"

unsubscribe

$ aws sns unsubscribe --subscription-arn <Subscription ARN>

delete topic

$ aws sns delete-topic --topic-arn <Topic ARN>

create SNS with CloudFormation

prepare yaml file

$ cat sns.yml
AWSTemplateFormatVersion: 2010-09-09
Resources:
  MySNSTopic:
    Type: 'AWS::SNS::Topic'
  MySubscription:
    Type: 'AWS::SNS::Subscription'
    Properties: 
      Endpoint: !Ref lambdaARN
      Protocol: lambda
      TopicArn: !Ref MySNSTopic
Parameters:
  lambdaARN:
    Type: String
$ aws cloudformation validate-template --template-body file://sns.yml

prepare parameter file

$ jq '.' sns.parameter
[
  {
    "ParameterKey": "lambdaARN",
    "ParameterValue": "<lambda ARN>"
  }
]

create stack

$ aws cloudformation create-stack --template-body file://sns.yml --stack-name <Stack Name> --parameters file://sns.parameter

confirm CloudFormation

$ aws cloudformation list-stacks
$ aws cloudformation describe-stacks --stack-name <Stack Name>
$ aws cloudformation get-template --stack-name <Stack Name> --query 'TemplateBody' | xargs -0 printf

confirm SNS

$ aws sns list-topics
$ aws sns list-subscriptions

delete stack

$ aws cloudformation delete-stack --stack-name <Stack Name>
$ aws cloudformation list-stacks --query 'StackSummaries[?StackName==`<Stack Name>`]'
$ aws cloudformation describe-stacks --stack-name <Stack Name>

glacier

create vault

$ aws glacier create-vault --account-id - --vault-name <Vault Name>
$ aws glacier list-vaults --account-id -

set vault notification

$ jq '.' vault-notifications.json
{
  "SNSTopic": "<ARN of SNS Topic>",
  "Events": [
    "ArchiveRetrievalCompleted",
    "InventoryRetrievalCompleted"
  ]
}
$ aws glacier set-vault-notifications --account-id - --vault-name <Vault Name> --vault-notification-config file://vault-notifications.json
$ aws glacier get-vault-notifications --account-id - --vault-name <Vault Name>

set strategy of retrieval policy to free tier

$ jq '.' retrieval-policy.json 
{
  "Rules": [
    {
      "Strategy": "FreeTier"
    }
  ]
}
$ aws glacier set-data-retrieval-policy --account-id - --policy file://retrieval-policy.json
$ aws glacier get-data-retrieval-policy --account-id -

upload archive

$ aws glacier upload-archive --account-id - --vault-name <Vault Name> --body testdata.tar.enc
$ aws glacier describe-vault --account-id - --vault-name <Vault Name>

retrieve inventory

$ jq '.' inventory-retrieval.json
{
  "Type": "inventory-retrieval"
}
$ aws glacier initiate-job --account-id - --vault-name <Vault Name> --job-parameters file://inventory-retrieval.json
$ aws glacier list-jobs --account-id - --vault-name <Vault Name>

$ aws glacier describe-job --account-id - --vault-name <Vault Name> --job-id <Job ID>
$ aws glacier get-job-output --account-id - --vault-name <Vault Name> --job-id <Job ID> output.json

retrieve archive

$ jq '.' archive-retrieval.json
{
  "Type": "archive-retrieval",
  "ArchiveId": "<Archive ID>"
}
$ aws glacier initiate-job --account-id - --vault-name <Vault Name> --job-parameters file://archive-retrieval.json
$ aws glacier list-jobs --account-id - --vault-name <Vault Name>

$ aws glacier describe-job --account-id - --vault-name <Vault Name> --job-id <Job ID>
$ aws glacier get-job-output --account-id - --vault-name <Vault Name> --job-id <Job ID> output.dat

delete archive

$ aws glacier delete-archive --account-id - --vault-name <Vault Name> --archive-id <Archive ID>

delete vault notification

$ aws glacier delete-vault-notifications --account-id - --vault-name <Vault Name>
$ aws glacier get-vault-notifications --account-id - --vault-name <Vault Name>

delete vault

$ aws glacier describe-vault --account-id - --vault-name <Vault Name>
$ aws glacier delete-vault --account-id - --vault-name <Vault Name>

s3

objects

$ aws s3 ls s3://mybucketname/
$ aws s3 cp testfile.txt s3://mybucketname/
$ aws s3 cp s3://mybucketname/testfile.txt testfile2.txt
$ aws s3 rm s3://mybucketname/testfile.txt

bucket

$ aws s3api list-buckets
$ aws s3api list-buckets --query 'Buckets[?Name == `mybucketname`]'
$ aws s3api create-bucket --bucket mybucketname --region ap-northeast-1 --create-bucket-configuration LocationConstraint=ap-northeast-1
$ aws s3api delete-bucket --bucket mybucketname

default encryption for a bucket

$ aws s3api get-bucket-encryption --bucket mybucketname
$ aws s3api put-bucket-encryption --bucket mybucketname \
--server-side-encryption-configuration '{"Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]}'

$ aws s3api head-object --bucket mybucketname --key testfile.txt

public access block

$ aws s3api get-public-access-block --bucket mybucketname
$ aws s3api put-public-access-block --bucket mybucketname \
--public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"

versioning

enable and disable versioning for a bucket

$ aws s3api get-bucket-versioning --bucket mybucketname
$ aws s3api put-bucket-versioning --bucket mybucketname --versioning-configuration '{"Status":"Enabled"}'
$ aws s3api put-bucket-versioning --bucket mybucketname --versioning-configuration '{"Status":"Suspended"}'

list object versions

$ aws s3api list-object-versions  --bucket mybucketname --prefix testfile.txt
$ aws s3api list-object-versions  --bucket mybucketname --prefix testfile.txt --query 'Versions[?IsLatest==`true`].VersionId'

remove all versions of a file (while any object versions remain in a bucket, you cannot delete the bucket)

$ delete_objects=$(aws s3api list-object-versions --bucket mybucketname --prefix testfile.txt \
> --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')

$ aws s3api delete-objects --bucket mybucketname --delete "${delete_objects}"

bucket notification configuration

$ aws s3api get-bucket-notification-configuration --bucket bucketname
$ jq . notification.json 
{
  "QueueConfigurations": [
    {
      "Events": [
        "s3:ObjectCreated:*"
      ],
      "QueueArn": "arn:aws:sqs:<region>:<id>:<queuename>",
      "Filter": {
        "Key": {
          "FilterRules": [
            {
              "Name": "prefix",
              "Value": "input/"
            }
          ]
        }
      }
    }
  ]
}
$ aws s3api put-bucket-notification-configuration --bucket bucketname --notification-configuration file://notification.json

Before configuring the bucket notification, add a policy like the one below to the SQS queue

{
  "Version": "2012-10-17",
  "Id": "arn:aws:sqs:<region>:<id>:<queuename>/<policyname>",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": "*"
      },
      "Action": "sqs:SendMessage",
      "Resource": "arn:aws:sqs:<region>:<id>:<queuename>",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "arn:aws:s3:::<bucketname>"
        }
      }
    }
  ]
}

sample cloudformation template

AWSTemplateFormatVersion: '2010-09-09'
Parameters:
  BucketName:
    Type: String
Resources:
  MyPrivBucket:
    Type: AWS::S3::Bucket
    DeletionPolicy: Retain
    Properties:
      BucketName: !Ref BucketName
      PublicAccessBlockConfiguration:
        BlockPublicAcls: True
        BlockPublicPolicy: True
        IgnorePublicAcls: True
        RestrictPublicBuckets: True
      BucketEncryption:
        ServerSideEncryptionConfiguration:
        - ServerSideEncryptionByDefault:
            SSEAlgorithm: AES256
      VersioningConfiguration:
        Status: "Enabled"
      Tags:
        - "Key": "Name"
          "Value": "test"

boto3 client

file operation

import boto3
import sys

bucket = "mybucketname"
try:
    s3_client = boto3.client('s3')
    # Round-trip a file: upload it, download it back, then delete the remote copy.
    s3_client.upload_file('testfile.txt', bucket, 'dirname/uploaded.txt')
    s3_client.download_file(bucket, 'dirname/uploaded.txt', 'downloaded.txt')
    s3_client.delete_object(Bucket=bucket, Key='dirname/uploaded.txt')
except Exception as e:
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt and
    # hide the real cause behind a generic message; report the actual error
    # and exit with a nonzero status so callers can detect the failure.
    print(e)
    sys.exit(1)

bucket operation

import boto3
import sys

bucket = "mybucketname"

try:
    s3_client = boto3.client('s3')

    # Print the name of every bucket owned by this account.
    for bucket_info in s3_client.list_buckets().get('Buckets'):
        print(bucket_info.get('Name'))

    # Delete the target bucket and show the raw API response.
    print(s3_client.delete_bucket(Bucket=bucket))

except Exception as e:
    print(e)
    sys.exit()