Skip to main content

Posts for year 2020 (old posts, page 1)

apt-cacher-ng

make a dockerfile

$ cat Dockerfile
# apt-cacher-ng proxy image based on Debian 10 (buster).
FROM debian:buster

# Install apt-cacher-ng only (no recommends), then remove apt caches
# and package lists to keep the image small.
RUN apt-get update \
&& apt-get install -y --no-install-recommends apt-cacher-ng \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# Cache directory is a volume so downloaded packages survive container restarts.
VOLUME ["/var/cache/apt-cacher-ng"]
# apt-cacher-ng listens on 3142 by default.
EXPOSE 3142

# Shell-form CMD: open up the cache dir permissions (the mounted volume may be
# owned by root, not the apt-cacher-ng user), start the daemon via its init
# script, then tail the logs so PID 1 stays alive and logs go to stdout.
CMD chmod 777 /var/cache/apt-cacher-ng \
&& /etc/init.d/apt-cacher-ng start \
&& tail -f /var/log/apt-cacher-ng/*

build an image

$ sudo docker build -t apt-cacher-ng:buster . | tee build.log
$ sudo docker tag apt-cacher-ng:buster apt-cacher-ng:latest

run a container

$ sudo docker run --rm -d -p 3142:3142 -v /mnt/apt-cacher-ng:/var/cache/apt-cacher-ng apt-cacher-ng:latest

test the address and port

$ curl 192.168.xxx.xxx:3142

how to use the cache server

specify it in a config file

$ cat << END | sudo tee /etc/apt/apt.conf.d/01proxy
> Acquire::http::Proxy "http://192.168.xxx.xxx:3142/";
> END

specify it in command line

$ http_proxy=http://192.168.xxx.xxx:3142/ sudo -E apt-get install xxxx

or

$ sudo su -
# http_proxy=http://192.168.xxx.xxx:3142/ apt-get install xxxx

for docker build

$ sudo docker build --build-arg http_proxy=http://192.168.xxx.xxx:3142/ -t imagename:tagname . | tee build.log

tcpdump

tcpdump

    tcpdump 
     -w <output filename>
     -r <input filename>
     -i <interface>
     -c <packet counts>

     -n   # don't convert address and port to names
     -e   # show link level header 
     -v   # verbose output
     -xx  # print the data of each packets with link level header in hex
     -XX  # print the data of each packets with link level header in hex and ascii
     -ttt # print a delta between current and previous line

     arp
     icmp
     port <port number>
     host <ip address>

wireshark

    wireshark 
     -r <input filename>
     -R "read filter"

To know detail of read filter, see man page of wireshark-filter

ecs

cluster

$ aws ecs list-clusters 
$ aws ecs describe-clusters --clusters <clusterArn>

$ aws ecs create-cluster --cluster-name <cluster-name> --tags '[{"key": "Name","value": "test"}]'
$ aws ecs delete-cluster --cluster <clusterArn>

$ aws ecs delete-cluster --cluster <clusterArn>

task definition

$ aws ecs list-task-definitions
$ aws ecs describe-task-definition --task-definition <taskDefinitionArn>

$ jq . task-definition.json
{
  "family": "sample-fargate",
  "networkMode": "awsvpc",
  "containerDefinitions": [
    {
      "name": "fargate-app",
      "image": "busybox",
      "essential": true,
      "command": [
        "sleep",
        "360"
      ]
    }
  ],
  "requiresCompatibilities": [
    "FARGATE"
  ],
  "cpu": "256",
  "memory": "512"
}
$ aws ecs register-task-definition --cli-input-json file://task-definition.json

$ aws ecs deregister-task-definition --task-definition <taskDefinitionArn>

task

When you use fargate and retrieve docker image from docker hub, you have to use internet gateway or nat gateway in the vpc.

$ aws ecs list-tasks --cluster <clusterArn>
$ aws ecs describe-tasks --cluster <clusterArn> --tasks <taskArn>

$ jq . network-configuration.json
{
  "awsvpcConfiguration": {
    "subnets": [
      "<subnet>"
    ],
    "securityGroups": [
      "<securitygroup>"
    ],
    "assignPublicIp": "ENABLED"
  }
}
$ aws ecs run-task --task-definition <taskDefinitionArn> --cluster <clusterArn> --count 1 --launch-type FARGATE --network-configuration file://network-configuration.json

$ aws ecs stop-task --cluster <clusterArn> --task <taskArn>

tags

$ aws ecs list-tags-for-resource --resource-arn <resource-arn>
$ aws ecs tag-resource --resource-arn <resource-arn> --tags '[{"key": "Name","value": "test"}]'

service

$ aws ecs list-services --cluster <clusterArn>
$ aws ecs describe-services --cluster <clusterArn> --services <serviceArn>
$ aws ecs create-service --cluster <clusterArn> --service-name <serviceName> --task-definition <task-definition> --desired-count 1 --launch-type FARGATE --network-configuration file://network-configuration.json

$ aws ecs list-tasks --cluster <clusterArn>
$ aws ecs describe-tasks --cluster <clusterArn> --tasks <taskArn>

$ aws ecs update-service --cluster <clusterArn> --service <serviceArn> --desired-count 0
$ aws ecs delete-service --cluster <clusterArn> --service <serviceArn>

dynamodb

create and delete table

$ aws dynamodb list-tables
$ aws dynamodb describe-table --table-name testtable
$ aws dynamodb create-table --table-name testtable  \
 --attribute-definitions '[{"AttributeName":"Artist","AttributeType":"S"},{"AttributeName":"AlbumTitle","AttributeType":"S"}]' \
 --key-schema '[{"AttributeName":"Artist","KeyType":"HASH"},{"AttributeName":"AlbumTitle","KeyType":"RANGE"}]' \
 --provisioned-throughput '{"ReadCapacityUnits": 1,"WriteCapacityUnits": 1}' \
 --tags '[{"Key": "Name","Value": "test"}]'

$ aws dynamodb delete-table --table-name testtable

put item

$ jq '.' put-item.json
{
  "Artist": {
    "S": "The Beatles"
  },
  "AlbumTitle": {
    "S": "Please Please Me"
  },
  "songs": {
    "L": [
      {
        "S": "I Saw Her Standing There"
      },
      {
        "S": "Misery"
      }
    ]
  }
}
$ aws dynamodb put-item --table-name testtable --item file://put-item.json

get and delete item

$ aws dynamodb get-item --table-name testtable --key '{ "Artist": { "S": "The Beatles" },"AlbumTitle": { "S": "Please Please Me" } }'
$ aws dynamodb delete-item --table-name testtable --key '{ "Artist": { "S": "The Beatles" },"AlbumTitle": { "S": "Please Please Me" } }'

backup and restore database

create backup

$ aws dynamodb list-backups --table-name testtable
$ aws dynamodb create-backup --table-name testtable --backup-name testtablebackup

describe backup

$ aws dynamodb describe-backup --backup-arn $(aws dynamodb list-backups --table-name "testtable" --query 'max_by(BackupSummaries[?BackupName == `testtablebackup`], &BackupCreationDateTime).BackupArn' | jq -r .)

restore from newest backup

$ aws dynamodb delete-table --table-name testtable
$ aws dynamodb restore-table-from-backup --target-table-name testtable --backup-arn $(aws dynamodb list-backups --table-name "testtable" --query 'max_by(BackupSummaries[?BackupName == `testtablebackup`], &BackupCreationDateTime).BackupArn' | jq -r .)
$ aws dynamodb describe-table --table-name testtable --query 'Table.TableStatus'

remove oldest backup

$ aws dynamodb delete-backup --backup-arn $(aws dynamodb list-backups --table-name "testtable" --query 'max_by(BackupSummaries[?BackupName == `testtablebackup`], &BackupCreationDateTime).BackupArn' | jq -r .)

sample python script

put-item.py

#! /usr/bin/python3
"""Sample: insert one album record into the 'testtable' DynamoDB table."""
import boto3
import json

TABLE = 'testtable'

# Item expressed in DynamoDB's typed-attribute JSON format
# ("S" = string, "L" = list).
record = {
  "Artist": {"S": "The Beatles"},
  "AlbumTitle": {"S": "Please Please Me"},
  "songs": {
    "L": [
      {"S": "I Saw Her Standing There"},
      {"S": "Misery"},
    ]
  },
}

client = boto3.client('dynamodb')
response = client.put_item(TableName=TABLE, Item=record)
print(json.dumps(response))

get-item.py

#! /usr/bin/python3
"""Sample: fetch one album record from the 'testtable' DynamoDB table."""
import boto3
import json

TABLE = 'testtable'

# Full primary key (partition + sort) in DynamoDB's typed-attribute format.
primary_key = {
  "Artist": {"S": "The Beatles"},
  "AlbumTitle": {"S": "Please Please Me"},
}

client = boto3.client('dynamodb')
response = client.get_item(TableName=TABLE, Key=primary_key)
print(json.dumps(response))

delete-item.py

#! /usr/bin/python3
"""Sample: delete one album record from the 'testtable' DynamoDB table."""
import boto3
import json

TABLE = 'testtable'

# Full primary key (partition + sort) in DynamoDB's typed-attribute format.
primary_key = {
  "Artist": {"S": "The Beatles"},
  "AlbumTitle": {"S": "Please Please Me"},
}

client = boto3.client('dynamodb')
response = client.delete_item(TableName=TABLE, Key=primary_key)
print(json.dumps(response))
# To print just the HTTP status code instead:
# print(json.dumps(response['ResponseMetadata']['HTTPStatusCode']))

cloudformation template

    # DynamoDB table with a composite primary key:
    #   subject (string) = partition (HASH) key
    #   year    (number) = sort (RANGE) key
    TestDynamoDBTable:
      Type: AWS::DynamoDB::Table
      Properties:
        TableName: "TestDynamoDBTable"
        Tags:
          - Key: "Name"
            Value: "test"
        # Only key attributes are declared; DynamoDB is schemaless for
        # all non-key attributes.
        AttributeDefinitions:
          - AttributeName: "subject"
            AttributeType: "S"
          - AttributeName: "year"
            AttributeType: "N"
        KeySchema:
          - AttributeName: "subject"
            KeyType: "HASH"
          - AttributeName: "year"
            KeyType: "RANGE"
        # Provisioned billing with minimal capacity (1 RCU / 1 WCU),
        # matching the create-table CLI example above.
        BillingMode: "PROVISIONED"
        ProvisionedThroughput:
          ReadCapacityUnits: 1
          WriteCapacityUnits: 1

cryptsetup

luks

install a package

$ sudo apt install cryptsetup

format

$ sudo cryptsetup luksFormat /dev/md0 
$ sudo cryptsetup luksDump /dev/md0

open

$ sudo cryptsetup open /dev/md0 cryptfs
$ sudo cryptsetup status cryptfs

open tcrypt device

$ sudo cryptsetup open --type tcrypt /dev/md0

format mount

$ sudo mkfs -t ext4 /dev/mapper/cryptfs 
$ sudo mount /dev/mapper/cryptfs /mnt
$ df -h /mnt

umount and close

$ sudo umount /mnt 
$ sudo cryptsetup close cryptfs

mdadm

software raid

install mdadm package

$ sudo apt-get install mdadm

make dummy files for test

$ dd if=/dev/zero of=file.img bs=2M count=0 seek=512
$ cp -p file.img file0.img
$ cp -p file.img file1.img
$ cp -p file.img file2.img
$ cp -p file.img file3.img
$ cp -p file.img file4.img
$ ls -lhs file*img

losetup

$ sudo losetup /dev/loop0 file0.img
$ sudo losetup /dev/loop1 file1.img
$ sudo losetup /dev/loop2 file2.img
$ sudo losetup /dev/loop3 file3.img
$ sudo losetup /dev/loop4 file4.img

raid0

$ sudo mdadm --create /dev/md0 -l raid0 -n 2 /dev/loop0 /dev/loop1
$ cat /proc/mdstat
$ sudo mdadm --detail /dev/md0

$ sudo mdadm --detail --scan
$ sudo mdadm --detail --scan > /etc/mdadm.conf

mkfs and mount

$ sudo mkfs -t ext4 /dev/md0
$ sudo mount /dev/md0 /mnt
$ df -h /mnt

stop and remove settings

$ sudo mdadm --stop /dev/md0
$ sudo mdadm --zero-superblock /dev/loop0
$ sudo mdadm --zero-superblock /dev/loop1
( $ sudo rm -i /etc/mdadm.conf )

raid1

$ sudo mdadm --create /dev/md0 -l raid1 -n 2 /dev/loop0 /dev/loop1

make fail

$ sudo mdadm --stop /dev/md0
$ sudo losetup -d /dev/loop1

recover

$ sudo mdadm --assemble --scan -v
$ sudo mdadm --examine /dev/loop0

$ sudo mdadm --add /dev/md0 /dev/loop2
$ sudo mdadm --detail --scan > /etc/mdadm.conf

add extra disk

$ sudo losetup /dev/loop1 file1.img
$ sudo mdadm --add /dev/md0 /dev/loop1

make fail

$ sudo mdadm --stop /dev/md0
$ sudo losetup -d /dev/loop1

recover

$ sudo mdadm --assemble --scan -v
$ sudo mdadm --examine /dev/loop0
$ sudo mdadm --grow /dev/md0 --raid-devices=2

when disk alert has come.

$ sudo mdadm --fail /dev/md0 /dev/loop1
$ sudo mdadm --remove /dev/md0 /dev/loop1
$ sudo mdadm --add /dev/md0 /dev/loop3

If md0 has an extra (spare) disk, the array rebuilds automatically when a disk alert comes.

$ sudo losetup /dev/loop0 file0.img
$ sudo mdadm --add /dev/md0 /dev/loop0
$ sudo mdadm --fail /dev/md0 /dev/loop2
$ sudo mdadm --remove /dev/md0 /dev/loop2

raid5

/dev/loop3 is extra disk for spare

$ sudo mdadm --create /dev/md0 -l raid5 -n 3 /dev/loop0 /dev/loop1 /dev/loop2 -x 1 /dev/loop3

raid10

/dev/loop4 is extra disk for spare

$ sudo mdadm --create /dev/md0 -l raid10 -n 4 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 -x 1 /dev/loop4

aws

query with JMESPath

projection

$ aws ec2 describe-vpcs --query 'Vpcs[].VpcId'
$ aws ec2 describe-vpcs --query 'Vpcs[].{VpcId:VpcId, IsDefault:IsDefault}'

selection (if the target command has a --filters option, it is usually faster than --query.)

$ aws ec2 describe-vpcs --query 'Vpcs[?IsDefault == `true`]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=true'

$ aws ec2 describe-vpcs --query 'Vpcs[?Tags[?Key == `Name` && Value == `test`]]'
$ aws ec2 describe-vpcs --filters 'Name=tag:Name,Values=test'

function

$ aws ec2 describe-vpcs --query 'Vpcs[?contains(VpcId, `vpc`)].VpcId'

$ aws ec2 describe-vpcs --query 'sort_by(Vpcs[?contains(VpcId, `vpc`)].VpcId, &VpcId)'
$ aws ec2 describe-images --filters "Name=owner-id,Values=<id>" --query "sort_by(Images[].{Name:Name, ImageId:ImageId}, &Name)"

$ aws ec2 describe-vpcs --query 'length(Vpcs[?contains(VpcId, `vpc`)])'

generate-cli-skeleton output

$ aws ec2 describe-vpcs --generate-cli-skeleton output
$ aws ec2 describe-vpcs --generate-cli-skeleton output --query 'Vpcs[].{CidrBlock:CidrBlock, VpcId:VpcId}'
$ aws ec2 describe-vpcs --query 'Vpcs[].{CidrBlock:CidrBlock, VpcId:VpcId}'

generate-cli-skeleton input

$ aws ec2 describe-vpcs --generate-cli-skeleton input | tee describe-vpcs.json
$ vi describe-vpcs.json
$ jq . describe-vpcs.json
{
  "Filters": [
    {
      "Name": "tag:Name",
      "Values": [
        "test"
      ]
    }
  ]
}
$ aws ec2 describe-vpcs --cli-input-json file://describe-vpcs.json

samples of filter

$ aws ec2 describe-vpcs --filters '[{"Name":"isDefault","Values":["true"]}]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=true'

$ aws ec2 describe-vpcs --filters '[{"Name":"tag-key","Values":["aws:cloudformation:stack-name"]}]'
$ aws ec2 describe-vpcs --filters 'Name=tag-key,Values=aws:cloudformation:stack-name'

$ aws ec2 describe-vpcs --filters '[{"Name":"tag:Name","Values":["terraform_test","mystack-VPC"]}]'
$ aws ec2 describe-vpcs --filters 'Name=tag:Name,Values=terraform_test,mystack-VPC'

$ aws ec2 describe-vpcs --filters '[{"Name":"isDefault","Values":["false"]}, {"Name":"state","Values":["available"]}]'
$ aws ec2 describe-vpcs --filters 'Name=isDefault,Values=false','Name=state,Values=available'

vpc

describe region and availability zone

$ aws ec2 describe-regions --filters 'Name=region-name,Values=ap-northeast-1'
{
    "Regions": [
        {
            "Endpoint": "ec2.ap-northeast-1.amazonaws.com",
            "RegionName": "ap-northeast-1",
            "OptInStatus": "opt-in-not-required"
        }
    ]
}
$ aws ec2 describe-availability-zones --filters 'Name=region-name,Values=ap-northeast-1'
{
    "AvailabilityZones": [
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1a",
            "ZoneId": "apne1-az4"
        },
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1c",
            "ZoneId": "apne1-az1"
        },
        {
            "State": "available",
            "Messages": [],
            "RegionName": "ap-northeast-1",
            "ZoneName": "ap-northeast-1d",
            "ZoneId": "apne1-az2"
        }
    ]
}

vpc

$ aws ec2 describe-vpcs --filter "Name=cidr,Values=172.16.100.0/24"
$ aws ec2 create-vpc --cidr-block 172.16.100.0/24

$ aws ec2 describe-vpc-attribute --attribute enableDnsHostnames --vpc-id <vpc>
$ aws ec2 modify-vpc-attribute --enable-dns-hostnames --vpc-id <vpc>

Before removing the VPC, its subnets need to be removed.

$ aws ec2 delete-vpc --vpc-id <vpc>

tag

$ aws ec2 describe-vpcs --filters "Name=tag:Name,Values=test"
$ jq . tags.json
[
  {
    "Key": "Name",
    "Value": "test"
  }
]
$ aws ec2 create-tags --resources <vpc> --tags file://tags.json

$ aws ec2 create-tags --resources <vpc> --tags '[{"Key":"Name", "Value":"test"}]'

subnets

$ aws ec2 describe-subnets --filters "Name=cidr-block,Values=172.16.100.0/26"
$ aws ec2 create-subnet --vpc-id <vpc> --cidr-block 172.16.100.0/26
$ aws ec2 delete-subnet --subnet-id <subnet>

nacl

$ aws ec2 describe-network-acls --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-network-acl --vpc-id <vpc>
$ aws ec2 replace-network-acl-association --association-id <aclassoc> --network-acl-id <nacl>

$ aws ec2 delete-network-acl --network-acl-id <nacl>

nacl entry

protocol numbers: all: -1, icmp: 1, tcp: 6, udp: 17, icmpv6: 58

$ aws ec2 describe-network-acls --filters "Name=network-acl-id,Values=<nacl>"
$ aws ec2 create-network-acl-entry --ingress --network-acl-id <nacl> --cidr-block 172.16.100.64/26 --protocol -1 --rule-action allow --rule-number 100
$ aws ec2 create-network-acl-entry --egress  --network-acl-id <nacl> --cidr-block 172.16.100.64/26 --protocol -1 --rule-action allow --rule-number 100

$ aws ec2 delete-network-acl-entry --ingress --network-acl-id <nacl> --rule-number 100

internet gateway

$ aws ec2 describe-internet-gateways --query 'InternetGateways[?Attachments[?VpcId == `<vpc>`]]'
$ aws ec2 create-internet-gateway
$ aws ec2 attach-internet-gateway --internet-gateway-id <internetgateway> --vpc-id <vpc>

$ aws ec2 detach-internet-gateway --internet-gateway-id <internetgateway> --vpc-id <vpc>
$ aws ec2 delete-internet-gateway --internet-gateway-id <internetgateway>

route table

$ aws ec2 describe-route-tables --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-route-table --vpc-id <vpc>
$ aws ec2 associate-route-table --route-table-id <routetable> --subnet-id <subnet>

$ aws ec2 describe-route-tables --filters "Name=route-table-id,Values=<routetable>"
$ aws ec2 create-route --destination-cidr-block 0.0.0.0/0 --gateway-id <internetgateway> --route-table-id <routetable>

$ aws ec2 delete-route --destination-cidr-block 0.0.0.0/0 --route-table-id <routetable>
$ aws ec2 disassociate-route-table --association-id <rtbassoc>
$ aws ec2 delete-route-table --route-table-id <routetable>

security group

$ aws ec2 describe-security-groups --filters "Name=vpc-id,Values=<vpc>"
$ aws ec2 create-security-group --description "<description>" --group-name "<name>" --vpc-id <vpc>

$ aws ec2 describe-security-groups --filters "Name=group-id,Values=<securitygroup>"
$ aws ec2 authorize-security-group-ingress --group-id <securitygroup> --ip-permissions '[{"IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, "IpRanges": [{"CidrIp": "xxx.xxx.xxx.xxx/32", "Description": "ssh incoming access"}]}]'

$ aws ec2 revoke-security-group-ingress --group-id <securitygroup> --ip-permissions '[{"IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, "IpRanges": [{"CidrIp": "xxx.xxx.xxx.xxx/32", "Description": "ssh incoming access"}]}]'
$ aws ec2 delete-security-group --group-id <securitygroup>

key pair

$ aws ec2 describe-key-pairs 
$ aws ec2 describe-key-pairs --key-names <keyname>
$ aws ec2 create-key-pair --key-name <keyname> | tee id_rsa.testkey.json

$ aws ec2 delete-key-pair --key-name <keyname>
$ aws ec2 import-key-pair --key-name <keyname> --public-key-material file://id_rsa.testkey.pub

save the private key

$ jq -r '.KeyMaterial' id_rsa.testkey.json > id_rsa.testkey.nopass
$ openssl rsa -aes256 -in id_rsa.testkey.nopass -out id_rsa.testkey
$ chmod 600 id_rsa.testkey
($ rm id_rsa.testkey.json id_rsa.testkey.nopass)

volume

$ aws ec2 describe-volumes
$ aws ec2 create-volume --volume-type gp2 --size <size> --availability-zone <az>
$ aws ec2 delete-volume --volume-id <volume>

attach a volume to an instance

$ aws ec2 describe-instances --filters Name=instance-id,Values=<instance> --query 'Reservations[].Instances[].BlockDeviceMappings[]'
$ aws ec2 attach-volume --volume-id <volume> --instance-id <instance> --device /dev/xvdb

After the volume is attached, create the partition table, partitions, and file system on the OS side.

$ lsblk
$ sudo fdisk -l /dev/nvme1n1
$ sudo fdisk /dev/nvme1n1
$ sudo mkswap /dev/nvme1n1p1
$ sudo swapon /dev/nvme1n1p1
$ cat /proc/swaps
$ ls -l /dev/disk/by-uuid/
$ echo "UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx     none        swap   defaults          0   0" | sudo tee -a /etc/fstab

extend volume

$ aws ec2 describe-volumes --filters Name=volume-id,Values=<volume>
$ aws ec2 modify-volume --size <size> --volume-id <volume>

after that, extend partition and file system

$ df
$ sudo growpart /dev/nvme0n1 1
$ lsblk
$ sudo xfs_growfs -d /

ami

$ aws ec2 describe-images --filters "Name=owner-id,Values=<id>"
$ aws ec2 describe-images --filters "Name=image-id,Values=<ami>"
$ aws ec2 create-image --instance-id <instance> --name <name>
$ aws ec2 deregister-image --image-id <ami>

after deregister-image, need to delete snapshots and volumes

snapshot

$ aws ec2 describe-snapshots --filters Name=owner-id,Values=<id>
$ aws ec2 describe-snapshots --filters Name=snapshot-id,Values=<snapshot>
$ aws ec2 delete-snapshot --snapshot-id <snapshot>
$ aws ec2 create-snapshot --volume-id <volume>

launch template

$ aws ec2 describe-launch-templates
$ jq . template.json
{
  "ImageId": "<ami>",
  "InstanceType": "t3.nano",
  "CreditSpecification": {
    "CpuCredits": "standard"
  },
  "KeyName": "<keyname>",
  "InstanceInitiatedShutdownBehavior": "terminate"
}
$ aws ec2 create-launch-template --launch-template-name <name> --launch-template-data file://template.json
$ aws ec2 describe-launch-template-versions --launch-template-id <templateid> --versions <version>

$ aws ec2 create-launch-template-version \
 --launch-template-id <templateid> \
 --source-version <version> \
 --version-description "<description>" \
 --launch-template-data '{ "Monitoring": { "Enabled": true } }'
$ aws ec2 modify-launch-template --launch-template-id <templateid> --default-version <version>

$ aws ec2 delete-launch-template-versions --launch-template-id <templateid> --versions <version>
$ aws ec2 delete-launch-template --launch-template-id <templateid>

instance

$ aws ec2 describe-instances --filters "Name=tag:Name,Values=test"
$ cat userdata.sh 
sudo apt update
sudo apt install nginx
$ aws ec2 run-instances \
 --security-group-ids <securitygroup> \
 --subnet-id <subnet> \
 --associate-public-ip-address \
 --tag-specifications '{"ResourceType":"instance","Tags":[{"Key":"Name","Value":"test"}]}' \
 --launch-template LaunchTemplateName=<templatename> \
 --user-data file://userdata.sh
#--image-id <ami>
#--instance-type t3.nano
#--credit-specification standard
#--key-name <keyname>
#--instance-initiated-shutdown-behavior terminate

$ aws ec2 describe-instances --filters "Name=tag:Name,Values=test" --query 'Reservations[].Instances[].{InstanceId:InstanceId,State:State}'
$ aws ec2 stop-instances --instance-ids <instance>
$ aws ec2 start-instances --instance-ids <instance>
$ aws ec2 terminate-instances --instance-ids <instance>

instance meta data

curl http://169.254.169.254/latest/meta-data/instance-id
curl http://169.254.169.254/latest/user-data

iam

user

$ aws iam list-users --query 'Users[?UserName==`<username>`]'
$ aws iam get-user --user-name <username>

$ aws iam create-user --user-name <username>
$ aws iam tag-user --user-name <username> --tags '[{"Key":"name","Value":"test"}]'

$ aws iam delete-user --user-name <username>

login profile

$ aws iam get-login-profile --user-name <username>
$ aws iam create-login-profile --generate-cli-skeleton > create-login-profile.json

$ jq '.' create-login-profile.json
{
  "UserName": "<username>",
  "Password": "<initialpassword>",
  "PasswordResetRequired": true
}
$ aws iam create-login-profile --user-name <username> --cli-input-json file://create-login-profile.json

group

$ aws iam list-groups  --query 'Groups[?GroupName==`<groupname>`]'
$ aws iam create-group --group-name <groupname>

$ aws iam delete-group --group-name <groupname>

policy

$ aws iam list-policies --query 'Policies[?PolicyName==`<policyname>`]'
$ aws iam get-policy --policy-arn <policyarn>

$ jq . policy.json 
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:Get*",
        "s3:List*"
      ],
      "Resource": "*"
    }
  ]
}

$ aws iam create-policy --policy-name <policyname> --policy-document file://policy.json

$ aws iam list-policy-versions --policy-arn <policyarn>
$ aws iam get-policy-version --policy-arn <policyarn> --version-id v1
$ aws iam create-policy-version --policy-arn <policyarn> --policy-document file://policy_v2.json
$ aws iam set-default-policy-version --policy-arn <policyarn> --version-id v1

$ aws iam delete-policy-version --policy-arn <policyarn> --version-id v2
$ aws iam delete-policy --policy-arn <policyarn>

sample policy for allow change password

$ jq '.' policy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "iam:GetAccountPasswordPolicy",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "iam:ChangePassword",
      "Resource": "<userarn>"
    }
  ]
}

add user to group

$ aws iam list-groups-for-user --user-name <username>
$ aws iam add-user-to-group --group-name <groupname> --user-name <username>

$ aws iam remove-user-from-group --group-name <groupname> --user-name <username>

attach policy to user

$ aws iam list-attached-user-policies --user-name <username>
$ aws iam attach-user-policy --user-name <username> --policy-arn "<policyarn>"
$ aws iam detach-user-policy --user-name <username> --policy-arn "<policyarn>"

attach policy to group

$ aws iam list-attached-group-policies --group-name <groupname>
$ aws iam attach-group-policy --group-name <groupname> --policy-arn "<policyarn>"
$ aws iam detach-group-policy --group-name <groupname> --policy-arn "<policyarn>"

role

$ aws iam list-roles --query 'Roles[?RoleName==`<rolename>`]'
$ jq . assumepolicy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
$ aws iam create-role --role-name <rolename> --assume-role-policy-document file://assumepolicy.json

(Before you delete a role, you must remove it from its instance profiles.)

$ aws iam delete-role --role-name <rolename>

attach policy to role

$ aws iam list-attached-role-policies --role-name <rolename>
$ aws iam attach-role-policy --role-name <rolename> --policy-arn <policyarn>
$ aws iam detach-role-policy --role-name <rolename> --policy-arn <policyarn>

add role to instance profile

$ aws iam list-instance-profiles-for-role --role-name <rolename>
$ aws iam add-role-to-instance-profile --instance-profile-name <rolename> --role-name <rolename>
$ aws iam remove-role-from-instance-profile --instance-profile-name <rolename> --role-name <rolename>

associate iam instance profile

$ aws ec2 describe-iam-instance-profile-associations 
$ aws ec2 associate-iam-instance-profile --iam-instance-profile '{"Arn":"<rolearn>","Name":"<rolename>"}' --instance-id <instanceid>
$ aws ec2 disassociate-iam-instance-profile --association-id <associationid>

access key

$ aws iam list-access-keys --user-name <user-name>
$ aws iam get-access-key-last-used --access-key-id <access-key-id>

$ aws iam create-access-key --user-name <user-name>
$ aws iam update-access-key --user-name <user-name> --access-key-id <access-key-id> --status Inactive
$ aws iam delete-access-key --user-name <user-name> --access-key-id <access-key-id>

cloudformation sample template

    # IAM role that the Lambda service is allowed to assume.
    TestLambdaRole:
      Type: AWS::IAM::Role
      Properties:
        # Trust policy: only lambda.amazonaws.com may call sts:AssumeRole.
        AssumeRolePolicyDocument:
          Version: 2012-10-17
          Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole
        Tags:
          - "Key": "Name"
            "Value": "test"
    # Inline policy granting item-level read/write/delete on the test table,
    # attached to TestLambdaRole.
    AllowAccessDynamoDBTable:
      Type: "AWS::IAM::Policy"
      Properties:
        PolicyName: "AllowAccessDynamoDBTable"
        PolicyDocument:
          Version: "2012-10-17"
          Statement:
            - Effect: "Allow"
              Action:
                - dynamodb:GetItem
                - dynamodb:PutItem
                - dynamodb:DeleteItem
              # References the table resource defined elsewhere in this template.
              Resource: !GetAtt TestDynamoDBTable.Arn
        Roles:
          - !Ref TestLambdaRole

sqs

create queue

aws sqs list-queues
aws sqs create-queue --queue-name testqueue

get queue url or attributes

aws sqs get-queue-url --queue-name testqueue
aws sqs get-queue-attributes --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --attribute-names All

handle message

aws sqs send-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --message-body '{"foo":"bar"}'
aws sqs receive-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>
{
    "Messages": [
        {
            "MessageId": "",
            "ReceiptHandle": "",
            "MD5OfBody": "",
            "Body": "{}"
        }
    ]
}
aws sqs delete-message --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename> --receipt-handle "<ReceiptHandle>"
aws sqs purge-queue --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>

delete queue

aws sqs delete-queue --queue-url https://<region>.queue.amazonaws.com/<id>/<queuename>

sample python script

send message

receive message