20 Commits

SHA1 Message Date
d87f7117d1 Adding latest 2020-11-19 19:39:06 +00:00
c4dd5c1463 Updating subnets 2020-07-30 02:19:48 +01:00
fcd82ae4e5 Updating subnets 2020-07-30 02:18:29 +01:00
7fb2ea43f5 Commenting out DB Subnets 2020-07-30 02:16:44 +01:00
f13baee42f Editing subnets 2020-07-30 02:13:59 +01:00
403e007d05 Adding VPC/Subnets to config files 2020-07-30 01:59:24 +01:00
b33a7001b5 Changing to security group name rather than ID 2020-07-30 01:42:29 +01:00
1f91092468 Updating eb config 2020-07-30 01:25:25 +01:00
149a5a199d Updating single instance 2020-07-30 01:20:16 +01:00
cae918f832 Single instance, external DB (eb create --single) 2020-07-30 00:42:23 +01:00
e860a4557c Updating single instance configs 2020-07-29 15:39:12 +01:00
b07e4e1b18 Creating single instance with db (eb create --single --database) 2020-07-29 15:33:54 +01:00
a5bcb9e998 Updating package-lock 2020-07-28 23:26:57 +01:00
28313a25d1 Updating .ebignore 2020-07-28 23:02:42 +01:00
065a6e91a8 Updating config 2020-07-28 23:01:45 +01:00
88d0cd0755 Moving documentation 2020-07-28 23:01:36 +01:00
3b18a9193d Adding terraform 2020-07-28 23:01:23 +01:00
b1a31b4f3f updating documentation 2020-05-03 04:59:30 +01:00
2ef0b3f5d7 updating documentation 2020-05-03 04:36:14 +01:00
8d8fcf4834 updating RDS endpoint 2020-05-03 04:36:03 +01:00
27 changed files with 954 additions and 96 deletions


@@ -1,10 +1,10 @@
option_settings:
# aws:elasticbeanstalk:environment:
# EnvironmentType: SingleInstance
# aws:rds:dbinstance:
# DBEngine: postgres
# DBInstanceClass: "db.t2.micro"
# DBAllocatedStorage: 5
# DBUser: strapi
aws:rds:dbinstance:
DBEngine: postgres
DBInstanceClass: "db.t2.micro"
DBAllocatedStorage: 5
DBUser: strapi
aws:ec2:instances:
InstanceTypes: "t2.micro"


@@ -5,16 +5,16 @@ option_settings:
value: true
- option_name: STRAPI_LOG_LEVEL
value: debug
- option_name: STRAPI_S3_ACCESS_KEY
value: AKIA23D4RF6OZWGDKV7W
- option_name: STRAPI_S3_SECRET_KEY
value: "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"
# - option_name: STRAPI_S3_ACCESS_KEY
# value: AKIA23D4RF6OZWGDKV7W
# - option_name: STRAPI_S3_SECRET_KEY
# value: "4sb/fxewDGjMYLocjclPCWDm7JTBCYuFBjQAbbBR"
- option_name: STRAPI_S3_REGION
value: "eu-west-1"
- option_name: STRAPI_S3_BUCKET
value: "elb-example-bucket-cf"
value: "prod-strapi-eb-strapi-uploads"
- option_name: RDS_HOSTNAME
value: src2ziuj8oxjct.chgwfe43ss59.eu-west-1.rds.amazonaws.com
value: prod-strapi-eb.chgwfe43ss59.eu-west-1.rds.amazonaws.com
- option_name: RDS_PORT
value: 5432
- option_name: RDS_NAME


@@ -1,3 +1,4 @@
# Permanently disabled
# container_commands:
# installpg:
# command: "npm install pg"


@@ -1,3 +1,4 @@
# Done in the Cloudformation 02-stack-vpc.yaml
# Resources:
# sslSecurityGroupIngress:
# Type: AWS::EC2::SecurityGroupIngress


@@ -1,14 +1,14 @@
option_settings:
aws:ec2:vpc:
VPCId: vpc-02f98fa754899162c
Subnets: "subnet-0b17872a2b9315fad,subnet-0342e8a0a77b30e23,subnet-0eacb84d238279a58"
DBSubnets: "subnet-0b17872a2b9315fad,subnet-0342e8a0a77b30e23,subnet-0eacb84d238279a58"
ELBSubnets: "subnet-0b17872a2b9315fad,subnet-0342e8a0a77b30e23,subnet-0eacb84d238279a58"
VPCId: vpc-016efd8cfbcca99a8
Subnets: "subnet-00c0725542e08b1d7,subnet-039fd98ceb88c863c,subnet-0b9fab172a19d818b"
# DBSubnets: "subnet-00c0725542e08b1d7,subnet-039fd98ceb88c863c,subnet-0b9fab172a19d818b"
# ELBSubnets: "subnet-00c0725542e08b1d7,subnet-039fd98ceb88c863c,subnet-0b9fab172a19d818b"
aws:autoscaling:launchconfiguration:
SecurityGroups: sg-07a97fc88ba143f26
aws:elbv2:loadbalancer:
ManagedSecurityGroup: sg-0e6f91df2ed07050a
SecurityGroups: sg-0e6f91df2ed07050a
SecurityGroups: sg-087f33381c535528b
# aws:elbv2:loadbalancer:
# ManagedSecurityGroup: sg-0e6f91df2ed07050a
# SecurityGroups: sg-0e6f91df2ed07050a
aws:autoscaling:asg:
MinSize: 1
MaxSize: 4
MaxSize: 1


@@ -1,4 +1,4 @@
option_settings:
aws:elbv2:listener:443:
Protocol: HTTPS
SSLCertificateArns: arn:aws:acm:eu-west-1:745437999005:certificate/218876af-7f8d-4022-97af-ad982aa540bc
# option_settings:
# aws:elbv2:listener:443:
# Protocol: HTTPS
# SSLCertificateArns: arn:aws:acm:eu-west-1:745437999005:certificate/218876af-7f8d-4022-97af-ad982aa540bc


@@ -1,2 +1,4 @@
node_modules
.tmp
infrastructure
documentation

.gitignore vendored

@@ -115,3 +115,42 @@ build
.elasticbeanstalk/*
!.elasticbeanstalk/*.cfg.yml
!.elasticbeanstalk/*.global.yml
############################
# Terraform
############################
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
# *.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc


@@ -0,0 +1,73 @@
# Security groups
## Load balanced
1 for the EC2 instances (applied to the autoscaler).
The instances can be private.
A gateway VPC endpoint is needed for S3 uploads.
1 for the RDS.
1 for the LB.
## Single instances
1 for the EC2 instances (applied to the autoscaler).
The instances need to be public.
No gateway VPC endpoint is needed - the instances have internet access.
1 for the RDS.
If using `--database` you don't need to create any SG. Let EB use the default VPC. It will create everything for you.
If not using `--database`:
EC2:
- Create a SG for the EC2 instances (see the CLI sketch after this list).
- It should allow ingress from anywhere (0.0.0.0/0) on ports 80 and 443.
- It should allow egress to anywhere (0.0.0.0/0) on all ports.
RDS:
- Specify `security_group_ids` with the EC2 SG; an SG is then created for the RDS with the SG you pass in allowed as ingress.
- Specify `associate_security_group_ids` to attach an additional security group to the RDS instance (if you need to enable public access).
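If you are creating the EC2 security group by hand rather than through the Terraform in `infrastructure/`, a minimal AWS CLI sketch of the rules above could look like this (all IDs and names are placeholders):

```bash
# Create the security group for the EC2 instances (placeholder VPC ID and name).
aws --profile admin ec2 create-security-group \
  --group-name strapi-eb-ec2 \
  --description "EC2 instances behind Elastic Beanstalk" \
  --vpc-id vpc-xxxxxxxxxxxxxxxxx

# Allow ingress from anywhere on 80 and 443. Egress to all is the default for a
# new security group, so no extra egress rule is needed.
aws --profile admin ec2 authorize-security-group-ingress \
  --group-id sg-xxxxxxxxxxxxxxxxx --protocol tcp --port 80 --cidr 0.0.0.0/0
aws --profile admin ec2 authorize-security-group-ingress \
  --group-id sg-xxxxxxxxxxxxxxxxx --protocol tcp --port 443 --cidr 0.0.0.0/0
```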
## Commands
Deploy CF
`aws --profile admin cloudformation deploy --template-file ./03-stack-rdsinstance.yaml --stack-name strapi-rds --parameter-overrides StackName=strapi-vpc --tags git=web-dev owner=home project=strapi-elb test=true deployment=cloudformation`
Destroy CF
`aws --profile admin cloudformation delete-stack --stack-name strapi-rds`
Terraform
`gmake plan`
`gmake apply`
`gmake destroy`
EB Single instance
`eb create --single`
with DB
`eb create --single --database`
Deploy code to environment
`apps-awsebcli`
Health check
`eb health`
Open the URL
`eb open`
Terminate
`eb terminate`


@@ -0,0 +1,80 @@
# Notes
## HTTPS
### With load balancer
HTTPS can terminate at the load balancer.
Traffic from the load balancer to the EC2 instances can be plain HTTP.
From the front end all is well, as the client connection is secure.
When terminating at the load balancer, 08-loadbalancer.config shows the required option settings.
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/configuring-https-elb.html>
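If there is no certificate in ACM yet for `SSLCertificateArns`, one can be requested from the CLI. This is only a sketch with a placeholder domain, not necessarily how the certificate referenced in 08-loadbalancer.config was created:

```bash
# Request a DNS-validated certificate; the issued certificate's ARN is what goes
# into the SSLCertificateArns option setting in 08-loadbalancer.config.
aws --profile admin acm request-certificate \
  --domain-name "*.example.co.uk" \
  --validation-method DNS \
  --region eu-west-1

# List certificates to pick up the ARN once validation has completed.
aws --profile admin acm list-certificates --region eu-west-1
```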
## Database
Connecting an external DB: <https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/AWSHowTo.RDS.html>
Configure the Auto Scaling group to use an additional security group that allows ingress to the RDS instance.
You can configure the RDS credentials either with environment variables in the ELB config file, or use S3: <https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/rds-external-credentials.html>.
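As an alternative to hard-coding the values in the .ebextensions env config, the same environment properties can be set on a running environment with the EB CLI. The variable names mirror the RDS_* settings in this repo's config; the values below are placeholders, and credential variables can be added the same way:

```bash
# Placeholder values; sets the RDS connection details as environment properties
# on the current EB environment.
eb setenv \
  RDS_HOSTNAME=prod-strapi-eb.xxxxxxxxxxxx.eu-west-1.rds.amazonaws.com \
  RDS_PORT=5432 \
  RDS_NAME=postgres
```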
To create your own RDS instance you will need to create:
- A VPC - for the RDS
- Subnets - for the RDS
- A subnet group
- A security group
Use `aws ec2 describe-availability-zones --region eu-west-1 --profile admin` to get a list of availability zones for the region.
The VPC Terraform will create:
- An IGW
- A route table
- A security group
## AWS Networking
- A VPC is a network that you give a CIDR block to.
- You create subnets for a VPC. These subnets are split evenly across availability zones (for redundancy) and into public/private (whether they have internet access or not).
- Behind the scenes (if using TF), the internet gateway, route tables and attachments will all be created for you; if using CF you will need to create these yourself (see the CLI sketch after this list).
- A security group is a firewall that is _attached to an EC2 instance_. A security group belongs to a VPC. You can permit instances to talk to each other by setting the source and destination to be the security group itself. Security groups let you control ports/IPs exactly on a per-instance basis.
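For reference, the same plumbing spelled out with the AWS CLI (all IDs are placeholders); this is roughly what the Terraform modules create behind the scenes:

```bash
# VPC (CIDR as in infrastructure/main.tf) and one example public subnet.
aws --profile admin ec2 create-vpc --cidr-block 172.16.0.0/16
aws --profile admin ec2 create-subnet --vpc-id vpc-xxxx \
  --cidr-block 172.16.0.0/24 --availability-zone eu-west-1a

# Internet gateway, attached to the VPC.
aws --profile admin ec2 create-internet-gateway
aws --profile admin ec2 attach-internet-gateway --internet-gateway-id igw-xxxx --vpc-id vpc-xxxx

# Route table with a default route via the IGW, associated with the subnet.
aws --profile admin ec2 create-route-table --vpc-id vpc-xxxx
aws --profile admin ec2 create-route --route-table-id rtb-xxxx \
  --destination-cidr-block 0.0.0.0/0 --gateway-id igw-xxxx
aws --profile admin ec2 associate-route-table --route-table-id rtb-xxxx --subnet-id subnet-xxxx
```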
## HTTPS
### Single instance
As HTTPS terminates on the EC2 instance itself, you need to amend the nginx config locally. This is specific to each application you are deploying.
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/https-singleinstance-nodejs.html>.
You need to generate a certificate locally.
`pip install certbot`
`sudo certbot certonly --manual --preferred-challenges=dns --email dtomlinson@panaetius.co.uk --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d "*.panaetius.co.uk"`
### Load balanced
You have two options:
1. Terminate on the load balancer (easiest).
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/configuring-https-elb.html>.
You can use AWS Certificate Manager to generate your SSL cert, or you can upload your own.
Use a .config file as documented above and EB will handle the rest.
2. Pass through to the instance.
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/https-tcp-passthrough.html>.
If you do this you need to set up termination on the EC2 instances using the config for a single instance above.
You can pass TCP through without the load balancer decrypting the traffic; the traffic stays encrypted all the way to the instance. Traffic between the instances themselves is HTTP.
Additionally you can configure end-to-end encryption between the EC2 instances if you have strict security requirements.
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/configuring-https-endtoend.html>.

documentation/steps.todo Normal file

@@ -0,0 +1,39 @@
Connecting external DB:
✔ Create RDS using TF @important @today @done (7/28/2020, 11:34:12 PM)
RDS Config:
☐ Try using `associate_security_group_ids` and creating a security group to allow all incoming traffic to the RDS instance.
Email:
☐ Add `strapi-provider-email-amazon-ses` and configure.
Deployments:
One:
✔ Create S3 bucket for strapi s3. @done (7/29/2020, 2:07:55 PM)
✔ Deploy TF with additional SG for DB. @done (7/30/2020, 3:02:39 AM)
☐ Have TF produce outputs with everything needed.
✔ Redeploy single instance with the EB config file with VPCs created. @done (7/30/2020, 3:02:41 AM)
Two:
☐ Have SSL enabled for single instance.
Three:
☐ Have SSL enabled for multiple instance.
Misc:
☐ Have the EB instances on the private subnet.
☐ Create a Gateway VPC endpoint: <https://docs.aws.amazon.com/vpc/latest/userguide/vpce-gateway.html>.
Prod Steps:
☐ Plan out the posts needed for the series.
This needs to be done at the same time as writing the site pages.
☐ Create everything from scratch
Strapi:
☐ Install from new.
☐ Create TF files.
☐ Initialise EB environment.
☐ Deploy TF.
☐ Deploy EB environment for single instance to start.
Today:
☐ Redeploy with updated config.
☐ Enable HTTPS for single instance.
☐ Use S3 to read in secrets.


@@ -1,35 +1,33 @@
<!-- vscode-markdown-toc -->
* [Decoupling](#Decoupling)
* [Creating Database + VPC + Subnets in Cloudformation](#CreatingDatabaseVPCSubnetsinCloudformation)
* [Single instance (no load balancer)](#Singleinstancenoloadbalancer)
* [EC2::VPC](#EC2::VPC)
* [Enable DNS](#EnableDNS)
* [EC2::Subnet](#EC2::Subnet)
* [EC2::InternetGateway](#EC2::InternetGateway)
* [EC2::VPCGatewayAttachment](#EC2::VPCGatewayAttachment)
* [AWS::EC2::RouteTable](#AWS::EC2::RouteTable)
* [AWS::EC2::Route](#AWS::EC2::Route)
* [AWS::EC2::SubnetRouteTableAssociation](#AWS::EC2::SubnetRouteTableAssociation)
* [Running notes](#Runningnotes)
* [Database](#Database)
* [Work Commands](#WorkCommands)
* [tags](#tags)
* [deploy](#deploy)
* [delete](#delete)
* [describe-stack-resources](#describe-stack-resources)
* [Adding SSL to ELB](#AddingSSLtoELB)
* [With load balancer](#Withloadbalancer)
* [EB Templates/Resources](#EBTemplatesResources)
* [Configuring security groups](#Configuringsecuritygroups)
* [Elastic Load Balancer](#ElasticLoadBalancer)
* [Elastic Scaler](#ElasticScaler)
* [RDS](#RDS)
* [Security group to allow EC2 instances to talk to each other](#SecuritygrouptoallowEC2instancestotalktoeachother)
* [Custom VPC + Subnets in EB](#CustomVPCSubnetsinEB)
* [Using cloudformation functions in EB config files](#UsingcloudformationfunctionsinEBconfigfiles)
* [Creating a read replica RDS](#CreatingareadreplicaRDS)
* [Multiple security groups on the same resource](#Multiplesecuritygroupsonthesameresource)
* [Private subnets](#Privatesubnets)
- [Decoupling](#Decoupling)
- [Creating Database + VPC + Subnets in Cloudformation](#CreatingDatabaseVPCSubnetsinCloudformation)
- [Single instance (no load balancer)](#Singleinstancenoloadbalancer)
  - [EC2::VPC](#EC2::VPC)
    - [Enable DNS](#EnableDNS)
  - [EC2::Subnet](#EC2::Subnet)
  - [EC2::InternetGateway](#EC2::InternetGateway)
  - [EC2::VPCGatewayAttachment](#EC2::VPCGatewayAttachment)
  - [AWS::EC2::RouteTable](#AWS::EC2::RouteTable)
  - [AWS::EC2::Route](#AWS::EC2::Route)
  - [AWS::EC2::SubnetRouteTableAssociation](#AWS::EC2::SubnetRouteTableAssociation)
- [Running notes](#Runningnotes)
  - [Database](#Database)
- [Work Commands](#WorkCommands)
  - [tags](#tags)
  - [deploy](#deploy)
  - [delete](#delete)
  - [describe-stack-resources](#describe-stack-resources)
- [Adding SSL to ELB](#AddingSSLtoELB)
  - [With load balancer](#Withloadbalancer)
- [EB Templates/Resources](#EBTemplatesResources)
- [Configuring security groups](#Configuringsecuritygroups)
  - [Elastic Load Balancer](#ElasticLoadBalancer)
  - [Elastic Scaler](#ElasticScaler)
  - [RDS](#RDS)
    - [Security group to allow EC2 instances to talk to each other](#SecuritygrouptoallowEC2instancestotalktoeachother)
- [Custom VPC + Subnets in EB](#CustomVPCSubnetsinEB)
- [Using cloudformation functions in EB config files](#UsingcloudformationfunctionsinEBconfigfiles)
- [Creating a read replica RDS](#CreatingareadreplicaRDS)
- [Multiple security groups on the same resource](#Multiplesecuritygroupsonthesameresource)
- [Private subnets](#Privatesubnets)
<!-- vscode-markdown-toc-config
numbering=false
@@ -304,3 +302,45 @@ If you use private subnets, the nat gateway is not cheap - £30 a month.
You don't need the NAT gateway; you can achieve the same thing with security groups (block all incoming), as explained at <https://www.reddit.com/r/aws/comments/75bjei/private_subnets_nats_vs_simply_only_allowing/>.
An advantage to NAT is all outgoing requests to the internet come from a single IP.
## Using certbot CLI to generate SSL
### Wildcard certificate
In a new virtualenv install certbot:
```bash
pip install certbot
```
Run the `certbot` command:
```bash
sudo certbot certonly --manual --preferred-challenges=dns --email dtomlinson@panaetius.co.uk --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d "*.panaetius.co.uk"
```
Follow the instructions to add a `TXT` record to your DNS server for validation.
When finished you should see:
```markdown
- Congratulations! Your certificate and chain have been saved at:
/etc/letsencrypt/live/panaetius.co.uk/fullchain.pem
Your key file has been saved at:
/etc/letsencrypt/live/panaetius.co.uk/privkey.pem
Your cert will expire on 2020-08-01. To obtain a new or tweaked
version of this certificate in the future, simply run certbot
again. To non-interactively renew _all_ of your certificates, run
"certbot renew"
- Your account credentials have been saved in your Certbot
configuration directory at /etc/letsencrypt. You should make a
secure backup of this folder now. This configuration directory will
also contain certificates and private keys obtained by Certbot so
making regular backups of this folder is ideal.
```
## Terraform
### Elastic Beanstalk
Editing the EB default resources in Terraform: <https://github.com/cloudposse/terraform-aws-elastic-beanstalk-environment/issues/98#issuecomment-620677233>.

documentation/todo.md Normal file

@@ -0,0 +1,72 @@
# To Do
## Immediate
Merge the CF templates into one, make sure all the importing and other snippets are documented.
- Create single instance deployment + https (document)
- For https: use letsencrypt to generate ssl, configure the eb config to use this.
- Final git branch for each version of the app (load balanced https/http, single http/https).
- Terraform it all up (excluding single + https).
## Long term
Use codebuild to update strapi
Use circle CI instead
Cloudformation template to deploy an S3 bucket
## Documentation
Summarise the flow -> VPC, internet gateway, attachment + route tables, subnets etc. Mention the nat gateway but show how it can be replaced with security groups. Document each individual resource needed bullet point and link to the git repo for the TF/CF templates.
## Running Notes
Various deployments:
- Single instance with EBCLI
- Load balanced with EBCLI
- Single instance with terraform
- Load balanced with terraform
HTTP + HTTPS
Single instance with terraform isn't possible with HTTPS - this is because you can't edit `Resources` or `Files` (and the other advanced EB configs). A workaround would be to create a docker image.
Single instance with EBCLI isn't possible with HTTPS if you're using Certificate Manager to generate the certificates - this is because you need to edit the nginx proxy config locally on the instance to allow HTTPS, and you don't have access to the private certificate with Certificate Manager.
One solution would be to generate your SSL using letsencrypt - then configure the instance with this.
Another solution would be to use Docker and build a custom image. In this image you could install and configure nginx (using Let's Encrypt in a multistage build to get your certificate).
HTTPS for load balanced environment just requires pointing a domain to the EB endpoint. You can tell the load balancer to forward 443 in the security group without using it.
For final deployment - use an EC2 instance (deploy with TF).
### Other
Work:
Can we use APIGateway + Fargate to run an API containerised?
Fargate documentation: <https://aws.amazon.com/fargate/>.
Fargate option in ECS terraform: <https://www.terraform.io/docs/providers/aws/r/ecs_service.html#launch_type>.
Lambda vs Fargate differences: <https://www.learnaws.org/2019/09/14/deep-dive-aws-fargate/>.
Fargate vs EC2 pricing: <https://www.reddit.com/r/aws/comments/8reem9/fargate_t2small_cost_comparison_dollar_to_dollar/>.
Reddit thread on using API Gateway + Fargate: <https://www.reddit.com/r/aws/comments/bgqz4g/can_api_gateway_route_to_a_container_in_fargate/>.
Using API Gateway + Private endpoints (in a VPC): <https://aws.amazon.com/blogs/compute/introducing-amazon-api-gateway-private-endpoints/>.
Fargate is just running containers serverless - but it isn't a direct replacement to lambda. The spin up times can be long, but if you need to run a task on a schedule and this doesn't matter, you can save money and time as you don't need to manage and run an EC2 instance for docker containers. It's not ideal for tasks that need to be running 24/7.
Have separate repos for Terraform + Ansible. Split them inside by project. One central place for all TF and Ansible will make things easier to reference later.
Generate SSH keys for EC2.
Provision EC2 using TF - set SG to allow SSH from your IP.
Configure EC2 with an Ansible playbook.
## Single options
- Dockerise it + run on EC2/ECS/Fargate
- Use EBCLI + Config options for https. Generate SSL using lets encrypt.
Using certbot with docker: <https://certbot.eff.org/docs/install.html#running-with-docker>
Forcing http > https redirection: <https://github.com/awsdocs/elastic-beanstalk-samples/tree/master/configuration-files/aws-provided/security-configuration/https-redirect/nodejs>.

documentation/updated.md Normal file

@@ -0,0 +1,28 @@
Follow this tutorial to do Python with ASGI.
Try with a native Python deployment + Docker.
<https://towardsdatascience.com/building-web-app-for-computer-vision-model-deploying-to-production-in-10-minutes-a-detailed-ec6ac52ec7e4>
Try with single instance - does it use the DB settings in .ebextensions?
Have documented options for
- Single instance
- Single instance with DB
- Load balanced instance
Create an RDS instance, ensure the default SG is allowed on ingress to the DB.
Use this SG to define an ebextensions file
<https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/rds-external-defaultvpc.html>
<https://github.com/awsdocs/elastic-beanstalk-samples/blob/master/configuration-files/aws-provided/security-configuration/securitygroup-addexisting.config>
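One way to open that ingress from the CLI, following the first link above (group IDs are placeholders):

```bash
# sg-xxxxxxxx is the security group on the RDS instance, sg-yyyyyyyy is the
# default VPC security group used by the EB instances.
aws --profile admin ec2 authorize-security-group-ingress \
  --group-id sg-xxxxxxxx \
  --protocol tcp --port 5432 \
  --source-group sg-yyyyyyyy
```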
Using a custom VPC created yourself (how it's done now): <https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/vpc.html>
Allows complete control over the security settings.
Q? If we use `--single` it will only create:
- Instance subnets: one of the public subnets
- Instance security groups: add the default security group
Will it ignore the loadbalancer + autoscaling settings even if we define them in 07.config?

infrastructure/.vscode/settings.json vendored Normal file

@@ -0,0 +1 @@
{}

infrastructure/LICENSE Normal file

@@ -0,0 +1,19 @@
MIT License Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

infrastructure/Makefile Normal file

@@ -0,0 +1,215 @@
# Copyright 2016 Philip G. Porada
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.ONESHELL:
.SHELL := /usr/bin/bash
.PHONY: apply destroy-backend destroy destroy-target plan-destroy plan plan-target prep
-include Makefile.env
# VARS="variables/$(ENV)-$(REGION).tfvars"
VARS="$(ENV)-$(REGION).tfvars"
CURRENT_FOLDER=$(shell basename "$$(pwd)")
S3_BUCKET="$(ENV)-$(REGION)-$(PROJECT)-terraform"
DYNAMODB_TABLE="$(ENV)-$(REGION)-$(PROJECT)-terraform"
WORKSPACE="$(ENV)-$(REGION)"
BOLD=$(shell tput bold)
RED=$(shell tput setaf 1)
GREEN=$(shell tput setaf 2)
YELLOW=$(shell tput setaf 3)
RESET=$(shell tput sgr0)
help:
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
set-env:
@if [ -z $(ENV) ]; then \
echo "$(BOLD)$(RED)ENV was not set$(RESET)"; \
ERROR=1; \
fi
@if [ -z $(REGION) ]; then \
echo "$(BOLD)$(RED)REGION was not set$(RESET)"; \
ERROR=1; \
fi
@if [ -z $(AWS_PROFILE) ]; then \
echo "$(BOLD)$(RED)AWS_PROFILE was not set.$(RESET)"; \
ERROR=1; \
fi
@if [ ! -z $${ERROR} ] && [ $${ERROR} -eq 1 ]; then \
echo "$(BOLD)Example usage: \`AWS_PROFILE=whatever ENV=demo REGION=us-east-2 make plan\`$(RESET)"; \
exit 1; \
fi
@if [ ! -f "$(VARS)" ]; then \
echo "$(BOLD)$(RED)Could not find variables file: $(VARS)$(RESET)"; \
exit 1; \
fi
prep: set-env ## Prepare a new workspace (environment) if needed, configure the tfstate backend, update any modules, and switch to the workspace
@echo "$(BOLD)Verifying that the S3 bucket $(S3_BUCKET) for remote state exists$(RESET)"
@if ! aws --profile $(AWS_PROFILE) s3api head-bucket --region $(REGION) --bucket $(S3_BUCKET) > /dev/null 2>&1 ; then \
echo "$(BOLD)S3 bucket $(S3_BUCKET) was not found, creating new bucket with versioning enabled to store tfstate$(RESET)"; \
aws --profile $(AWS_PROFILE) s3api create-bucket \
--bucket $(S3_BUCKET) \
--acl private \
--region $(REGION) \
--create-bucket-configuration LocationConstraint=$(REGION) > /dev/null 2>&1 ; \
aws --profile $(AWS_PROFILE) s3api put-bucket-versioning \
--bucket $(S3_BUCKET) \
--versioning-configuration Status=Enabled > /dev/null 2>&1 ; \
echo "$(BOLD)$(GREEN)S3 bucket $(S3_BUCKET) created$(RESET)"; \
else
echo "$(BOLD)$(GREEN)S3 bucket $(S3_BUCKET) exists$(RESET)"; \
fi
@echo "$(BOLD)Verifying that the DynamoDB table exists for remote state locking$(RESET)"
@if ! aws --profile $(AWS_PROFILE) --region $(REGION) dynamodb describe-table --table-name $(DYNAMODB_TABLE) > /dev/null 2>&1 ; then \
echo "$(BOLD)DynamoDB table $(DYNAMODB_TABLE) was not found, creating new DynamoDB table to maintain locks$(RESET)"; \
aws --profile $(AWS_PROFILE) dynamodb create-table \
--region $(REGION) \
--table-name $(DYNAMODB_TABLE) \
--attribute-definitions AttributeName=LockID,AttributeType=S \
--key-schema AttributeName=LockID,KeyType=HASH \
--provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 > /dev/null 2>&1 ; \
echo "$(BOLD)$(GREEN)DynamoDB table $(DYNAMODB_TABLE) created$(RESET)"; \
echo "Sleeping for 10 seconds to allow DynamoDB state to propagate through AWS"; \
sleep 10; \
else
echo "$(BOLD)$(GREEN)DynamoDB Table $(DYNAMODB_TABLE) exists$(RESET)"; \
fi
@aws ec2 --profile=$(AWS_PROFILE) describe-key-pairs | jq -r '.KeyPairs[].KeyName' | grep "$(ENV)_infra_key" > /dev/null 2>&1; \
if [ $$? -ne 0 ]; then \
echo "$(BOLD)$(RED)EC2 Key Pair $(INFRA_KEY)_infra_key was not found$(RESET)"; \
read -p '$(BOLD)Do you want to generate a new keypair? [y/Y]: $(RESET)' ANSWER && \
if [ "$${ANSWER}" == "y" ] || [ "$${ANSWER}" == "Y" ]; then \
mkdir -p ~/.ssh; \
ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/$(ENV)_infra_key; \
aws ec2 --profile=$(AWS_PROFILE) import-key-pair --key-name "$(ENV)_infra_key" --public-key-material "file://~/.ssh/$(ENV)_infra_key.pub"; \
fi; \
else \
echo "$(BOLD)$(GREEN)EC2 Key Pair $(ENV)_infra_key exists$(RESET)";\
fi
@echo "$(BOLD)Configuring the terraform backend$(RESET)"
@terraform init \
-input=false \
-force-copy \
-lock=true \
-upgrade \
-verify-plugins=true \
-backend=true \
-backend-config="profile=$(AWS_PROFILE)" \
-backend-config="region=$(REGION)" \
-backend-config="bucket=$(S3_BUCKET)" \
-backend-config="key=$(ENV)/$(CURRENT_FOLDER)/terraform.tfstate" \
-backend-config="dynamodb_table=$(DYNAMODB_TABLE)"\
-backend-config="acl=private"
@echo "$(BOLD)Switching to workspace $(WORKSPACE)$(RESET)"
@terraform workspace select $(WORKSPACE) || terraform workspace new $(WORKSPACE)
plan: prep ## Show what terraform thinks it will do
@terraform plan \
-lock=true \
-input=false \
-refresh=true \
-var-file="$(VARS)"
format: prep ## Rewrites all Terraform configuration files to a canonical format.
@terraform fmt \
-write=true \
-recursive
# https://github.com/terraform-linters/tflint
lint: prep ## Check for possible errors, best practices, etc in current directory!
@tflint
# https://github.com/liamg/tfsec
check-security: prep ## Static analysis of your terraform templates to spot potential security issues.
@tfsec .
documentation: prep ## Generate README.md for a module
@terraform-docs \
markdown table \
--sort-by-required . > README.md
plan-target: prep ## Shows what a plan looks like for applying a specific resource
@echo "$(YELLOW)$(BOLD)[INFO] $(RESET)"; echo "Example to type for the following question: module.rds.aws_route53_record.rds-master"
@read -p "PLAN target: " DATA && \
terraform plan \
-lock=true \
-input=true \
-refresh=true \
-var-file="$(VARS)" \
-target=$$DATA
plan-destroy: prep ## Creates a destruction plan.
@terraform plan \
-input=false \
-refresh=true \
-destroy \
-var-file="$(VARS)"
apply: prep ## Have terraform do the things. This will cost money.
@terraform apply \
-lock=true \
-input=false \
-refresh=true \
-var-file="$(VARS)"
destroy: prep ## Destroy the things
@terraform destroy \
-lock=true \
-input=false \
-refresh=true \
-var-file="$(VARS)"
destroy-target: prep ## Destroy a specific resource. Caution though, this destroys chained resources.
@echo "$(YELLOW)$(BOLD)[INFO] Specifically destroy a piece of Terraform data.$(RESET)"; echo "Example to type for the following question: module.rds.aws_route53_record.rds-master"
@read -p "Destroy target: " DATA && \
terraform destroy \
-lock=true \
-input=false \
-refresh=true \
-var-file=$(VARS) \
-target=$$DATA
destroy-backend: ## Destroy S3 bucket and DynamoDB table
@if ! aws --profile $(AWS_PROFILE) dynamodb delete-table \
--region $(REGION) \
--table-name $(DYNAMODB_TABLE) > /dev/null 2>&1 ; then \
echo "$(BOLD)$(RED)Unable to delete DynamoDB table $(DYNAMODB_TABLE)$(RESET)"; \
else
echo "$(BOLD)$(RED)DynamoDB table $(DYNAMODB_TABLE) does not exist.$(RESET)"; \
fi
@if ! aws --profile $(AWS_PROFILE) s3api delete-objects \
--region $(REGION) \
--bucket $(S3_BUCKET) \
--delete "$$(aws --profile $(AWS_PROFILE) s3api list-object-versions \
--region $(REGION) \
--bucket $(S3_BUCKET) \
--output=json \
--query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')" > /dev/null 2>&1 ; then \
echo "$(BOLD)$(RED)Unable to delete objects in S3 bucket $(S3_BUCKET)$(RESET)"; \
fi
@if ! aws --profile $(AWS_PROFILE) s3api delete-objects \
--region $(REGION) \
--bucket $(S3_BUCKET) \
--delete "$$(aws --profile $(AWS_PROFILE) s3api list-object-versions \
--region $(REGION) \
--bucket $(S3_BUCKET) \
--output=json \
--query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')" > /dev/null 2>&1 ; then \
echo "$(BOLD)$(RED)Unable to delete markers in S3 bucket $(S3_BUCKET)$(RESET)"; \
fi
@if ! aws --profile $(AWS_PROFILE) s3api delete-bucket \
--region $(REGION) \
--bucket $(S3_BUCKET) > /dev/null 2>&1 ; then \
echo "$(BOLD)$(RED)Unable to delete S3 bucket $(S3_BUCKET) itself$(RESET)"; \
fi


@@ -0,0 +1,4 @@
ENV="prod"
REGION="eu-west-1"
PROJECT="strapi-elb"
AWS_PROFILE="admin"

infrastructure/README.md Normal file

@@ -0,0 +1,11 @@
# terraform
Boilerplate for TF
Usage:
- Clone into a project at root level.
- Rename `./terraform` to `infrastructure` (if needed).
- Delete `./infrastructure/.git/` and `./infrastructure/.gitignore`
Commit to project.

infrastructure/main.tf Normal file

@@ -0,0 +1,119 @@
# aws config
provider "aws" {
region = var.region
profile = var.profile
version = "~> 2.70.0"
}
# tags
locals {
tags = {
"Project" = "strapi-eb"
"Description" = "Terraform resources for strapi in Elastic Beanstalk"
}
}
# Network
module "vpc" {
source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.14.0"
stage = var.stage
name = var.name
tags = local.tags
cidr_block = "172.16.0.0/16"
enable_default_security_group_with_custom_rules = false
}
module "subnets" {
source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.23.0"
stage = var.stage
name = var.name
tags = local.tags
availability_zones = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
vpc_id = module.vpc.vpc_id
igw_id = module.vpc.igw_id
cidr_block = module.vpc.vpc_cidr_block
nat_gateway_enabled = false
nat_instance_enabled = false
}
resource "aws_security_group" "ec2_security_group" {
name = "${var.stage}-${var.name}-ec2_sg"
description = "Security group assigned to the Elastic Scaling group that is applied to the EC2 instances."
vpc_id = module.vpc.vpc_id
tags = local.tags
ingress {
description = "HTTP"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTPS"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Outbound to all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "rds_security_group_public" {
name = "${var.stage}-${var.name}-rds_public_sg"
description = "Security group for the RDS instance that allows public access from the internet."
vpc_id = module.vpc.vpc_id
tags = local.tags
ingress {
description = "Incoming Postgres"
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = ["82.6.205.148/32"]
}
}
# RDS instance
module "rds_instance" {
source = "git::https://github.com/cloudposse/terraform-aws-rds.git?ref=tags/0.20.0"
stage = var.stage
name = var.name
tags = local.tags
allocated_storage = 5
database_name = "postgres"
database_user = "mainuser"
database_password = "password"
database_port = 5432
db_parameter_group = "postgres12"
engine = "postgres"
engine_version = "12.3"
instance_class = "db.t2.micro"
security_group_ids = [aws_security_group.ec2_security_group.id]
associate_security_group_ids = [aws_security_group.rds_security_group_public.id]
subnet_ids = module.subnets.public_subnet_ids
vpc_id = module.vpc.vpc_id
publicly_accessible = true
}
# S3 bucket
resource "aws_s3_bucket" "static_assets" {
bucket = "${var.stage}-${var.name}-strapi-uploads"
acl = "private"
tags = local.tags
}

infrastructure/outputs.tf Normal file

@@ -0,0 +1,36 @@
# S3
output "s3_static_assets_id" {
value = aws_s3_bucket.static_assets.id
description = "Name of the static assets S3 bucket."
}
# VPC
output "vpc_id" {
value = module.vpc.vpc_id
description = "The ID of the VPC."
}
output "subnet_public_ids" {
value = module.subnets.public_subnet_ids
description = "The IDs of the public subnets."
}
# Security groups
output "aws_security_group_ec2_security_group" {
value = aws_security_group.ec2_security_group.id
description = "Security group for the EC2 instances applied by the Elastic Scaler."
}
output "aws_security_group_ec2_security_group_rds" {
value = aws_security_group.rds_security_group_public.id
description = "Security group for the RDS instance allowing public access."
}
# RDS
output "rds_instance_endpoint" {
value = module.rds_instance.instance_endpoint
description = "Endpoint of the RDS instance."
}


@@ -0,0 +1,5 @@
# module
name = "strapi-eb"
region = "eu-west-1"
stage = "prod"
profile = "admin"


@@ -0,0 +1,15 @@
variable "name" {
}
variable "region" {
}
variable "stage" {
}
variable "profile" {
}

package-lock.json generated

@@ -2148,6 +2148,15 @@
}
}
},
"block-stream": {
"version": "0.0.9",
"resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz",
"integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=",
"optional": true,
"requires": {
"inherits": "~2.0.0"
}
},
"bluebird": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
@@ -4635,6 +4644,18 @@
"integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==",
"optional": true
},
"fstream": {
"version": "1.0.12",
"resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz",
"integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==",
"optional": true,
"requires": {
"graceful-fs": "^4.1.2",
"inherits": "~2.0.0",
"mkdirp": ">=0.5 0",
"rimraf": "2"
}
},
"function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
@@ -6921,9 +6942,9 @@
"integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg=="
},
"needle": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/needle/-/needle-2.4.1.tgz",
"integrity": "sha512-x/gi6ijr4B7fwl6WYL9FwlCvRQKGlUNvnceho8wxkwXqN8jvVmmmATTmZPRRG7b/yC1eode26C2HO9jl78Du9g==",
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/needle/-/needle-2.5.0.tgz",
"integrity": "sha512-o/qITSDR0JCyCKEQ1/1bnUXMmznxabbwi/Y4WwJElf+evwJNFNwIDMCCt5IigFVxgeGBJESLohGtIS9gEzo1fA==",
"requires": {
"debug": "^3.2.6",
"iconv-lite": "^0.4.4",
@@ -6971,6 +6992,11 @@
"semver": "^5.4.1"
}
},
"node-addon-api": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.0.tgz",
"integrity": "sha512-ASCL5U13as7HhOExbT6OlWJJUV/lLzL2voOSP1UVehpRD8FbSrSDjfScK/KwAvVTI5AS6r4VwbOMlIqtvRidnA=="
},
"node-fetch": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.0.tgz",
@@ -6981,6 +7007,34 @@
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.9.0.tgz",
"integrity": "sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ=="
},
"node-gyp": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-3.8.0.tgz",
"integrity": "sha512-3g8lYefrRRzvGeSowdJKAKyks8oUpLEd/DyPV4eMhVlhJ0aNaZqIrNUIPuEWWTAoPqyFkfGrM67MC69baqn6vA==",
"optional": true,
"requires": {
"fstream": "^1.0.0",
"glob": "^7.0.3",
"graceful-fs": "^4.1.2",
"mkdirp": "^0.5.0",
"nopt": "2 || 3",
"npmlog": "0 || 1 || 2 || 3 || 4",
"osenv": "0",
"request": "^2.87.0",
"rimraf": "2",
"semver": "~5.3.0",
"tar": "^2.0.0",
"which": "1"
},
"dependencies": {
"semver": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.3.0.tgz",
"integrity": "sha1-myzl094C0XxgEq0yaqa00M9U+U8=",
"optional": true
}
}
},
"node-libs-browser": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz",
@@ -7048,6 +7102,31 @@
"rimraf": "^2.6.1",
"semver": "^5.3.0",
"tar": "^4"
},
"dependencies": {
"nopt": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
"integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
"requires": {
"abbrev": "1",
"osenv": "^0.1.4"
}
},
"tar": {
"version": "4.4.13",
"resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
"integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
"requires": {
"chownr": "^1.1.1",
"fs-minipass": "^1.2.5",
"minipass": "^2.8.6",
"minizlib": "^1.2.1",
"mkdirp": "^0.5.0",
"safe-buffer": "^5.1.2",
"yallist": "^3.0.3"
}
}
}
},
"node-releases": {
@@ -7084,12 +7163,12 @@
"integrity": "sha1-lKKxYzxPExdVMAfYlm/Q6EG2pMI="
},
"nopt": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
"integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
"version": "3.0.6",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz",
"integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=",
"optional": true,
"requires": {
"abbrev": "1",
"osenv": "^0.1.4"
"abbrev": "1"
}
},
"normalize-path": {
@@ -9882,11 +9961,12 @@
"integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug=="
},
"sqlite3": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-4.2.0.tgz",
"integrity": "sha512-roEOz41hxui2Q7uYnWsjMOTry6TcNUNmp8audCx18gF10P2NknwdpF+E+HKvz/F2NvPKGGBF4NGc+ZPQ+AABwg==",
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-5.0.0.tgz",
"integrity": "sha512-rjvqHFUaSGnzxDy2AHCwhHy6Zp6MNJzCPGYju4kD8yi6bze4d1/zMTg6C7JI49b7/EM7jKMTvyfN/4ylBKdwfw==",
"requires": {
"nan": "^2.12.1",
"node-addon-api": "2.0.0",
"node-gyp": "3.x",
"node-pre-gyp": "^0.11.0"
}
},
@@ -10959,17 +11039,14 @@
"integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA=="
},
"tar": {
"version": "4.4.13",
"resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
"integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
"version": "2.2.2",
"resolved": "https://registry.npmjs.org/tar/-/tar-2.2.2.tgz",
"integrity": "sha512-FCEhQ/4rE1zYv9rYXJw/msRqsnmlje5jHP6huWeBZ704jUTy02c5AZyWujpMR1ax6mVw9NyJMfuK2CMDWVIfgA==",
"optional": true,
"requires": {
"chownr": "^1.1.1",
"fs-minipass": "^1.2.5",
"minipass": "^2.8.6",
"minizlib": "^1.2.1",
"mkdirp": "^0.5.0",
"safe-buffer": "^5.1.2",
"yallist": "^3.0.3"
"block-stream": "*",
"fstream": "^1.0.12",
"inherits": "2"
}
},
"tar-fs": {

todo.md

@@ -1,19 +0,0 @@
# To Do
## Immediate
Merge the CF templates into one, make sure all the importing and other snippets are documented.
- Create single instance deployment + https (document)
- Terraform it all up
## Long term
Use codebuild to update strapi
Use circle CI instead
Cloudformation template to deploy an S3 bucket
## Documentation
Summarise the flow -> VPC, internet gateway, attachment + route tables, subnets etc. Mention the nat gateway but show how it can be replaced with security groups. Document each individual resource needed bullet point and link to the git repo for the TF/CF templates.