Step 1 : create all configuration files in the modules folder from this format.
project-root/
├── modules/
│ └── s3-static-website/
│ ├── main.tf
│ ├── variables.tf
│ └── outputs.tf
├── envs/
│ └── dev/
│ ├── main.tf
│ ├── variables.tf
│ └── terraform.tfvars
├── backend.tf
├── provider.tf
└── variables.tf
main.tf file
#create the s3 bucket
resource "aws_s3_bucket" "website_bucket" {
bucket = var.bucket_name
tags = var.tags
}
#Configuration of the S3 bucket website.
resource "aws_s3_bucket_website_configuration" "website_static" {
bucket = aws_s3_bucket.website_bucket.id
index_document {
suffix = "index.html"
}
}
#bucket policy ensures that all objects, regardless of how they are uploaded or by whom, will be accessible publicly.
#using bucket policies over ACLs provide better visibility and manageability
#bucket policy ensures consistent, secure, and more flexible access control
resource "aws_s3_bucket_policy" "website_policy" {
bucket = aws_s3_bucket.website_bucket.id
policy = data.aws_iam_policy_document.website_policy.json
}
data "aws_iam_policy_document" "website_policy" {
statement {
actions = ["s3:GetObject"]
principals {
type = "AWS"
identifiers = ["*"]
}
resources = ["${aws_s3_bucket.website_bucket.arn}/*"]
effect = "Allow"
}
}
resource "aws_s3_bucket_public_access_block" "website_bucket_block" {
bucket = aws_s3_bucket.website_bucket.id
block_public_acls = false
block_public_policy = false
ignore_public_acls = false
restrict_public_buckets = false
}
locals {
s3_origin_id = "${var.bucket_name}-origin"
s3_domain_name = "${var.bucket_name}.s3-website-${var.region}.amazonaws.com"
}
resource "aws_cloudfront_distribution" "cdn" {
origin {
domain_name = aws_s3_bucket.website_bucket.bucket_regional_domain_name
origin_id = var.original_id
}
enabled = true
is_ipv6_enabled = true
default_root_object = "index.html"
default_cache_behavior {
allowed_methods = ["GET", "HEAD"]
cached_methods = ["GET", "HEAD"]
target_origin_id = var.original_id
forwarded_values {
query_string = true
cookies {
forward = "all"
}
}
viewer_protocol_policy = "redirect-to-https"
min_ttl = 0
default_ttl = 0
max_ttl = 0
}
restrictions {
geo_restriction {
restriction_type = "none"
}
}
viewer_certificate {
cloudfront_default_certificate = true
}
depends_on = [aws_s3_bucket.website_bucket]
}
1.2
outputs.tf file contains
# Output the S3 bucket name
output "s3_bucket_name" {
value = aws_s3_bucket.website_bucket.bucket
description = "The name of the S3 bucket"
}
# Output the CloudFront distribution domain name
output "cloudfront_domain_name" {
value = aws_cloudfront_distribution.cdn.domain_name
description = "The domain name of the CloudFront distribution"
}
# Output the CloudFront distribution ID
output "cloudfront_distribution_id" {
value = aws_cloudfront_distribution.cdn.id
description = "The ID of the CloudFront distribution"
}
1.3 variables.tf
variable "region" {
description = "Region"
type = string
default = "us-east-1"
}
variable "bucket_name" {
description = "The name of the S3 bucket"
type = string
}
variable "original_id" {
description = "The original ID for CloudFront"
type = string
default = "S3-Origin"
}
variable "tags" {
description = "Tags for the resources"
type = map(string)
default = {}
}
hcl
Step 2 : head to the specific dev folder to import the configs
2.1
main.tf creates a module called s3_static_website
provider "aws" {
region = "us-east-1"
}
module "s3_static_website" {
source = "../../modules/s3-static-website"
bucket_name = var.bucket_name
tags = var.tags
}
2.2 variables.tf
contains the variables
variable "bucket_name" {
description = "The name of the S3 bucket for the dev environment"
type = string
#default = "dev_static_website_bucket_efantus_new"
}
variable "tags" {
description = "Tags for resources in dev environment"
type = map(string)
default = {
"Environment" = "dev"
}
}
2.3 terraform.tfvars sets the value of the bucket
bucket_name = "dev-static-website-bucket"
2.4 outputs.tf get the name of s3, cloudfront domain names
# Output the S3 bucket name
output "s3_bucket_name" {
value = var.bucket_name
description = "The name of the S3 bucket"
}
# Output the CloudFront distribution domain name
output "cloudfront_domain_name" {
value = module.s3_static_website.cloudfront_domain_name
description = "The domain name of the CloudFront distribution"
}
2.5
create the s3_bucket_cloudfront.tf file
it creates the config for both the s3 , dynamodb to store our state files.
security measure include:
--server side encyrption of the bucket
--version control
--lifecycyle prevent destroy to true.can't be deleted
--block all public access
--hash key for dynamodb
#step 1 : create s3 bucket
resource "aws_s3_bucket" "terraform_state" {
bucket = "dev-state-file-new"
#prevent accidental deletion of s3
lifecycle {
prevent_destroy = true
}
}
#step 2 : Enable versioning of s3 bucket
resource "aws_s3_bucket_versioning" "enabled" {
bucket = aws_s3_bucket.terraform_state.id
versioning_configuration {
status = "Enabled"
}
}
#step 3 : Secure bucket using server side encryption
resource "aws_s3_bucket_server_side_encryption_configuration" "default" {
bucket = aws_s3_bucket.terraform_state.id
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
#step 4 : Block all public access to s3 bucket
resource "aws_s3_bucket_public_access_block" "public_access" {
bucket = aws_s3_bucket.terraform_state.id
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
#step 5 : create dynamodb table for locking
resource "aws_dynamodb_table" "terraform_locks" {
name = "dev_dynamodb_table_name_new"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
}
after this step, manually upload the index.html static file to the bucket_name = "dev-static-website-bucket"
2.6
backend.tf file.
NB :
--comment this file code until you have created all resources for storing state files in s3_bucket_state.tf
--uncoomment and upload the state files remotely to the s3 bucket
terraform {
backend "s3" {
bucket = "dev-state-file-new"
key = "global/s3/terraform.tfstate"
region = "us-east-1"
dynamodb_table = "dev_dynamodb_table_name_new"
encrypt = true
}
}
Step 3 :
run terraform output to get:
cloudfront_domain_name && s3_bucket_name name
Step 5 :
Top comments (0)