
Commit 5fd363f

terraform fmt
1 parent 2bac3e9 commit 5fd363f

31 files changed (+648 -648 lines)
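
The commit message indicates this change was produced by Terraform's built-in formatter, so every hunk below is a formatting-only rewrite: indentation, spacing, and alignment of the = signs change, while resource names, arguments, and values stay the same. A minimal sketch of the kind of invocation that produces such a commit (the exact flags are an assumption; the message only says "terraform fmt"):

    # Rewrite every *.tf file under the current directory into Terraform's canonical style.
    terraform fmt -recursive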

addons.tf (+8 -8)

@@ -1,6 +1,6 @@
 resource "aws_eks_addon" "cni" {
-  cluster_name      = aws_eks_cluster.eks_cluster.name
-  addon_name        = "vpc-cni"
+  cluster_name = aws_eks_cluster.eks_cluster.name
+  addon_name   = "vpc-cni"

   addon_version     = var.addon_cni_version
   resolve_conflicts = "OVERWRITE"
@@ -12,8 +12,8 @@ resource "aws_eks_addon" "cni" {
 }

 resource "aws_eks_addon" "coredns" {
-  cluster_name      = aws_eks_cluster.eks_cluster.name
-  addon_name        = "coredns"
+  cluster_name = aws_eks_cluster.eks_cluster.name
+  addon_name   = "coredns"

   addon_version     = var.addon_coredns_version
   resolve_conflicts = "OVERWRITE"
@@ -25,8 +25,8 @@ resource "aws_eks_addon" "coredns" {
 }

 resource "aws_eks_addon" "kubeproxy" {
-  cluster_name      = aws_eks_cluster.eks_cluster.name
-  addon_name        = "kube-proxy"
+  cluster_name = aws_eks_cluster.eks_cluster.name
+  addon_name   = "kube-proxy"

   addon_version     = var.addon_kubeproxy_version
   resolve_conflicts = "OVERWRITE"
@@ -37,8 +37,8 @@ resource "aws_eks_addon" "kubeproxy" {
 }

 resource "aws_eks_addon" "csi_driver" {
-  cluster_name      = aws_eks_cluster.eks_cluster.name
-  addon_name        = "aws-ebs-csi-driver"
+  cluster_name = aws_eks_cluster.eks_cluster.name
+  addon_name   = "aws-ebs-csi-driver"

   addon_version     = var.addon_csi_version
   resolve_conflicts = "OVERWRITE"

aws-auth-config.tf (+3 -3)

@@ -1,11 +1,11 @@
 resource "kubernetes_config_map" "aws-auth" {
   metadata {
-    name       = "aws-auth"
-    namespace  = "kube-system"
+    name      = "aws-auth"
+    namespace = "kube-system"
   }

   data = {
-    mapRoles   = <<YAML
+    mapRoles = <<YAML
 - rolearn: ${aws_iam_role.eks_nodes_roles.arn}
   username: system:node:{{EC2PrivateDNSName}}
   groups:
eks.tf (+61 -61)

@@ -1,90 +1,90 @@

 resource "aws_eks_cluster" "eks_cluster" {

-    name = var.cluster_name
-    version = var.k8s_version
-    role_arn = aws_iam_role.eks_cluster_role.arn
+  name     = var.cluster_name
+  version  = var.k8s_version
+  role_arn = aws_iam_role.eks_cluster_role.arn

-    vpc_config {
+  vpc_config {

-        security_group_ids = [
-            aws_security_group.cluster_sg.id,
-            aws_security_group.cluster_nodes_sg.id
-        ]
+    security_group_ids = [
+      aws_security_group.cluster_sg.id,
+      aws_security_group.cluster_nodes_sg.id
+    ]
+
+    subnet_ids = [
+      aws_subnet.private_subnet_1a.id,
+      aws_subnet.private_subnet_1b.id,
+      aws_subnet.private_subnet_1c.id
+    ]

-        subnet_ids = [
-            aws_subnet.private_subnet_1a.id,
-            aws_subnet.private_subnet_1b.id,
-            aws_subnet.private_subnet_1c.id
-        ]
+  }

+  encryption_config {
+    provider {
+      key_arn = aws_kms_key.eks.arn
     }
+    resources = ["secrets"]
+  }

-    encryption_config {
-        provider {
-            key_arn = aws_kms_key.eks.arn
-        }
-        resources = ["secrets"]
-    }
+  enabled_cluster_log_types = [
+    "api", "audit", "authenticator", "controllerManager", "scheduler"
+  ]

-    enabled_cluster_log_types = [
-        "api", "audit", "authenticator", "controllerManager", "scheduler"
-    ]
-
-    tags = {
-        "kubernetes.io/cluster/${var.cluster_name}" = "shared"
-        "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned",
-        "k8s.io/cluster-autoscaler/enabled" = true
-    }
+  tags = {
+    "kubernetes.io/cluster/${var.cluster_name}"     = "shared"
+    "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned",
+    "k8s.io/cluster-autoscaler/enabled"             = true
+  }

 }

 resource "aws_security_group" "cluster_sg" {
-    name = format("%s-sg", var.cluster_name)
-    vpc_id = aws_vpc.cluster_vpc.id
+  name   = format("%s-sg", var.cluster_name)
+  vpc_id = aws_vpc.cluster_vpc.id

-    egress {
-        from_port = 0
-        to_port = 0
+  egress {
+    from_port = 0
+    to_port   = 0

-        protocol = "-1"
-        cidr_blocks = [ "0.0.0.0/0" ]
-    }
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }

-    tags = {
-        Name = format("%s-sg", var.cluster_name)
-    }
+  tags = {
+    Name = format("%s-sg", var.cluster_name)
+  }

 }

 resource "aws_security_group_rule" "cluster_ingress_https" {
-    cidr_blocks = ["0.0.0.0/0"]
-    from_port = 443
-    to_port = 443
-    protocol = "tcp"
+  cidr_blocks = ["0.0.0.0/0"]
+  from_port   = 443
+  to_port     = 443
+  protocol    = "tcp"

-    security_group_id = aws_security_group.cluster_sg.id
-    type = "ingress"
+  security_group_id = aws_security_group.cluster_sg.id
+  type              = "ingress"
 }

 resource "aws_security_group_rule" "nodeport_cluster" {
-    cidr_blocks = ["0.0.0.0/0"]
-    from_port = 30000
-    to_port = 32768
-    description = "nodeport"
-    protocol = "tcp"
-
-    security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
-    type = "ingress"
+  cidr_blocks = ["0.0.0.0/0"]
+  from_port   = 30000
+  to_port     = 32768
+  description = "nodeport"
+  protocol    = "tcp"
+
+  security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
+  type              = "ingress"
 }

 resource "aws_security_group_rule" "nodeport_cluster_udp" {
-    cidr_blocks = ["0.0.0.0/0"]
-    from_port = 30000
-    to_port = 32768
-    description = "nodeport"
-    protocol = "udp"
-
-    security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
-    type = "ingress"
+  cidr_blocks = ["0.0.0.0/0"]
+  from_port   = 30000
+  to_port     = 32768
+  description = "nodeport"
+  protocol    = "udp"
+
+  security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
+  type              = "ingress"
 }
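
A formatting pass like this one stays a one-off when the pipeline rejects unformatted code. A minimal sketch of such a gate (a hypothetical CI step, not part of this commit):

    # -check reports instead of rewriting: the command exits non-zero
    # and lists every file that is not in canonical form.
    terraform fmt -check -recursive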

helm_alb_ingress_controller.tf (+43 -43)

@@ -1,45 +1,45 @@
 resource "helm_release" "alb_ingress_controller" {
-    name = "aws-load-balancer-controller"
-    repository = "https://aws.github.io/eks-charts"
-    chart = "aws-load-balancer-controller"
-    namespace = "kube-system"
-    create_namespace = true
-
-    set {
-        name = "clusterName"
-        value = var.cluster_name
-    }
-
-    set {
-        name = "serviceAccount.create"
-        value = true
-    }
-
-    set {
-        name = "serviceAccount.name"
-        value = "aws-load-balancer-controller"
-    }
-
-    set {
-        name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
-        value = aws_iam_role.alb_controller.arn
-    }
-
-    set {
-        name = "region"
-        value = var.aws_region
-    }
-
-
-    set {
-        name = "vpcId"
-        value = aws_vpc.cluster_vpc.id
-
-    }
-
-    depends_on = [
-        aws_eks_cluster.eks_cluster,
-        aws_eks_node_group.cluster,
-        kubernetes_config_map.aws-auth
-    ]
+  name             = "aws-load-balancer-controller"
+  repository       = "https://aws.github.io/eks-charts"
+  chart            = "aws-load-balancer-controller"
+  namespace        = "kube-system"
+  create_namespace = true
+
+  set {
+    name  = "clusterName"
+    value = var.cluster_name
+  }
+
+  set {
+    name  = "serviceAccount.create"
+    value = true
+  }
+
+  set {
+    name  = "serviceAccount.name"
+    value = "aws-load-balancer-controller"
+  }
+
+  set {
+    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
+    value = aws_iam_role.alb_controller.arn
+  }
+
+  set {
+    name  = "region"
+    value = var.aws_region
+  }
+
+
+  set {
+    name  = "vpcId"
+    value = aws_vpc.cluster_vpc.id
+
+  }
+
+  depends_on = [
+    aws_eks_cluster.eks_cluster,
+    aws_eks_node_group.cluster,
+    kubernetes_config_map.aws-auth
+  ]
 }
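
Because the formatter only touches whitespace, the rendered configuration is unchanged. One way to confirm that after a commit like this, assuming the backend and AWS credentials for this workspace are already configured:

    terraform init
    # -detailed-exitcode returns 0 for an empty plan, 2 if changes are pending, 1 on error.
    terraform plan -detailed-exitcode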

helm_argo_rollouts.tf (+31 -31)

@@ -1,43 +1,43 @@
 resource "helm_release" "argo_rollouts" {
-    count = var.argo_rollouts_toggle ? 1 : 0
+  count = var.argo_rollouts_toggle ? 1 : 0

-    name = "argo-rollouts"
-    chart = "argo-rollouts"
-    repository = "https://argoproj.github.io/argo-helm"
-    namespace = "argo-rollouts"
-    create_namespace = true
+  name             = "argo-rollouts"
+  chart            = "argo-rollouts"
+  repository       = "https://argoproj.github.io/argo-helm"
+  namespace        = "argo-rollouts"
+  create_namespace = true

-    set {
-        name = "dashboard.enabled"
-        value = true
-    }
+  set {
+    name  = "dashboard.enabled"
+    value = true
+  }

-    set {
-        name = "controller.metrics.enabled"
-        value = true
-    }
+  set {
+    name  = "controller.metrics.enabled"
+    value = true
+  }

-    # set {
-    # name = "podAnnotations.prometheus\\.io/scrape"
-    # value = true
-    # }
+  # set {
+  # name = "podAnnotations.prometheus\\.io/scrape"
+  # value = true
+  # }

-    # set {
-    # name = "podAnnotations.prometheus\\.io/path"
-    # value = "/metrics"
-    # }
+  # set {
+  # name = "podAnnotations.prometheus\\.io/path"
+  # value = "/metrics"
+  # }

-    # set {
-    # name = "podAnnotations.prometheus\\.io/port"
-    # value = "8090"
-    # }
+  # set {
+  # name = "podAnnotations.prometheus\\.io/port"
+  # value = "8090"
+  # }


-    depends_on = [
-        aws_eks_cluster.eks_cluster,
-        aws_eks_node_group.cluster,
-        kubernetes_config_map.aws-auth
-    ]
+  depends_on = [
+    aws_eks_cluster.eks_cluster,
+    aws_eks_node_group.cluster,
+    kubernetes_config_map.aws-auth
+  ]
 }

 resource "kubectl_manifest" "rollouts_gateway" {
