# Get our API key for Hetzner
provider "hcloud" {
  token = var.hcloud_token
}
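# Usage note (assumption, not part of the original configuration): the token is a
# plain input variable, so it is normally supplied out of band rather than
# hard-coded, for example
#
#   export TF_VAR_hcloud_token="<hetzner-cloud-api-token>"
#
# or via a terraform.tfvars entry such as `hcloud_token = "<token>"`.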
provider "helm" {
kubernetes {
config_path = "kubeconfig-${var.domain}"
}
}
locals {
  controller_count  = var.controller_role == "single" ? 1 : var.controller_count
  worker_count      = var.controller_role == "single" ? 0 : var.worker_count
  create_keys       = (var.ssh_pub_key == null || var.ssh_priv_key_path == null)
  ssh_priv_key_path = var.ssh_priv_key_path == null ? "id_ed25519_${var.domain}" : var.ssh_priv_key_path
}
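# Sketch (assumed tfvars shape, not defined in this file): setting both variables
# below skips key generation and uses an existing key pair instead, per the
# create_keys logic above.
#
#   ssh_pub_key       = "ssh-ed25519 AAAA... user@host"
#   ssh_priv_key_path = "~/.ssh/id_ed25519"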
# ED25519 SSH key pair, generated only when no existing key was supplied
resource "tls_private_key" "ed25519" {
  count     = local.create_keys ? 1 : 0
  algorithm = "ED25519"
}
resource "local_file" "ssh_priv_key_path" {
  count           = local.create_keys ? 1 : 0
  filename        = local.ssh_priv_key_path
  file_permission = "0600"
  content         = nonsensitive(one(tls_private_key.ed25519.*.private_key_openssh))
}
resource "hcloud_ssh_key" "terraform-hcloud-k0s" {
  name = "terraform-hcloud-k0s"
  # We depend on the local_file because we want it created before we create servers
  depends_on = [local_file.ssh_priv_key_path]
  public_key = local.create_keys ? one(tls_private_key.ed25519.*.public_key_openssh) : var.ssh_pub_key
}
# worker networking section
locals {
  extra_worker_ips = [
    for _, addresses in var.extra_workers :
    compact(concat(
      addresses["public_ipv4"],
      addresses["public_ipv6"],
      addresses["private_ipv4"],
    ))
  ]
}
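# For illustration only (assumed shape, inferred from the lookups above): each
# entry in var.extra_workers is expected to carry address lists under these three
# keys, e.g.
#
#   extra_workers = {
#     "extra-1" = {
#       public_ipv4  = ["203.0.113.10"]
#       public_ipv6  = ["2001:db8::10"]
#       private_ipv4 = []
#     }
#   }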
module "worker_ips" {
source = "./modules/network"
amount = local.worker_count
role = "worker"
domain = var.domain
enable_ipv4 = var.enable_ipv4
enable_ipv6 = var.enable_ipv6
enable_balancer = var.balance_worker_plane
balanced_services = [80, 443]
balanced_extraIPs = local.extra_worker_ips
}
# controller networking section
module "controller_ips" {
  source                  = "./modules/network"
  amount                  = local.controller_count
  role                    = var.controller_role
  domain                  = var.domain
  enable_ipv4             = var.enable_ipv4
  enable_ipv6             = var.enable_ipv6
  enable_balancer         = var.balance_control_plane
  enable_network          = var.enable_private_network
  network_ip_range        = var.network_ip_range
  network_subnet_type     = var.network_subnet_type
  network_subnet_ip_range = var.network_subnet_ip_range
  network_vswitch_id      = var.network_vswitch_id
  network_zone            = var.network_zone
  hostname                = var.single_controller_hostname
}
locals {
  control_plane_balancer_cidrs = compact(concat(
    module.controller_ips.lb_addresses["ipv6cidr"],
    module.controller_ips.lb_addresses["ipv4cidr"],
  ))
  worker_cidrs = compact(concat(
    module.worker_ips.addresses["ipv6cidr"],
    module.worker_ips.addresses["ipv4cidr"],
    var.enable_private_network ? [var.network_subnet_ip_range] : [],
    (var.controller_role == "controller+worker" || var.controller_role == "single") ? concat(
      module.controller_ips.addresses["ipv6cidr"],
      module.controller_ips.addresses["ipv4cidr"]) : []
  ))
  controller_cidrs = compact(concat(
    module.controller_ips.addresses["ipv6cidr"],
    module.controller_ips.addresses["ipv4cidr"],
    var.enable_private_network ? [var.network_subnet_ip_range] : [],
  ))
  base_rules = {
    icmp = {
      proto = "icmp",
      port  = null,
      cidrs = [
        "0.0.0.0/0",
        "::/0",
      ],
    }
    ssh = {
      proto = "tcp",
      port  = "22",
      cidrs = [
        "0.0.0.0/0",
        "::/0",
      ],
    }
  }
  base_worker_firewall_rules = {
    bgp = {
      proto = "tcp",
      port  = "179",
      cidrs = local.worker_cidrs,
    }
    vxlan = {
      proto = "udp",
      port  = "4789",
      cidrs = local.worker_cidrs,
    }
    kubelet = {
      proto = "tcp",
      port  = "10250",
      cidrs = local.worker_cidrs,
    }
    kubeproxy = {
      proto = "tcp",
      port  = "10249",
      cidrs = local.worker_cidrs,
    }
    prometheusnodeexporter = {
      proto = "tcp",
      port  = "9100",
      cidrs = local.worker_cidrs,
    }
  }
  base_controller_firewall_rules = {
    k8s-api = {
      proto = "tcp",
      port  = "6443",
      cidrs = [
        "0.0.0.0/0",
        "::/0",
      ],
    }
    konnectivity = {
      proto = "tcp",
      port  = "8132-8133",
      cidrs = toset(concat(local.worker_cidrs, local.control_plane_balancer_cidrs))
    }
    etcd = {
      proto = "tcp",
      port  = "2380",
      cidrs = local.controller_cidrs,
    }
    k0s-api = {
      proto = "tcp",
      port  = "9443",
      cidrs = toset(concat(local.controller_cidrs, local.control_plane_balancer_cidrs)),
    }
  }
  # If the controller role is "controller+worker" or "single", we rely exclusively on Calico HostEndpoints.
  controller_firewall_rules = (
    (var.controller_role == "controller+worker" || var.controller_role == "single") ? {} :
    merge(local.base_rules, local.base_controller_firewall_rules)
  )
  worker_firewall_rules = (
    (var.controller_role == "controller+worker" || var.controller_role == "single") ?
    merge(local.base_rules, local.base_controller_firewall_rules, local.base_worker_firewall_rules) :
    merge(local.base_rules, local.base_worker_firewall_rules)
  )
}
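# For illustration (derived from the locals above, nothing new is configured here):
# with controller_role = "controller", controller_firewall_rules holds the base
# rules (ICMP, SSH) plus the control-plane ports (6443, 8132-8133, 2380, 9443),
# and worker_firewall_rules holds the base rules plus the worker ports (179, 4789,
# 10250, 10249, 9100). With "controller+worker" or "single",
# controller_firewall_rules is empty and worker_firewall_rules carries the merged
# base + controller + worker set instead.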
module "workers" {
source = "./modules/server"
amount = local.worker_count
type = var.worker_server_type
image = var.worker_server_image
datacenter = var.worker_server_datacenter
role = "worker"
ssh_pub_key_id = hcloud_ssh_key.terraform-hcloud-k0s.id
ssh_priv_key_path = local.ssh_priv_key_path
domain = var.domain
ip_address_ids = module.worker_ips.address_ids
enable_network = var.enable_private_network
network_subnet_id = module.controller_ips.subnet_id
}
module "controllers" {
source = "./modules/server"
amount = local.controller_count
type = var.controller_server_type
image = var.controller_server_image
datacenter = var.controller_server_datacenter
role = var.controller_role
ssh_pub_key_id = hcloud_ssh_key.terraform-hcloud-k0s.id
ssh_priv_key_path = local.ssh_priv_key_path
domain = var.domain
hostname = var.single_controller_hostname
ip_address_ids = module.controller_ips.address_ids
enable_network = var.enable_private_network
network_subnet_id = module.controller_ips.subnet_id
firewall_rules = local.controller_firewall_rules
}
# This is the first point where we can refer to IP addresses from the output of
# the server module rather than the network module, because the server module's
# data is now available.
locals {
  # Controllers that also run workloads expose their own addresses; otherwise the workers do.
  externalIPs = (var.controller_role == "single" || var.controller_role == "controller+worker") ? flatten(
    [
      for _, addresses in module.controllers.addresses :
      compact(values(addresses))
    ]
  ) : flatten(
    [
      for _, addresses in module.workers.addresses :
      compact(values(addresses))
    ]
  )
  worker_addresses = merge(module.workers.addresses, var.extra_workers)
  # hccm is forced off whenever extra workers are attached.
  hccm_enable = length(var.extra_workers) > 0 ? false : var.hccm_enable
}
module "k0s" {
source = "./modules/k0s"
domain = var.domain
k0s_version = var.k0s_version
controller_role = var.controller_role
hcloud_token = var.hcloud_token
hccm_enable = local.hccm_enable
hcsi_enable = var.hcsi_enable
hcsi_encryption_key = var.hcsi_encryption_key
prometheus_enable = var.prometheus_enable
ssh_priv_key_path = local.ssh_priv_key_path
controller_addresses = module.controllers.addresses
worker_addresses = local.worker_addresses
firewall_rules = local.worker_firewall_rules
ingress_service_type = var.ingress_service_type
cp_balancer_ips = concat(
module.controller_ips.lb_addresses["ipv4"],
module.controller_ips.lb_addresses["ipv6"],
)
externalIPs = local.externalIPs
}
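# Usage sketch (assumption, not part of this configuration): as a root module this
# file is normally driven with the standard workflow, e.g.
#
#   terraform init
#   terraform apply -var="domain=example.com"
#
# with the remaining variables (hcloud_token, controller_role, worker_count, ...)
# supplied via tfvars, the environment, or their declared defaults where present.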