-
Notifications
You must be signed in to change notification settings - Fork 32
/
nodes.tf
87 lines (73 loc) · 2.46 KB
/
nodes.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# Worker nodes for the Kubernetes cluster, provisioned on Equinix Metal.
# One device per requested worker; hostnames look like "<metro>-worker-<n>".
resource "metal_device" "k8s_workers" {
  count = var.worker_count

  hostname         = format("%s-%s-%d", var.metro, "worker", count.index)
  project_id       = metal_project.kubenet.id
  metro            = var.metro
  plan             = var.worker_plan
  operating_system = var.metal_os
  billing_cycle    = "hourly"
  tags             = ["kubernetes", "k8s", "worker"]
}
# Using a null_resource so the metal_device doesn't have to wait to be
# initially provisioned before the rest of the plan can progress.
resource "null_resource" "setup_worker" {
  count = var.worker_count

  # All file/exec provisioners below run on the matching worker node as root,
  # authenticated with the generated cluster access key.
  connection {
    type        = "ssh"
    user        = "root"
    host        = metal_device.k8s_workers[count.index].access_public_ipv4
    private_key = tls_private_key.k8s_cluster_access_key.private_key_pem
  }

  # Static helper script shipped verbatim from the module source.
  provisioner "file" {
    source      = "${path.module}/scripts/setup-base.sh"
    destination = "/tmp/setup-base.sh"
  }

  # Rendered templates — values are interpolated before upload.
  provisioner "file" {
    content     = data.template_file.install_docker.rendered
    destination = "/tmp/install-docker.sh"
  }

  provisioner "file" {
    content     = data.template_file.install_kubernetes.rendered
    destination = "/tmp/setup-kube.sh"
  }

  provisioner "file" {
    source      = "${path.module}/scripts/install-calicoctl.sh"
    destination = "/tmp/install-calicoctl.sh"
  }

  provisioner "file" {
    source      = "${path.module}/scripts/bgp-routes.sh"
    destination = "/tmp/bgp-routes.sh"
  }

  # Run the uploaded scripts in order, then join this node to the cluster
  # using the kubeadm join command produced by the external data source.
  provisioner "remote-exec" {
    inline = [
      "chmod +x /tmp/*.sh",
      "/tmp/setup-base.sh",
      "/tmp/install-docker.sh",
      "/tmp/setup-kube.sh",
      data.external.kubeadm_join.result.command,
      "/tmp/install-calicoctl.sh",
      # Only enable the execution of this next line if you see issues with BGP peering
      # Some BGP speakers will not respect source routing so adding static routes can help.
      # "/tmp/bgp-routes.sh",
    ]
  }

  # Post-join sanity check: list nodes so the join result shows up in the
  # apply log. Non-fatal on failure (on_failure = continue).
  provisioner "remote-exec" {
    inline = [
      "kubectl get nodes -o wide",
    ]
    on_failure = continue

    # NOTE: this check runs against the controller, not the worker, so it
    # carries its own connection block overriding the resource-level one.
    connection {
      type        = "ssh"
      user        = "root"
      host        = metal_device.k8s_controller.access_public_ipv4
      private_key = tls_private_key.k8s_cluster_access_key.private_key_pem
    }
  }
}
# We need to get the private IPv4 Gateway of each worker.
# gateway.sh is invoked once per worker with the node's public IPv4 address.
data "external" "private_ipv4_gateway" {
  count   = var.worker_count
  program = ["${path.module}/scripts/gateway.sh"]

  query = {
    host = metal_device.k8s_workers[count.index].access_public_ipv4
  }
}