
HashiCorp Boundary Scenario (#64)
* HashiCorp Boundary Scenario

* Move installations into packer

* more idempotency

* Added README

* copy the CA cert to the host
Ranjandas authored Jul 15, 2024
1 parent 08f5f34 commit 9e76c3c
Showing 4 changed files with 302 additions and 18 deletions.
34 changes: 19 additions & 15 deletions packer/hashibox.pkr.hcl
@@ -31,6 +31,12 @@ variable "vault_version" {
description = "Vault version to install"
}

variable "boundary_version" {
type = string
default = "0.16"
description = "Boundary version to install"
}

variable "consul_cni_version" {
type = string
default = "1.5.0"
@@ -58,8 +64,8 @@ locals {
efi_firmware_code = "${var.arch == "aarch64" ? "/opt/homebrew/share/qemu/edk2-aarch64-code.fd" : ""}"
efi_firmware_vars = "${var.arch == "aarch64" ? "/opt/homebrew/share/qemu/edk2-arm-vars.fd" : ""}"

-source_image_url = "${var.arch == "aarch64" ? var.source_image_url : replace(var.source_image_url, "aarch64", "x86_64")}"
-source_image_checksum = "${var.arch == "aarch64" ? var.source_image_checksum : replace(var.source_image_checksum, "aarch64", "x86_64")}"
+source_image_url      = "${var.arch == "aarch64" ? var.source_image_url : replace(var.source_image_url, "aarch64", "x86_64")}"
+source_image_checksum = "${var.arch == "aarch64" ? var.source_image_checksum : replace(var.source_image_checksum, "aarch64", "x86_64")}"
}

source "qemu" "hashibox" {
@@ -77,7 +83,7 @@ source "qemu" "hashibox" {
boot_command = []
net_device = "virtio-net"

-output_directory = ".artifacts/c-${var.consul_version}-n-${var.nomad_version}-v-${var.vault_version}"
+output_directory = ".artifacts/c-${var.consul_version}-n-${var.nomad_version}-v-${var.vault_version}-b-${var.boundary_version}"

cpus = 8
memory = 5120
@@ -112,11 +118,12 @@ build {
"CONSUL_VERSION=${var.consul_version}",
"NOMAD_VERSION=${var.nomad_version}",
"VAULT_VERSION=${var.vault_version}",
"BOUNDARY_VERSION=${var.boundary_version}",
"CONSUL_CNI_VERSION=${var.consul_cni_version}"
]
inline = [
"sudo dnf clean all",
"sudo dnf install -y unzip wget",
"sudo dnf install -y unzip wget postgresql",

# For multicast DNS with socket_vmnet in Lima we use systemd-resolved. For Rocky we have to install the EPEL repo for crudini.
"source /etc/os-release && [[ $ID != fedora ]] && sudo dnf install -y epel-release systemd-resolved && sudo systemctl enable --now systemd-resolved",
@@ -132,7 +139,7 @@

# Enable HashiCorp Repository and install the required packages including CNI libs
"sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/$([ $(source /etc/os-release && echo $ID) == fedora ] && echo fedora || echo RHEL)/hashicorp.repo",
"sudo dnf install -y consul-$CONSUL_VERSION* nomad-$NOMAD_VERSION* vault-$VAULT_VERSION* containernetworking-plugins",
"sudo dnf install -y consul-$CONSUL_VERSION* nomad-$NOMAD_VERSION* vault-$VAULT_VERSION* boundary-$BOUNDARY_VERSION* containernetworking-plugins",

# Nomad expects CNI binaries to be under /opt/cni/bin by default. We use symlink to avoid configuring alternate path in Nomad.
"sudo mkdir /opt/cni && sudo ln -s /usr/libexec/cni /opt/cni/bin",
@@ -142,23 +149,20 @@
"sudo unzip /tmp/consul-cni.zip -d /usr/libexec/cni/",

# Provision Nomad, Consul and Vault CAs that can later be used for agent cert provisioning.
"sudo mkdir /etc/consul.d/certs && cd /etc/consul.d/certs ; sudo consul tls ca create",
"sudo mkdir /etc/nomad.d/certs && cd /etc/nomad.d/certs ; sudo nomad tls ca create",
"sudo install -o consul -g consul -d /etc/consul.d/certs && cd /etc/consul.d/certs ; sudo consul tls ca create",
"sudo install -o nomad -g nomad -d /etc/nomad.d/certs && cd /etc/nomad.d/certs ; sudo nomad tls ca create",
# this will generate a CA named vault-agent-ca.pem. Ensure the cert generation commands that use this CA pass `-domain vault`
"sudo mkdir /etc/vault.d/certs && cd /etc/vault.d/certs ; sudo consul tls ca create -domain vault",
"sudo install -o vault -g vault -d /etc/vault.d/certs && cd /etc/vault.d/certs ; sudo consul tls ca create -domain vault",
+# this will generate a CA named boundary-agent-ca.pem. Ensure the cert generation commands that use this CA pass `-domain boundary`
+"sudo install -o boundary -g boundary -d /etc/boundary.d/certs && cd /etc/boundary.d/certs ; sudo consul tls ca create -domain boundary",

# Install exec2 driver and copy under /opt/nomad/data/plugins dir
"sudo dnf install -y nomad-driver-exec2 --enablerepo hashicorp-test",
"sudo mkdir /opt/nomad/data/plugins && sudo chown nomad:nomad /opt/nomad/data/plugins",
"sudo cp /usr/bin/nomad-driver-exec2 /opt/nomad/data/plugins/",

-# Set permissions for the certs directory
-"sudo chown consul:consul /etc/consul.d/certs",
-"sudo chown nomad:nomad /etc/nomad.d/certs",
-"sudo chown vault:vault /etc/vault.d/certs",

# Enabling of the services is the responsibility of the instance provisioning scripts.
"sudo systemctl disable docker consul nomad"
"sudo systemctl disable docker consul nomad vault boundary"
]
}
}
}
7 changes: 4 additions & 3 deletions packer/variables.pkrvars.hcl
@@ -1,3 +1,4 @@
-consul_version = "1.18"
-nomad_version = "1.7"
-vault_version = "1.17"
+consul_version   = "1.18"
+nomad_version    = "1.7"
+vault_version    = "1.17"
+boundary_version = "0.16"
51 changes: 51 additions & 0 deletions scenarios/boundary-secure/README.md
@@ -0,0 +1,51 @@
# Scenario: Boundary Secure

This scenario builds a Boundary cluster with TLS enabled. Specify the number of `controllers` with the `--servers/-s` flag and the number of `workers` with the `--clients/-c` flag, respectively.

## Usage


### Build

The following command will build a Boundary cluster.

```
shikari create -n <cluster_name> -s 3 -c 3 -i ~/.shikari/c-1.18-n-1.7-v-1.17-b-0.16/hashibox.qcow2
```
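
> The image path passed to `-i` follows the Packer `output_directory` naming from `hashibox.pkr.hcl` (`c-<consul>-n-<nomad>-v-<vault>-b-<boundary>`). If you have not built the image yet, a build along these lines should produce it (a sketch assuming it is run from this repository's `packer` directory; the qcow2 lands under `.artifacts/`):

```
cd packer
packer build -var-file variables.pkrvars.hcl hashibox.pkr.hcl
```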

### Access

Export the Boundary environment variables using the following command.

> This option is only available in Shikari version >= 0.4.1
```
shikari env -n <cluster_name> boundary --tls
```
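
Assuming `shikari env` emits shell `export` statements (with `--tls` presumably pointing clients at the CA cert copied from the guest), you can load them into your current shell and check the address:

```
eval "$(shikari env -n <cluster_name> boundary --tls)"
echo "$BOUNDARY_ADDR"
```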

Extract the login information from the first server.

```
limactl shell boundary-srv-01 cat /etc/boundary.d/db_init.json | jq .auth_method
{
"auth_method_id": "ampw_KEOB3T5XBL",
"auth_method_name": "Generated global scope initial password auth method",
"login_name": "admin",
"password": "galNGsRubsGdgxKXTfOU",
"scope_id": "global",
"user_id": "u_8ftKbORVbU",
"user_name": "admin"
}
```
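
The same credentials work from the Boundary CLI. For example, using the `auth_method_id` and `login_name` from the sample output above (your values will differ; the command prompts for the password):

```
boundary authenticate password \
  -auth-method-id ampw_KEOB3T5XBL \
  -login-name admin
```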

Access the UI and log in with the credentials above.

```
open $BOUNDARY_ADDR
```

### Destroy

```
shikari destroy -n <cluster_name> -f
```
228 changes: 228 additions & 0 deletions scenarios/boundary-secure/hashibox.yaml
@@ -0,0 +1,228 @@
plain: true

provision:

- mode: system # Install Boundary License
script: |
#!/bin/bash
if [[ -n $BOUNDARY_LICENSE ]]; then
grep -q BOUNDARY_LICENSE /etc/boundary.d/boundary.env || echo "BOUNDARY_LICENSE=$BOUNDARY_LICENSE" > /etc/boundary.d/boundary.env
fi
- mode: system # Start postgres db
script: |
#!/bin/bash
if [[ "${HOSTNAME}" != "lima-${SHIKARI_CLUSTER_NAME}-srv-01" ]]; then
exit 0
fi
export POSTGRES_USERNAME=postgres
export POSTGRES_PASSWORD=password
export BOUNDARY_DATABASE=boundary
systemctl enable --now docker
docker ps | grep -q boundary-postgres || docker run --name boundary-postgres -e POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" -p 5432:5432 -d postgres:alpine
until pg_isready -h localhost -U ${POSTGRES_USERNAME}; do echo waiting; sleep 1; done
# Sets the Postgres Password for non-interactive operations
export PGPASSWORD="${POSTGRES_PASSWORD}"
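# Create the boundary database only if it does not already exist, so re-provisioning stays idempotent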
psql -U ${POSTGRES_USERNAME} -tc "SELECT 1 FROM pg_database WHERE datname = '${BOUNDARY_DATABASE}'" -h localhost | grep -q 1 || psql -U ${POSTGRES_USERNAME} -h localhost -c "create database ${BOUNDARY_DATABASE}"
echo "BOUNDARY_DB_URL=postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@lima-${SHIKARI_CLUSTER_NAME}-srv-01.local:5432/${BOUNDARY_DATABASE}" >> /etc/boundary.d/boundary.env
- mode: system # Create certificates and required directories
script: |
#!/bin/bash
LIMA_IP_ADDR=$(ip -json -4 addr show lima0 | jq -r '.[] | .addr_info[].local')
# Generate TLS Certificates
cd /etc/boundary.d/certs
if ! [[ -f dc1-server-boundary-0.pem ]]; then
consul tls cert create -server -domain boundary -additional-ipaddress $LIMA_IP_ADDR
cat boundary-agent-ca.pem >> dc1-server-boundary-0.pem # https://developer.hashicorp.com/boundary/docs/configuration/listener/tcp#tls_cert_file
chown boundary:boundary /etc/boundary.d/certs/*
fi
# Directory for audit logs
install -o boundary -g boundary -d /var/log/boundary
- mode: system # Configure controller
script: |
#!/bin/bash
if [[ "${SHIKARI_VM_MODE}" != "server" ]]; then
exit 0
fi
LIMA_IP_ADDR=$(ip -json -4 addr show lima0 | jq -r '.[] | .addr_info[].local')
# Create Controller Config
cat <<-EOF > /etc/boundary.d/boundary.hcl
disable_mlock = true
controller {
name = "${HOSTNAME}"
description = "Boundary Controller: ${HOSTNAME}"
database {
url = "env://BOUNDARY_DB_URL" # this is set inside /etc/boundary.d/boundary.env
}
}
listener "tcp" {
address = "${LIMA_IP_ADDR}"
purpose = "api"
tls_disable = false
tls_cert_file = "/etc/boundary.d/certs/dc1-server-boundary-0.pem"
tls_key_file = "/etc/boundary.d/certs/dc1-server-boundary-0-key.pem"
tls_client_ca_file = "/etc/boundary.d/certs/boundary-agent-ca.pem"
}
# # Data-plane listener configuration block (used for worker coordination)
listener "tcp" {
address = "${LIMA_IP_ADDR}"
purpose = "cluster"
}
# # Root KMS configuration block: this is the root key for Boundary
# # Use a production KMS such as AWS KMS in production installs
kms "aead" {
purpose = "root"
aead_type = "aes-gcm"
key = "sP1fnF5Xz85RrXyELHFeZg9Ad2qt4Z4bgNHVGtD6ung="
key_id = "global_root"
}
# # Worker authorization KMS
# # Use a production KMS such as AWS KMS for production installs
# # This key is the same key used in the worker configuration
kms "aead" {
purpose = "worker-auth"
aead_type = "aes-gcm"
key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
key_id = "global_worker-auth"
}
# # Recovery KMS block: configures the recovery key for Boundary
# # Use a production KMS such as AWS KMS for production installs
kms "aead" {
purpose = "recovery"
aead_type = "aes-gcm"
key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
key_id = "global_recovery"
}
events {
audit_enabled = true
sysevents_enabled = true
observations_enabled = true
sink "stderr" {
name = "all-events"
description = "All events sent to stderr"
event_types = ["*"]
format = "cloudevents-json"
}
sink {
name = "file-sink"
description = "All events sent to a file"
event_types = ["*"]
format = "cloudevents-json"
file {
path = "/var/log/boundary"
file_name = "controller.log"
}
audit_config {
audit_filter_overrides {
sensitive = "redact"
secret = "redact"
}
}
}
}
EOF
- mode: system # Configure Workers
script: |
#!/bin/bash
if [[ "${SHIKARI_VM_MODE}" != "client" ]]; then
exit 0
fi
cat <<-EOF > /etc/boundary.d/boundary.hcl
listener "tcp" {
purpose = "proxy"
tls_disable = false
address = "0.0.0.0"
tls_cert_file = "/etc/boundary.d/certs/dc1-server-boundary-0.pem"
tls_key_file = "/etc/boundary.d/certs/dc1-server-boundary-0-key.pem"
tls_client_ca_file = "/etc/boundary.d/certs/boundary-agent-ca.pem"
}
worker {
# # Name attr must be unique across workers
name = "${HOSTNAME}"
description = "A default worker created demonstration"
# # Workers must be able to reach controllers on :9201
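# # The entries below are generated by the provisioning shell when this heredoc is rendered, one per server VM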
controllers = [
$(for x in $(seq $SHIKARI_SERVER_COUNT); do
echo \"lima-${SHIKARI_CLUSTER_NAME}-srv-0$x.local\",
done)
]
public_addr = "${HOSTNAME}.local"
}
# # must be same key as used on controller config
kms "aead" {
purpose = "worker-auth"
aead_type = "aes-gcm"
key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
key_id = "global_worker-auth"
}
EOF
- mode: system # Populate boundary database
script: |
#!/bin/bash
if [[ "${HOSTNAME}" != "lima-${SHIKARI_CLUSTER_NAME}-srv-01" ]]; then
exit 0
fi
BOUNDARY_DB_INIT_JSON_FILE_PATH=/etc/boundary.d/db_init.json
# This export is required for the database init to have the connection string as we are using env:// in the controller config.
export $(cat /etc/boundary.d/boundary.env)
[[ -f ${BOUNDARY_DB_INIT_JSON_FILE_PATH} ]] || boundary database init -config /etc/boundary.d/boundary.hcl -format json | tee /etc/boundary.d/db_init.json
- mode: system # Start Boundary
script: |
#!/bin/bash
if [[ "${SHIKARI_VM_MODE}" == "server" ]]; then
until pg_isready -h lima-${SHIKARI_CLUSTER_NAME}-srv-01.local -U postgres; do echo waiting; sleep 1; done
fi
systemctl enable --now boundary
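# Copy the cluster CA cert to the host so host-side clients can verify the controllers' TLS certificates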
copyToHost:
- guest: "/etc/boundary.d/certs/boundary-agent-ca.pem"
host: "{{.Dir}}/copied-from-guest/boundary-agent-ca.pem"

networks:
- lima: shared
vmType: qemu
env:
SHIKARI_SCENARIO_NAME: "boundary-secure"
