Add private cluster with socks-proxy in addition to existing public cluster (#7751)

* Add private cluster with socks-proxy in addition to existing public cluster.

* Review feedback
Fraggle authored Sep 30, 2024
1 parent 6b0b40f commit b2df6fa
Showing 74 changed files with 332 additions and 185 deletions.
4 changes: 0 additions & 4 deletions .github/workflows/apply-infra.yml
@@ -37,10 +37,6 @@ jobs:
         run: |
           gcloud components install gke-gcloud-auth-plugin
-      - name: Setup kubectl
-        run: |
-          gcloud container clusters get-credentials dust-kube --region us-central1
       - name: Install yq
         run: |
           wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
177 changes: 12 additions & 165 deletions k8s/apply_infra.sh
@@ -2,171 +2,18 @@

set -e

-function apply_deployment {
-    # This function applies a deployment, but if the deployment already exists,
-    # it will replace the image with the current image to avoid a rolling update
-    DEPLOYMENT_NAME=$1
-    YAML_FILE="$(dirname "$0")/deployments/$DEPLOYMENT_NAME.yaml"
+# array of our clusters
+clusters=("dust-kube" "dust-kube-private")

-    # Get the current image if it exists
-    CURRENT_IMAGE=$(kubectl get deployment $DEPLOYMENT_NAME -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null || true)
+# loop through each cluster, get-credentials and then apply the infra
+for cluster in "${clusters[@]}"
+do
+    # get the credentials for the cluster
+    gcloud container clusters get-credentials $cluster --region us-central1

-    # Get the current number of replicas if it exists
-    CURRENT_REPLICAS=$(kubectl get deployment $DEPLOYMENT_NAME -o jsonpath='{.spec.replicas}' 2>/dev/null || true)
+    # parse the kubectl config get-contexts -o=name to get the context name by matching the cluster name up to line end
+    kubectl config use-context $(kubectl config get-contexts -o=name | grep $cluster'$')

-    # Check if an HPA exists for the deployment
-    HPA_EXISTS=$(kubectl get hpa $DEPLOYMENT_NAME -o name 2>/dev/null || true)
-
-    if [ -n "$CURRENT_IMAGE" ]; then
-        # If CURRENT_IMAGE is not empty, replace the image in the YAML file with the actual image
-        UPDATED_YAML=$(yq e ".spec.template.spec.containers[].image = \"$CURRENT_IMAGE\"" $YAML_FILE)
-
-        # If the HPA exists, update the replicas in the YAML
-        if [ -n "$HPA_EXISTS" ]; then
-            if [ -n "$CURRENT_REPLICAS" ]; then
-                UPDATED_YAML=$(echo "$UPDATED_YAML" | yq e ".spec.replicas = $CURRENT_REPLICAS" -)
-            fi
-        fi
-
-        # Apply the updated YAML
-        echo "$UPDATED_YAML" | kubectl apply -f -
-    else
-        # If CURRENT_IMAGE is empty, apply the original YAML
-        kubectl apply -f $YAML_FILE
-    fi
-}
-
-if helm list -n default | grep -q dust-datadog-agent; then
-    echo "datadog-agent already installed, skipping."
-else
-    if [ -z ${DD_API_KEY+x} ]; then
-        echo "DD_API_KEY is unset"
-        exit 1
-    fi
-
-    if [ -z ${DD_APP_KEY+x} ]; then
-        echo "DD_APP_KEY is unset"
-        exit 1
-    fi
-    echo "-----------------------------------"
-    echo "Installing datadog-agent"
-    echo "-----------------------------------"
-    helm repo add datadog https://helm.datadoghq.com
-    helm repo update
-    helm install dust-datadog-agent datadog/datadog -f "$(dirname "$0")/datadog-values.yml" \
-        --set datadog.apiKey=$DD_API_KEY \
-        --set datadog.appKey=$DD_APP_KEY
-fi
-
-
-echo "-----------------------------------"
-echo "Applying configmaps"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/configmaps/apache-tika-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/front-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/front-worker-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/front-edge-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/front-qa-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/connectors-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/connectors-worker-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/connectors-worker-specific-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/alerting-temporal-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/core-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/core-sqlite-worker-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/oauth-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/prodbox-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/viz-configmap.yaml"
-kubectl apply -f "$(dirname "$0")/configmaps/socks-proxy-configmap.yaml"
-
-echo "-----------------------------------"
-echo "Applying backend configs"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/backend-configs/apache-tika-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/front-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/connectors-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/metabase-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/core-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/oauth-backend-config.yaml"
-kubectl apply -f "$(dirname "$0")/backend-configs/viz-backend-config.yaml"
-
-echo "-----------------------------------"
-echo "Applying managed certificates"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/managed-certs/front-managed-cert.yaml"
-kubectl apply -f "$(dirname "$0")/managed-certs/front-edge-managed-cert.yaml"
-kubectl apply -f "$(dirname "$0")/managed-certs/front-qa-managed-cert.yaml"
-kubectl apply -f "$(dirname "$0")/managed-certs/connectors-managed-cert.yaml"
-kubectl apply -f "$(dirname "$0")/managed-certs/metabase-managed-cert.yaml"
-kubectl apply -f "$(dirname "$0")/managed-certs/viz-managed-cert.yaml"
-
-
-echo "-----------------------------------"
-echo "Applying frontend configs"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/frontend-configs/dust-frontend-config.yaml"
-
-echo "-----------------------------------"
-echo "Applying deployments"
-echo "-----------------------------------"
-
-apply_deployment apache-tika-deployment
-apply_deployment front-deployment
-apply_deployment front-worker-deployment
-apply_deployment front-edge-deployment
-apply_deployment front-qa-deployment
-apply_deployment connectors-deployment
-apply_deployment connectors-worker-deployment
-apply_deployment connectors-worker-notion-deployment
-apply_deployment connectors-worker-notion-gc-deployment
-apply_deployment connectors-worker-webcrawler-deployment
-apply_deployment connectors-worker-google-drive-deployment
-apply_deployment metabase-deployment
-apply_deployment alerting-temporal-deployment
-apply_deployment core-deployment
-apply_deployment core-sqlite-worker-deployment
-apply_deployment oauth-deployment
-apply_deployment prodbox-deployment
-apply_deployment viz-deployment
-apply_deployment socks-proxy-deployment
-
-echo "-----------------------------------"
-echo "Applying HPAs"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/hpas/apache-tika-hpa.yaml"
-
-echo "-----------------------------------"
-echo "Applying services"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/services/apache-tika-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/front-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/front-edge-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/front-qa-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/connectors-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/connectors-worker-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/metabase-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/core-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/core-sqlite-worker-headless-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/oauth-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/viz-service.yaml"
-kubectl apply -f "$(dirname "$0")/services/socks-proxy-service.yaml"
-
-
-echo "-----------------------------------"
-echo "Applying ingress"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/ingress.yaml"
-
-echo "-----------------------------------"
-echo "Applying network policies"
-echo "-----------------------------------"
-
-kubectl apply -f "$(dirname "$0")/network-policies/core-network-policy.yaml"
-kubectl apply -f "$(dirname "$0")/network-policies/oauth-network-policy.yaml"
-kubectl apply -f "$(dirname "$0")/network-policies/core-sqlite-worker-network-policy.yaml"
+    # apply the infra
+    $cluster/apply_infra_$cluster.sh
+done
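
Note: apply_infra.sh is now only a dispatcher. The per-cluster scripts it calls (see k8s/dust-kube-private/apply_infra_dust-kube-private.sh below) source shared helpers from k8s/utils.sh, which is not included in this diff. A minimal sketch of those helpers, assuming check_context, install_datadog_agent and apply_deployment simply carry over the logic removed from apply_infra.sh above (the function names come from the new per-cluster script; the bodies are an assumption):

#!/bin/bash
# Sketch of k8s/utils.sh -- not part of this diff; bodies assumed to mirror the
# logic that previously lived in k8s/apply_infra.sh.

# Fail fast if kubectl is not pointed at the expected cluster.
function check_context {
    local cluster_name=$1
    local current_context
    current_context=$(kubectl config current-context)
    if [[ "$current_context" != *"$cluster_name" ]]; then
        echo "kubectl context '$current_context' does not match cluster '$cluster_name'" >&2
        exit 1
    fi
}

# Install the Datadog agent via helm unless a release is already present
# (same invocation that was removed from apply_infra.sh above).
function install_datadog_agent {
    if helm list -n default | grep -q dust-datadog-agent; then
        echo "datadog-agent already installed, skipping."
        return
    fi
    : "${DD_API_KEY:?DD_API_KEY is unset}"
    : "${DD_APP_KEY:?DD_APP_KEY is unset}"
    helm repo add datadog https://helm.datadoghq.com
    helm repo update
    helm install dust-datadog-agent datadog/datadog \
        -f "$(dirname "$0")/datadog-values.yml" \
        --set datadog.apiKey=$DD_API_KEY \
        --set datadog.appKey=$DD_APP_KEY
}

# Apply a deployment while keeping the currently running image, so that a plain
# infra apply does not trigger a rolling update.
function apply_deployment {
    local deployment_name=$1
    local yaml_file="$(dirname "$0")/deployments/$deployment_name.yaml"
    local current_image
    current_image=$(kubectl get deployment "$deployment_name" \
        -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null || true)
    if [ -n "$current_image" ]; then
        yq e ".spec.template.spec.containers[].image = \"$current_image\"" "$yaml_file" | kubectl apply -f -
    else
        kubectl apply -f "$yaml_file"
    fi
}

The suffix match in check_context relies on GKE context names ending with the cluster name, the same convention the grep $cluster'$' in the dispatcher loop depends on.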
32 changes: 32 additions & 0 deletions k8s/dust-kube-private/apply_infra_dust-kube-private.sh
@@ -0,0 +1,32 @@
#!/bin/bash

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../utils.sh"

# By convention, the name of the folder enclosing this script is the cluster name
CLUSTER_NAME=$(basename $(dirname "$0"))

check_context $CLUSTER_NAME

install_datadog_agent

echo "-----------------------------------"
echo "Applying configmaps"
echo "-----------------------------------"

kubectl apply -f "$(dirname "$0")/configmaps/socks-proxy-configmap.yaml"

echo "-----------------------------------"
echo "Applying deployments"
echo "-----------------------------------"

apply_deployment socks-proxy-deployment

echo "-----------------------------------"
echo "Applying network policies"
echo "-----------------------------------"

# kubectl apply -f "$(dirname "$0")/network-policies/allow-same-namespace.yaml"
# kubectl apply -f "$(dirname "$0")/network-policies/default-deny-ingress.yaml"
File renamed without changes.
53 changes: 53 additions & 0 deletions k8s/dust-kube-private/datadog-values.yml
@@ -0,0 +1,53 @@
datadog:
  logs:
    enabled: true
    containerCollectAll: false
  containerExcludeLogs:
    - "name:datadog-agent"
  site: datadoghq.eu
  clusterName: dust-kube-private

  apm:
    socketEnabled: false
    portEnabled: true

  dogstatsd:
    useDogStatsDSocketVolume: false
    port: 8125
    useHostPort: true
    nonLocalTraffic: true

  ignoreAutoConfig:
    - cilium
    - nginx
    - redis

  kubeStateMetricsCore:
    enabled: true

  kubeStateMetricsEnabled: false

clusterAgent:
  enabled: true
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
  replicas: 2
  createPodDisruptionBudget: true

agents:
  priorityClassCreate: true
  containers:
    agent:
      resources:
        requests:
          cpu: 200m
          memory: 512Mi
        limits:
          cpu: 200m
          memory: 512Mi

providers:
  gke:
    autopilot: true
@@ -14,7 +14,7 @@ spec:
         name: socks-proxy-pod
         admission.datadoghq.com/enabled: "true"
       annotations:
-        ad.datadoghq.com/web.logs: '[{"source": "sock5-proxy","service": "sock5-proxy","tags": ["env:prod"]}]'
+        ad.datadoghq.com/web.logs: '[{"source": "dante","service": "sock5-proxy","tags": ["env:prod"]}]'
     spec:
       containers:
         - name: socks-proxy
@@ -37,4 +37,4 @@ spec:
               memory: 1Gi
             limits:
               cpu: 1000m
-              memory: 1Gi
+              memory: 1Gi
11 changes: 11 additions & 0 deletions k8s/dust-kube-private/network-policies/allow-same-namespace.yaml
@@ -0,0 +1,11 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-same-namespace
spec:
  podSelector: {}
  ingress:
    - from:
        - podSelector: {}
  policyTypes:
    - Ingress
8 changes: 8 additions & 0 deletions k8s/dust-kube-private/network-policies/default-deny-ingress.yaml
@@ -0,0 +1,8 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes:
    - Ingress
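
The two policies above compose in the standard Kubernetes way: default-deny-ingress selects every pod in the namespace and blocks all inbound traffic, and allow-same-namespace then re-allows traffic whose source is a pod in the same namespace. Both applies are still commented out in apply_infra_dust-kube-private.sh; once the private cluster is ready to enforce them, enabling them presumably just means running the lines that are currently commented, for example:

# Hypothetical enable/verify steps -- these mirror the commented-out lines in
# k8s/dust-kube-private/apply_infra_dust-kube-private.sh and are not part of this commit.
kubectl apply -f "$(dirname "$0")/network-policies/default-deny-ingress.yaml"
kubectl apply -f "$(dirname "$0")/network-policies/allow-same-namespace.yaml"

# Confirm both policies exist in the target namespace.
kubectl get networkpolicy default-deny-ingress allow-same-namespace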
16 changes: 16 additions & 0 deletions k8s/dust-kube-private/services/socks-proxy-service.yaml
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: socks-proxy-service
spec:
  type: ClusterIP
  ports:
    - port: 1080
      targetPort: 1080
  selector:
    app: socks-proxy
---
apiVersion: net.gke.io/v1
kind: ServiceExport
metadata:
  name: socks-proxy-service
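
The ServiceExport (net.gke.io/v1) registers socks-proxy-service with GKE multi-cluster Services, which is presumably how workloads in the public dust-kube cluster reach the SOCKS proxy running in the private cluster; this assumes multi-cluster Services is enabled on the fleet. Following the multi-cluster Services convention, the exported service should resolve from the other cluster at <service>.<namespace>.svc.clusterset.local. A hypothetical smoke test from a pod in the public cluster (the default namespace is an assumption):

# Assumes the service lives in the "default" namespace and that the fleet's
# multi-cluster Services controller has already created the corresponding ServiceImport.
curl -sS -o /dev/null -w '%{http_code}\n' \
  --socks5-hostname socks-proxy-service.default.svc.clusterset.local:1080 \
  https://example.com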