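# Makefile for running Kubernetes scale tests with Calico networking on GCE.
# Typical flow: "make gce-create" to bring up the cluster, "make deploy-pinger"
# then "make scale-pinger" to create pods, and "make -j12 pull-plugin-timings"
# to collect per-node CNI timing logs.
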
all: TODO
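
# Scale parameters: number of worker nodes and expected pod count, plus the
# node names and per-node log-retrieval targets derived from them.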
CLUSTER_SIZE := 200
PODS := 10000
NODE_NUMBERS := $(shell seq -f '%02.0f' 1 ${CLUSTER_SIZE})
LOG_RETRIEVAL_TARGETS := $(addprefix job,${NODE_NUMBERS})
NODE_NAMES := $(addprefix kube-scale-,${NODE_NUMBERS})
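
# Fetch the kubectl and calicoctl client binaries into the working directory.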
kubectl:
	wget http://storage.googleapis.com/kubernetes-release/release/v1.1.4/bin/linux/amd64/kubectl
	chmod +x kubectl

calicoctl:
	wget http://www.projectcalico.org/builds/calicoctl
	chmod +x calicoctl
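
# Heapster monitoring stack: tear down any existing resources in kube-system,
# then recreate them from the heapster manifests.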
deploy-heapster: remove-heapster
	kubectl create -f heapster

remove-heapster:
	-kubectl delete service monitoring-grafana --grace-period=1 --namespace=kube-system
	-kubectl delete service monitoring-influxdb --grace-period=1 --namespace=kube-system
	-kubectl delete service heapster --grace-period=1 --namespace=kube-system
	-kubectl delete rc heapster --grace-period=1 --namespace=kube-system
	-kubectl delete rc influxdb-grafana --grace-period=1 --namespace=kube-system

# Node selectors in the pod specs don't allow negation, so apply a "role=node"
# label to every node whose hostname is not 127.0.0.1 and select on that instead.
apply-node-labels:
	bash -c 'while [ $$(kubectl get no |grep role=node -c) -ne ${CLUSTER_SIZE} ] ; do kubectl label nodes -l kubernetes.io/hostname!=127.0.0.1 role=node; done'
	kubectl get no
	@echo "Number of labeled nodes: "
	@make --no-print-directory gce-list-nodes-count
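
# Pinger test pods: create the pinger replication controller from the pinger
# manifests, then scale it out to ${PODS} replicas.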
deploy-pinger: remove-pinger
	kubectl create -f pinger
	kubectl get rc
	kubectl get po

remove-pinger:
	-kubectl delete rc pinger --grace-period=1

scale-pinger:
	kubectl scale --replicas=${PODS} rc/pinger
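
# Retrieve the Calico CNI plugin log and systemd journal from every node (via
# the master) and collate the TIMING lines and errors.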
# See http://stackoverflow.com/a/12110773/61318
# Example (parallel fetch): make -j12 CLUSTER_SIZE=26 pull-plugin-timings
pull-plugin-timings: ${LOG_RETRIEVAL_TARGETS}
	grep TIMING timings/cni*.log > timings/all.timings
	grep -v TIMING timings/cni*.log | grep -v INFO > timings/all.errors
	-ssh -o LogLevel=quiet core@kube-scale-master.us-central1-a.unique-caldron-775 journalctl --no-pager >timings/master.log

${LOG_RETRIEVAL_TARGETS}: job%:
	@mkdir -p timings
	-ssh -o LogLevel=quiet core@kube-scale-master.us-central1-a.unique-caldron-775 ssh -o LogLevel=quiet -o StrictHostKeyChecking=no kube-scale-$* cat /var/log/calico/cni/cni.log > timings/cni-$*.log
	-ssh -o LogLevel=quiet core@kube-scale-master.us-central1-a.unique-caldron-775 ssh -o LogLevel=quiet -o StrictHostKeyChecking=no kube-scale-$* journalctl --no-pager > timings/journal-$*.log

.PHONY: ${LOG_RETRIEVAL_TARGETS}
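
# Bring up the GCE cluster: a CoreOS master plus ${CLUSTER_SIZE} preemptible
# CoreOS worker nodes (no external IPs), then configure ssh, forward ports and
# label the nodes.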
gce-create: kubectl calicoctl
	-gcloud compute instances create \
		kube-scale-master \
		--image-project coreos-cloud \
		--image coreos-alpha-899-1-0-v20151218 \
		--machine-type n1-highcpu-16 \
		--local-ssd interface=scsi \
		--metadata-from-file user-data=master-config-template.yaml
	gcloud compute instances create \
		${NODE_NAMES} \
		--image-project coreos-cloud \
		--image coreos-alpha-899-1-0-v20151218 \
		--machine-type n1-highcpu-2 \
		--metadata-from-file user-data=node-config-template.yaml \
		--no-address \
		--preemptible \
		--tags no-ip
	make --no-print-directory gce-config-ssh
	make --no-print-directory gce-forward-ports
	make --no-print-directory apply-node-labels
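
# Delete every instance whose name matches kube-scale.*.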
gce-cleanup:
	gcloud compute instances list -r 'kube-scale.*' |tail -n +2 |cut -f1 -d' ' |xargs gcloud compute instances delete
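
# Forward the apiserver (8080), etcd (2379) and cAdvisor (4194) ports from the
# master to localhost, killing any previous forwarder first.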
gce-forward-ports:
	@-pkill -f '8080:localhost:8080'
	bash -c 'until ssh -o LogLevel=quiet -o PasswordAuthentication=no core@kube-scale-master.us-central1-a.unique-caldron-775 date; do echo "Try again"; done'
	ssh -o PasswordAuthentication=no -L 8080:localhost:8080 -L 2379:localhost:2379 -L 4194:localhost:4194 -o LogLevel=quiet -nNT core@kube-scale-master.us-central1-a.unique-caldron-775 &
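
# Push updated cloud-config metadata to the master and nodes; instances only
# pick it up on their next boot (see the commented reboot command below).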
gce-redeploy:
	gcloud compute instances add-metadata kube-scale-master --metadata-from-file=user-data=master-config-template.yaml
	gcloud compute instances add-metadata ${NODE_NAMES} --metadata-from-file=user-data=node-config-template.yaml
# gcloud compute ssh kube-scale-master sudo reboot
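
# Convenience targets: ssh configuration, master access, BGP peering status
# and node listings.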
gce-config-ssh:
	gcloud compute config-ssh

gce-ssh-master:
	ssh core@kube-scale-master.us-central1-a.unique-caldron-775

gce-bgp-status:
	ssh core@kube-scale-master.us-central1-a.unique-caldron-775 /opt/bin/calicoctl status

gce-bgp-status-count:
	ssh core@kube-scale-master.us-central1-a.unique-caldron-775 /opt/bin/calicoctl status |grep -c Established

gce-list-nodes:
	kubectl get no --no-headers -l 'kubernetes.io/hostname!=127.0.0.1'

gce-list-nodes-count:
	@kubectl get no --no-headers -l 'kubernetes.io/hostname!=127.0.0.1' | wc -l
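
# Pod status helpers: count running pods, list pods that are neither Pending
# nor Running, and block until ${PODS} pods report 1/1 Running.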
gce-successful-pods:
	kubectl get po | grep -P -c '1/1\s+Running\s+0'

gce-failed-pods:
	kubectl get po |grep -v Pending |grep -v Running

gce-wait-for-pod-creation:
	bash -c 'while [ $$(kubectl get po | grep -P -c "1/1\s+Running\s+0") -ne ${PODS} ] ; do date; echo "Not enough pods running - waiting"; kubectl describe rc |grep "Pods Status"; sleep 1;done'