main.go
package main // import "github.com/domoinc/kube-valet"

import (
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/op/go-logging"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"gopkg.in/alecthomas/kingpin.v2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	resourcelock "k8s.io/client-go/tools/leaderelection/resourcelock"

	valet "github.com/domoinc/kube-valet/pkg/client/clientset/versioned"
	valetconfig "github.com/domoinc/kube-valet/pkg/config"
)
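
// KubernetesComponent is the component name, "kube-valet".
// DefaultElectionConfigmapName is the default --lock-object-name used for
// leader election (see the flag definitions below).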
const (
	KubernetesComponent          = "kube-valet"
	DefaultElectionConfigmapName = "kube-valet-election"
)
var (
	// DoS vulnerability fix
	// https://medium.com/@nate510/don-t-use-go-s-default-http-client-4804cb19f779
	netClient = &http.Client{
		Timeout: time.Second * 5,
	}

	// App
	app                = kingpin.New("kube-valet", "Automated QoS for kubernetes")
	logLevel           = app.Flag("loglevel", "Logging level.").Short('L').Default("NOTICE").String()
	inCluster          = app.Flag("in-cluster", "Running in cluster").Default("false").Bool()
	kubeconfig         = app.Flag("kubeconfig", "Path to kubeconfig").Short('c').String()
	configmapNamespace = app.Flag("configmap-namespace", "Configmap namespace").Default("kube-system").String()
	configmapName      = app.Flag("configmap-name", "Configmap name").Default("kube-valet").String()
	nodeAssignment     = app.Flag("node-assignment", "Run the NodeAssignment controllers. Default: true").Default("true").Bool()
	packLeft           = app.Flag("scheduling-packleft", "Run the PackLeft scheduling controller. Default: true").Default("true").Bool()
	//numNagThreads = app.Flag("num-nag-threads", "Max number of NodeAssignmentGroups that will be reconciled concurrently").Default("1").Int() // Can't enable until node resource locking is in place
	podAssignment = app.Flag("pod-assignment", "Run the PodAssignment controllers. Default: true").Default("true").Bool()
	numPodThreads = app.Flag("num-pod-threads", "Max number of Pods that will be initialized concurrently").Default("1").Int()

	// Follow the naming scheme of upstream elected components like kube-scheduler and kube-controller-manager.
	// EX: https://kubernetes.io/docs/reference/generated/kube-scheduler/
	// This makes it easy to copy/paste any election settings to this component.
	leaderElection = app.Flag("leader-elect", "Enable leader election").Bool()
	electDuration  = app.Flag("leader-elect-lease-duration", "The duration that non-leaders will wait before attempting to become the leader").Default("30s").Duration()
	electDeadline  = app.Flag("leader-elect-renew-deadline", "The interval between attempts by the acting master to renew leadership before it stops leading. This must be less than the lease duration").Default("10s").Duration()
	electResource  = app.Flag("leader-elect-resource-lock", "The type of resource that will be used for the lock. Allowed: configmaps, endpoints").Default(resourcelock.ConfigMapsResourceLock).Enum(resourcelock.ConfigMapsResourceLock, resourcelock.EndpointsResourceLock)
	electRetry     = app.Flag("leader-elect-retry-period", "The duration the clients should wait between attempts to acquire and renew leadership").Default("2s").Duration()
	electName      = app.Flag("lock-object-name", "Name of the election resource to be used for locks").Default(DefaultElectionConfigmapName).String()

	// Allow override of some election properties not configurable in upstream components. Just in case.
	electID        = app.Flag("leader-elect-id", "Unique name for the election candidate. Defaults to the hostname").String()
	electNamespace = app.Flag("leader-elect-namespace", "The namespace that the election resource will be created in").Default("kube-system").String()

	// Options for the webhook server
	listen      = app.Flag("listen", "The listen address for the webhook server").Default(":443").String()
	tlsCertPath = app.Flag("cert", "The path to a valid TLS serving certificate").Required().ExistingFile()
	tlsKeyPath  = app.Flag("key", "The path to a valid TLS serving key").Required().ExistingFile()

	log    = logging.MustGetLogger("kube-valet")
	format = logging.MustStringFormatter(`%{color}%{time:2006-01-02T15:04:05.999Z-07:00} %{shortfunc} : %{level:.4s}%{color:reset} %{message}`)
)
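
// main parses the command line, configures logging, builds the Kubernetes and
// kube-valet clients, and then runs the valet controllers while serving
// Prometheus metrics in the background.
//
// Example invocation (paths are illustrative; --cert and --key are required):
//
//	kube-valet --in-cluster --leader-elect \
//		--cert /path/to/tls.crt --key /path/to/tls.key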
func main() {
	// Enable short help flag
	app.HelpFlag.Short('h')

	// Parse cmd
	kingpin.MustParse(app.Parse(os.Args[1:]))

	// Default to the hostname as the candidate identity if none was specified
	if *electID == "" {
		hostname, _ := os.Hostname()
		electID = &hostname
	}

	// Setup logging
	logging.SetBackend(logging.NewLogBackend(os.Stdout, "", 0)) // Fix double-timestamp
	logging.SetFormatter(format)
	backend1 := logging.NewLogBackend(os.Stdout, "", 0)
	backend1Leveled := logging.AddModuleLevel(backend1)
	level, err := logging.LogLevel(*logLevel)
	if err != nil {
		fmt.Printf("Invalid log level: %s\n", *logLevel)
		os.Exit(1)
	}
	backend1Leveled.SetLevel(level, "")
	log.SetBackend(backend1Leveled)
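
	// Build the REST config: in-cluster when --in-cluster is set, otherwise
	// from the --kubeconfig path (see getConfig below).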
	config := getConfig()

	// Create the Kubernetes client
	kubeClient, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Errorf("Error creating Kubernetes client: %s", err)
		os.Exit(2)
	}

	// Create the valet client
	valetClient, err := valet.NewForConfig(config)
	if err != nil {
		log.Errorf("Error creating kube-valet client: %s", err)
		os.Exit(2)
	}
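
	// NagController and PLController each run with a single worker thread;
	// per the disabled num-nag-threads flag above, concurrent reconciliation
	// can't be enabled until node resource locking is in place.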
	// Create a new KubeValet
	kd := NewKubeValet(kubeClient, valetClient, &valetconfig.ValetConfig{
		ParController: valetconfig.ControllerConfig{
			Threads:   *numPodThreads,
			ShouldRun: *podAssignment,
		},
		NagController: valetconfig.ControllerConfig{
			Threads:   1,
			ShouldRun: *nodeAssignment,
		},
		PLController: valetconfig.ControllerConfig{
			Threads:   1,
			ShouldRun: *packLeft,
		},
		LoggingBackend: backend1Leveled,
	})
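
	// Register the Prometheus handler on the default mux and serve it on
	// :8080 in the background (see startMetricsHttp below).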
	http.Handle("/metrics", promhttp.Handler())
	go startMetricsHttp()

	// Run the kube valet
	kd.Run()
}
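
// startMetricsHttp serves the default mux (including /metrics) on :8080 and,
// if the server ever exits with an error, logs it and retries after 20 seconds.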
func startMetricsHttp() {
	for {
		err := http.ListenAndServe(":8080", nil)
		if err != nil {
			log.Errorf("Metrics server had an error: %v", err)
		}
		time.Sleep(20 * time.Second)
	}
}
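
// getConfig returns an in-cluster rest.Config when --in-cluster is set;
// otherwise it builds one from the kubeconfig given by --kubeconfig, using
// that file's current context.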
func getConfig() *rest.Config {
	if *inCluster {
		config, err := rest.InClusterConfig()
		if err != nil {
			log.Errorf("Error getting Kubernetes in-cluster client config: %s", err)
			os.Exit(1)
		}
		return config
	}

	// Use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		log.Errorf("Error getting Kubernetes client config: %s", err)
		os.Exit(1)
	}
	return config
}