Skip to content

Commit

Permalink
Merge branch 'main' into hormes-patch-1
Browse files Browse the repository at this point in the history
  • Loading branch information
hormes authored Sep 2, 2024
2 parents 9b2cb8d + cb89d6d commit 0841acf
Show file tree
Hide file tree
Showing 102 changed files with 5,998 additions and 552 deletions.
22 changes: 18 additions & 4 deletions .github/workflows/e2e-k8s-1.22.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,23 @@ jobs:
continue-on-error: true
runs-on: ubuntu-20.04
steps:
- name: Check host environment before
run: |
set -ex
lscpu -e
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
df -h
- name: Free Disk Space
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: false
swap-storage: false
large-packages: false
docker-images: false
android: true
dotnet: true
haskell: true
- uses: actions/checkout@v4
with:
submodules: true
Expand All @@ -48,15 +65,12 @@ jobs:
kind load docker-image --name=${KIND_CLUSTER_NAME} ${MANAGER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${MANAGER_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${KOORDLET_IMAGE} || { echo >&2 "kind not installed or error loading image: ${KOORDLET_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${SCHEDULER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${SCHEDULER_IMAGE}"; exit 1; }
- name: Check host environment
- name: Check cluster environment
run: |
set -ex
kubectl version --short
kubectl get pods -A
kubectl get nodes -o yaml
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
cat /proc/cpuinfo
- name: Install Koordinator
run: |
set -ex
Expand Down
22 changes: 18 additions & 4 deletions .github/workflows/e2e-k8s-1.24.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,23 @@ jobs:
continue-on-error: true
runs-on: ubuntu-20.04
steps:
- name: Check host environment before
run: |
set -ex
lscpu -e
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
df -h
- name: Free Disk Space
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: false
swap-storage: false
large-packages: false
docker-images: false
android: true
dotnet: true
haskell: true
- uses: actions/checkout@v4
with:
submodules: true
Expand All @@ -48,15 +65,12 @@ jobs:
kind load docker-image --name=${KIND_CLUSTER_NAME} ${MANAGER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${MANAGER_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${KOORDLET_IMAGE} || { echo >&2 "kind not installed or error loading image: ${KOORDLET_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${SCHEDULER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${SCHEDULER_IMAGE}"; exit 1; }
- name: Check host environment
- name: Check cluster environment
run: |
set -ex
kubectl version --short
kubectl get pods -A
kubectl get nodes -o yaml
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
cat /proc/cpuinfo
- name: Install Koordinator
run: |
set -ex
Expand Down
22 changes: 18 additions & 4 deletions .github/workflows/e2e-k8s-1.28.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,23 @@ jobs:
continue-on-error: true
runs-on: ubuntu-20.04
steps:
- name: Check host environment before
run: |
set -ex
lscpu -e
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
df -h
- name: Free Disk Space
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: false
swap-storage: false
large-packages: false
docker-images: false
android: true
dotnet: true
haskell: true
- uses: actions/checkout@v4
with:
submodules: true
Expand All @@ -48,15 +65,12 @@ jobs:
kind load docker-image --name=${KIND_CLUSTER_NAME} ${MANAGER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${MANAGER_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${KOORDLET_IMAGE} || { echo >&2 "kind not installed or error loading image: ${KOORDLET_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${SCHEDULER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${SCHEDULER_IMAGE}"; exit 1; }
- name: Check host environment
- name: Check cluster environment
run: |
set -ex
kubectl version --short
kubectl get pods -A
kubectl get nodes -o yaml
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
cat /proc/cpuinfo
- name: Install Koordinator
run: |
set -ex
Expand Down
24 changes: 20 additions & 4 deletions .github/workflows/e2e-k8s-latest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,23 @@ jobs:
continue-on-error: true
runs-on: ubuntu-20.04
steps:
- name: Check host environment before
run: |
set -ex
lscpu -e
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
df -h
- name: Free Disk Space
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: false
swap-storage: false
large-packages: false
docker-images: false
android: true
dotnet: true
haskell: true
- uses: actions/checkout@v4
with:
submodules: true
Expand All @@ -46,15 +63,12 @@ jobs:
kind load docker-image --name=${KIND_CLUSTER_NAME} ${MANAGER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${MANAGER_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${KOORDLET_IMAGE} || { echo >&2 "kind not installed or error loading image: ${KOORDLET_IMAGE}"; exit 1; }
kind load docker-image --name=${KIND_CLUSTER_NAME} ${SCHEDULER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${SCHEDULER_IMAGE}"; exit 1; }
- name: Check host environment
- name: Check cluster environment
run: |
set -ex
kubectl version --short
kubectl get pods -A
kubectl get nodes -o yaml
tree -L 2 /sys/
tree -L 2 /sys/fs/cgroup
cat /proc/cpuinfo
- name: Install Koordinator
run: |
set -ex
Expand Down Expand Up @@ -105,3 +119,5 @@ jobs:
exit 1
fi
exit $retVal
- name: Check host environment after
run: df -h
2 changes: 2 additions & 0 deletions .licenseignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ pkg/descheduler/controllers/migration/controllerfinder
pkg/scheduler/plugins/coscheduling
pkg/scheduler/plugins/nodenumaresource/least_allocated.go
pkg/scheduler/plugins/nodenumaresource/most_allocated.go
pkg/scheduler/plugins/reservation/preemption.go
pkg/scheduler/plugins/reservation/preemption_test.go
pkg/scheduler/frameworkext/topologymanager/policy.go
pkg/scheduler/frameworkext/topologymanager/policy_test.go
pkg/scheduler/frameworkext/topologymanager/policy_best_effort.go
Expand Down
39 changes: 32 additions & 7 deletions apis/configuration/slo_controller_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,20 +18,22 @@ package configuration

import (
"github.com/mohae/deepcopy"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1"
)

const (
// keys in the configmap
ColocationConfigKey = "colocation-config"
ResourceThresholdConfigKey = "resource-threshold-config"
ResourceQOSConfigKey = "resource-qos-config"
CPUBurstConfigKey = "cpu-burst-config"
SystemConfigKey = "system-config"
HostApplicationConfigKey = "host-application-config"
CPUNormalizationConfigKey = "cpu-normalization-config"
ColocationConfigKey = "colocation-config"
ResourceThresholdConfigKey = "resource-threshold-config"
ResourceQOSConfigKey = "resource-qos-config"
CPUBurstConfigKey = "cpu-burst-config"
SystemConfigKey = "system-config"
HostApplicationConfigKey = "host-application-config"
CPUNormalizationConfigKey = "cpu-normalization-config"
ResourceAmplificationConfigKey = "resource-amplification-config"
)

// +k8s:deepcopy-gen=true
Expand Down Expand Up @@ -295,6 +297,29 @@ type ModelRatioCfg struct {
HyperThreadTurboEnabledRatio *float64 `json:"hyperThreadTurboEnabledRatio,omitempty"`
}

// ResourceAmplificationCfg is the cluster-level configuration of the resource amplification strategy.
// +k8s:deepcopy-gen=true
type ResourceAmplificationCfg struct {
	// ResourceAmplificationStrategy is the cluster-wide default strategy.
	ResourceAmplificationStrategy `json:",inline"`
	// NodeConfigs holds node-level configurations keyed by NodeCfgProfile.
	// NOTE(review): presumably each node-level entry overrides the cluster-wide
	// default for the nodes its profile selects, matching the other *Cfg types
	// in this file — confirm against the config merger.
	NodeConfigs []NodeResourceAmplificationCfg `json:"nodeConfigs,omitempty" validate:"dive"`
}

// NodeResourceAmplificationCfg is the node-level configuration of the resource amplification strategy.
// +k8s:deepcopy-gen=true
type NodeResourceAmplificationCfg struct {
	// NodeCfgProfile selects the nodes this configuration applies to.
	NodeCfgProfile `json:",inline"`
	// ResourceAmplificationStrategy is the strategy applied to the selected nodes.
	// The `json:",inline"` tag is added for consistency with the embedded field
	// of ResourceAmplificationCfg and with the k8s API convention for embedded
	// structs; encoding/json already inlines untagged anonymous fields, so the
	// wire format is unchanged.
	ResourceAmplificationStrategy `json:",inline"`
}

// ResourceAmplificationStrategy is the resource amplification strategy.
// +k8s:deepcopy-gen=true
type ResourceAmplificationStrategy struct {
	// Enable defines whether the resource amplification strategy is enabled.
	Enable *bool `json:"enable,omitempty"`
	// ResourceAmplificationRatio defines the resource amplification ratio
	// per resource name (e.g. cpu, memory).
	// NOTE(review): the valid range (presumably >= 1.0) is not enforced here —
	// confirm where validation happens.
	ResourceAmplificationRatio map[corev1.ResourceName]float64 `json:"resourceAmplificationRatio,omitempty"`
}

/*
Koordinator uses configmap to manage the configuration of SLO, the configmap is stored in
<ConfigNameSpace>/<SLOCtrlConfigMap>, with the following keys respectively:
Expand Down
68 changes: 68 additions & 0 deletions apis/configuration/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 14 additions & 0 deletions apis/extension/preemption.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ const (
// PreemptionPolicyTransformer is enabled.
// When PreemptionPolicyTransformer is enabled but the pod does not set the label, DefaultPreemptionPolicy is used.
LabelPodPreemptionPolicy = SchedulingDomainPrefix + "/preemption-policy"

// LabelDisablePreemptible determines whether the pod disables being a victim during the reservation preemption.
LabelDisablePreemptible = SchedulingDomainPrefix + "/disable-preemptible"
)

func GetPodKoordPreemptionPolicy(pod *corev1.Pod) *corev1.PreemptionPolicy {
Expand All @@ -40,3 +43,14 @@ func GetPodKoordPreemptionPolicy(pod *corev1.Pod) *corev1.PreemptionPolicy {
// GetPreemptionPolicyPtr returns a pointer to a copy of the given
// PreemptionPolicy value, for populating optional *PreemptionPolicy fields.
func GetPreemptionPolicyPtr(policy corev1.PreemptionPolicy) *corev1.PreemptionPolicy {
	return &policy
}

// IsPodPreemptible reports whether the pod may be chosen as a victim during
// reservation preemption. A pod opts out only by carrying the
// LabelDisablePreemptible label with the exact value "true"; a nil pod,
// absent labels, or any other label value leaves the pod preemptible.
func IsPodPreemptible(pod *corev1.Pod) bool {
	if pod == nil {
		return true
	}
	// Indexing a nil map is safe in Go and yields the zero value "", which is
	// treated the same as an absent or non-"true" label: the pod stays
	// preemptible.
	return pod.Labels[LabelDisablePreemptible] != "true"
}
Loading

0 comments on commit 0841acf

Please sign in to comment.