Skip to content
This repository has been archived by the owner on Jun 10, 2024. It is now read-only.

Commit

Permalink
Merge pull request #37 from planetf1/opdev1
Browse files Browse the repository at this point in the history
Operator development update: config files now loading
  • Loading branch information
planetf1 authored Apr 29, 2022
2 parents a550137 + 7a4b213 commit cda53b7
Show file tree
Hide file tree
Showing 19 changed files with 12,771 additions and 63 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,9 @@ This github project is part of the overall Egeria project. See the [egeria proje
This project provides a Kubernetes operator to support the deployment of Egeria. This is in addition to the current
support in the main project for building the base egeria container images, as well as a few helm charts that make use of them.

Documentation can be found on our new [Egeria Documentation Site](https://odpi.github.io/egeria-docs/guides/admin/kubernetes/intro/)
Documentation can be found on our new [Egeria Documentation Site](http://egeria-project.org/guides/operations/kubernetes/)

Please engage via our main project [slack channels](https://slack.odpi.org), team calls, or via github issues in this repo.
Please engage via our [community](http://egeria-project.org/guides/community/), team calls, or via github issues in this repo.

See [DEVELOP.md](DEVELOP.md) for more specific details of the implementation and how to build.

Expand Down
18 changes: 15 additions & 3 deletions api/v1alpha1/egeriaplatform_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EgeriaPlatformSpec : Desired State for Egeria Platform
// EgeriaPlatformSpec : Desired State for Egeria Platform
type EgeriaPlatformSpec struct {
// TODO: Look at moving to use the standard scaling approach https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource
// Number of replicas for this platform (ie number of pods to run)
Expand All @@ -37,10 +37,22 @@ type EgeriaPlatformSpec struct {
// +kubebuilder:validation:MinLength=1
// +kubebuilder:default="quay.io/odpi/egeria:latest"
Image string `json:"image,omitempty"`
// Configmap used for server configuration
// Container image to use, overriding operator configuration
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:MinLength=1
ServerConfig string `json:"serverconfig"`
// +kubebuilder:default="quay.io/odpi/egeria:latest"
UtilImage string `json:"utilimage,omitempty"`
// Configmap used for server configuration
// +kubebuilder:validation:MaxItems=253
// +kubebuilder:validation:MinItems=1
// Should be unique, but cannot be set - restriction of schema validation
ServerConfig []string `json:"serverconfig"`
// +kubebuilder:default:=true
// If true, configured servers will be started when the platform starts up
Autostart bool `json:"autostart"`
// +kubebuilder:default:="OFF"
// Sets the log level for debugging
EgeriaLogLevel string `json:"egerialoglevel"`
}

// EgeriaPlatformStatus : Observed state of Egeria Platform
Expand Down
7 changes: 6 additions & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion buildid.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
105
163
24 changes: 22 additions & 2 deletions config/crd/bases/egeria.egeria-project.org_egeriaplatforms.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,17 @@ spec:
metadata:
type: object
spec:
description: 'EgeriaPlatformSpec : Desired State for Egeria Platform'
description: 'EgeriaPlatformSpec : Desired State for Egeria Platform'
properties:
autostart:
default: true
description: If true, configured servers will be started when the
platform starts up
type: boolean
egerialoglevel:
default: "OFF"
description: Sets the log level for debugging
type: string
image:
default: quay.io/odpi/egeria:latest
description: Container image to use, overriding operator configuration
Expand All @@ -70,11 +79,22 @@ spec:
minLength: 1
type: string
serverconfig:
description: Configmap used for server configuration
description: Configmap used for server configuration Should be unique,
but cannot be set - restriction of schema validation
items:
type: string
maxItems: 253
minItems: 1
type: array
utilimage:
default: quay.io/odpi/egeria:latest
description: Container image to use, overriding operator configuration
maxLength: 253
minLength: 1
type: string
required:
- autostart
- egerialoglevel
- serverconfig
type: object
status:
Expand Down
2 changes: 1 addition & 1 deletion config/manager/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/odpi/egeria-k8s-operator
newTag: 0.9.105
newTag: 0.9.163
7 changes: 5 additions & 2 deletions config/samples/egeria_v1alpha1_egeriaplatform.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ kind: EgeriaPlatform
metadata:
name: egeriaplatform-sample
spec:
# Add fields here
replicas: 3
serverconfig: corecfg
serverconfig:
- cocomds1
# Useful for debugging
#autostart: false
#egerialoglevel: "DEBUG"
187 changes: 155 additions & 32 deletions controllers/egeriaplatform_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func (reconciler *EgeriaPlatformReconciler) ensureDeployment(ctx context.Context
err := reconciler.Get(ctx, types.NamespacedName{Name: egeria.Name + "-deployment", Namespace: egeria.Namespace}, deployment)
if err != nil && errors.IsNotFound(err) {
// Define a new Deployment
dep := reconciler.deploymentForEgeriaPlatform(egeria)
dep := reconciler.deploymentForEgeriaPlatform(ctx, egeria)
log.FromContext(ctx).Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
err = reconciler.Create(ctx, dep)
if err != nil {
Expand Down Expand Up @@ -352,7 +352,7 @@ func (reconciler *EgeriaPlatformReconciler) checkReplicas(ctx context.Context, e

// TODO: Migrate to stateful set if identity needed
// deploymentForEgeria returns an egeria Deployment object
func (reconciler *EgeriaPlatformReconciler) deploymentForEgeriaPlatform(egeriaInstance *egeriav1alpha1.EgeriaPlatform) *appsv1.Deployment {
func (reconciler *EgeriaPlatformReconciler) deploymentForEgeriaPlatform(ctx context.Context, egeriaInstance *egeriav1alpha1.EgeriaPlatform) *appsv1.Deployment {
labels := egeriaLabels(egeriaInstance.Name, "deployment")
replicas := egeriaInstance.Spec.Size

Expand All @@ -371,8 +371,27 @@ func (reconciler *EgeriaPlatformReconciler) deploymentForEgeriaPlatform(egeriaIn
Labels: labels,
},
Spec: corev1.PodSpec{
// The server configuration is stored in a configmap, which is mapped to a volume
Volumes: getVolumes(egeriaInstance.Spec.ServerConfig),
// The server configuration is stored in configmaps, which are each mapped to a volume
Volumes: reconciler.getVolumes(ctx, egeriaInstance.Spec.ServerConfig, egeriaInstance),

// The initContainer will copy config data obtained from configmaps into the data directory
// This is required as Egeria will write to these files -- though the data is ephemeral
InitContainers: []corev1.Container{{
Name: "init",
Image: egeriaInstance.Spec.UtilImage,
VolumeMounts: reconciler.getVolumeMounts(ctx, egeriaInstance.Spec.ServerConfig, egeriaInstance),
Command: []string{
"/bin/cp",
"-rTfL",
"/deployments/shadowdata",
"/deployments/data",
},
//Command: []string{
// "/bin/sh",
// "-c",
// "sleep 10000",
//},
}},
Containers: []corev1.Container{{
Name: "platform",
Image: egeriaInstance.Spec.Image,
Expand All @@ -389,8 +408,10 @@ func (reconciler *EgeriaPlatformReconciler) deploymentForEgeriaPlatform(egeriaIn
},
},
}},
Env: []corev1.EnvVar{corev1.EnvVar{Name: "LOGGING_LEVEL_ROOT", Value: egeriaInstance.Spec.EgeriaLogLevel}},
// Mountpoints are needed for egeria configuration
//TODO: VolumeMounts: BmeMounts(),
//TODO: Fix mounts
VolumeMounts: reconciler.getVolumeMounts(ctx, egeriaInstance.Spec.ServerConfig, egeriaInstance),
// This probe defines when to RESTART the container
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Expand Down Expand Up @@ -483,10 +504,9 @@ func (reconciler *EgeriaPlatformReconciler) configmapForEgeriaPlatform(ctx conte

// Figure out autostart list
var autostart = make(map[string]string)
//TODO avoid hardcoding serverame
//autostart["STARTUP_SERVER_LIST"] = "cocoMDS4, cocoMDS1, cocoView1"
autostart["STARTUP_SERVER_LIST"], _ = reconciler.getServersFromConfigMap(ctx, egeriaInstance)

if egeriaInstance.Spec.Autostart == true {
autostart["STARTUP_SERVER_LIST"], _ = reconciler.getServersFromConfigMaps(ctx, egeriaInstance)
}
configmap := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Expand Down Expand Up @@ -557,40 +577,143 @@ func getPodNames(pods []corev1.Pod) []string {
// }
//}

func getVolumes(configname string) []corev1.Volume {
// In future we may have multiple volumes, so extracted out to fn
return []corev1.Volume{{
Name: "serverconfig",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configname,
// creates the volume section of the pod spec, based on list of configmaps specified in CR
func (reconciler *EgeriaPlatformReconciler) getVolumes(ctx context.Context, configname []string, egeria *egeriav1alpha1.EgeriaPlatform) []corev1.Volume {

log.FromContext(ctx).Info("Getting list of volumes")
var vols []corev1.Volume
serverconfigmap := &corev1.ConfigMap{}
var mountName string

for i := range configname {
// entry for each volume
// TODO - there is no error checking here - needs refactoring
_ = reconciler.Get(ctx, types.NamespacedName{Name: egeria.Spec.ServerConfig[i], Namespace: egeria.Namespace}, serverconfigmap)

// We now have the configmap object. need to extract server names. Should just be one, but allow for multiple
// for now we'll just run this loop and use last
for k := range serverconfigmap.Data {
log.FromContext(ctx).Info("Using filename as key: ", "servername", k)
mountName = k // use last one
}

log.FromContext(ctx).Info("Adding to volume list: ", "name", configname[i])
vols = append(vols, corev1.Volume{
Name: configname[i],
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configname[i],
},
Items: []corev1.KeyToPath{
{
Key: mountName,
Path: mountName + ".config",
},
},
},
},
},
}}
)
}

// Finally we add an emptyDir -- this is used for the main 'data' directory, since this container is ephemeral. Any
// persistent data needs to be managed through the server configuration documents, or the repositories themselves,
// for example using a XTDB deployment. No attempt is made at this level to keep persistent store, since scaling/HA is
// an intrinsic part of using an operator

log.FromContext(ctx).Info("Adding to volume list: ", "name", "data")
vols = append(vols, corev1.Volume{
Name: "data",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
)

// return the built list
return vols
}

// getVolumeMounts builds the volume-mounts section of a container spec. Each
// server-config configmap named in the CR is mounted read-only under
// /deployments/shadowdata/servers/<servername>/config - an init container then
// copies shadowdata into the writable 'data' emptyDir (configmap mounts are
// read-only, but Egeria needs to write to its config files). The 'data'
// emptyDir itself is mounted at /deployments/data.
func (reconciler *EgeriaPlatformReconciler) getVolumeMounts(ctx context.Context, configname []string, egeria *egeriav1alpha1.EgeriaPlatform) []corev1.VolumeMount {

	var vols []corev1.VolumeMount

	log.FromContext(ctx).Info("Building list of volume mounts")
	for i := range configname {
		log.FromContext(ctx).Info("Adding to volume mount: ", "name", configname[i])

		// Fresh configmap object and key name on every pass so a failed Get
		// or an empty configmap can never silently reuse data left over from
		// a previous iteration.
		serverconfigmap := &corev1.ConfigMap{}
		var mountName string

		// Inspect the configmap to pick up the real server name (specifically,
		// it could be mixed case, unlike the configmap name itself).
		if err := reconciler.Get(ctx, types.NamespacedName{Name: egeria.Spec.ServerConfig[i], Namespace: egeria.Namespace}, serverconfigmap); err != nil {
			// Best effort: record the problem but still declare the mount;
			// the configmap may appear later and reconciliation will retry.
			log.FromContext(ctx).Error(err, "Unable to retrieve server config configmap", "name", egeria.Spec.ServerConfig[i])
		}

		// Extract the server name from the configmap's data key. There should
		// be exactly one; if several are present the last one wins (map
		// iteration order is not deterministic). TODO: enforce a single key.
		for k := range serverconfigmap.Data {
			log.FromContext(ctx).Info("Using filename as key: ", "servername", k)
			mountName = k
		}

		// entry for each volume
		vols = append(vols, corev1.VolumeMount{
			Name:     configname[i],
			ReadOnly: true,
			// TODO: Mountpath should be configurable - though it does depend on
			// the container image. Mounted to an alternate location: Egeria
			// needs to write to config files and that cannot be done on a
			// configmap mount, so an init container copies shadowdata to data.
			// Take care with what is placed in shadowdata.
			MountPath: "/deployments/shadowdata/servers/" + mountName + "/config",
		})
	}

	// Now add our writable data mount (backed by the emptyDir volume).
	vols = append(vols, corev1.VolumeMount{
		Name: "data",
		// TODO: Mountpath should be configurable - though it does depend on
		// the container image.
		MountPath: "/deployments/data",
	})

	// return the built list
	return vols
}

// TODO : Automatically create autostart server list from querying config

func (reconciler *EgeriaPlatformReconciler) getServersFromConfigMap(ctx context.Context, egeria *egeriav1alpha1.EgeriaPlatform) (servers string, err error) {
func (reconciler *EgeriaPlatformReconciler) getServersFromConfigMaps(ctx context.Context, egeria *egeriav1alpha1.EgeriaPlatform) (servers string, err error) {
// Retrieve the list of servers from the configmap
configmap := &corev1.ConfigMap{}
err = reconciler.Get(ctx, types.NamespacedName{Name: egeria.Spec.ServerConfig, Namespace: egeria.Namespace}, configmap)
if err != nil {
if errors.IsNotFound(err) {
log.FromContext(ctx).Error(err, "Configured server configmap not found.")
serverconfigmap := &corev1.ConfigMap{}
// iterate through the list of configured configmaps - one per server ideally
for cm := range egeria.Spec.ServerConfig {
err = reconciler.Get(ctx, types.NamespacedName{Name: egeria.Spec.ServerConfig[cm], Namespace: egeria.Namespace}, serverconfigmap)
if err != nil {
if errors.IsNotFound(err) {
log.FromContext(ctx).Error(err, "Configured server configmap not found.")
return "", err
}
// Error reading the object - requeue the request.
log.FromContext(ctx).Error(err, "Error reading server configmap")
return "", err
}
// Error reading the object - requeue the request.
log.FromContext(ctx).Error(err, "Error reading server configmap")
return "", err
}
// We now have the configmap object. need to extract server names
for k := range configmap.Data {
log.FromContext(ctx).Info("Found server config & adding to startup list: ", "servername", k)
servers += k + ","
// We now have the configmap object. need to extract server names. Should just be one, but allow for multiple
for k := range serverconfigmap.Data {
log.FromContext(ctx).Info("Found server config & adding to startup list: ", "servername", k)
servers += k + ","
}
}

// This should be a list built from all configmaps
return servers, nil
}

Expand Down
Loading

0 comments on commit cda53b7

Please sign in to comment.