diff --git a/api/restHandler/DeploymentConfigurationRestHandler.go b/api/restHandler/DeploymentConfigurationRestHandler.go index 144838da010..7a9f4676290 100644 --- a/api/restHandler/DeploymentConfigurationRestHandler.go +++ b/api/restHandler/DeploymentConfigurationRestHandler.go @@ -1,17 +1,20 @@ package restHandler import ( + "context" "fmt" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/configDiff" "github.com/devtron-labs/devtron/pkg/configDiff/bean" + util2 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/rbac" "github.com/gorilla/schema" "go.uber.org/zap" "gopkg.in/go-playground/validator.v9" "net/http" + "time" ) type DeploymentConfigurationRestHandler interface { @@ -88,6 +91,7 @@ func (handler *DeploymentConfigurationRestHandlerImpl) GetConfigData(w http.Resp return } + configDataQueryParams.UserId = userId //RBAC START token := r.Header.Get(common.TokenHeaderKey) object := handler.enforcerUtil.GetAppRBACName(configDataQueryParams.AppName) @@ -97,8 +101,12 @@ func (handler *DeploymentConfigurationRestHandlerImpl) GetConfigData(w http.Resp return } //RBAC END - - res, err := handler.deploymentConfigurationService.GetAllConfigData(r.Context(), configDataQueryParams) + isSuperAdmin := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*") + userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, object) + ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second) + defer cancel() + ctx = util2.SetSuperAdminInContext(ctx, isSuperAdmin) + res, err := handler.deploymentConfigurationService.GetAllConfigData(ctx, configDataQueryParams, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetAllConfigData ", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go index 5d89f39da56..0a1115a44c0 100644 --- a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go @@ -1562,12 +1562,16 @@ func (handler *PipelineConfigRestHandlerImpl) CancelWorkflow(w http.ResponseWrit return } var forceAbort bool - forceAbort, err = strconv.ParseBool(queryVars.Get("forceAbort")) - if err != nil { - handler.Logger.Errorw("request err, CancelWorkflow", "err", err) - common.WriteJsonResp(w, err, nil, http.StatusBadRequest) - return + forceAbortQueryParam := queryVars.Get("forceAbort") + if len(forceAbortQueryParam) > 0 { + forceAbort, err = strconv.ParseBool(forceAbortQueryParam) + if err != nil { + handler.Logger.Errorw("request err, CancelWorkflow", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } } + handler.Logger.Infow("request payload, CancelWorkflow", "workflowId", workflowId, "pipelineId", pipelineId) ciPipeline, err := handler.ciPipelineRepository.FindById(pipelineId) diff --git a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go index 950468f79f1..c25ede47b05 100644 --- a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go @@ -2071,6 +2071,16 @@ func 
(handler *PipelineConfigRestHandlerImpl) CancelStage(w http.ResponseWriter, common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } + var forceAbort bool + forceAbortQueryParam := r.URL.Query().Get("forceAbort") + if len(forceAbortQueryParam) > 0 { + forceAbort, err = strconv.ParseBool(forceAbortQueryParam) + if err != nil { + handler.Logger.Errorw("request err, CancelWorkflow", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + } handler.Logger.Infow("request payload, CancelStage", "pipelineId", pipelineId, "workflowRunnerId", workflowRunnerId) //RBAC @@ -2082,7 +2092,7 @@ func (handler *PipelineConfigRestHandlerImpl) CancelStage(w http.ResponseWriter, } //RBAC - resp, err := handler.cdHandler.CancelStage(workflowRunnerId, userId) + resp, err := handler.cdHandler.CancelStage(workflowRunnerId, forceAbort, userId) if err != nil { handler.Logger.Errorw("service err, CancelStage", "err", err, "pipelineId", pipelineId, "workflowRunnerId", workflowRunnerId) if util.IsErrNoRows(err) { diff --git a/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go b/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go index 6415e556a05..0902c3eb837 100644 --- a/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go +++ b/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go @@ -144,14 +144,14 @@ func (handler PipelineTriggerRestHandlerImpl) OverrideConfig(w http.ResponseWrit triggerContext := bean3.TriggerContext{ Context: ctx, } - mergeResp, err := handler.cdTriggerService.ManualCdTrigger(triggerContext, &overrideRequest) + mergeResp, helmPackageName, err := handler.cdTriggerService.ManualCdTrigger(triggerContext, &overrideRequest) span.End() if err != nil { handler.logger.Errorw("request err, OverrideConfig", "err", err, "payload", overrideRequest) common.WriteJsonResp(w, err, err.Error(), http.StatusInternalServerError) return } - res := map[string]interface{}{"releaseId": mergeResp} + res := map[string]interface{}{"releaseId": mergeResp, "helmPackageName": helmPackageName} common.WriteJsonResp(w, err, res, http.StatusOK) } diff --git a/go.mod b/go.mod index 099524b3fbe..ab7367a2f70 100644 --- a/go.mod +++ b/go.mod @@ -288,7 +288,7 @@ require gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect replace ( github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 - github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241010131105-e2c23f9c80da + github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241024135802-b4888f54a136 github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.5 k8s.io/api => k8s.io/api v0.29.7 diff --git a/go.sum b/go.sum index 2fcf7dcfa69..4577dd412f5 100644 --- a/go.sum +++ b/go.sum @@ -794,8 +794,8 @@ github.com/devtron-labs/argo-workflows/v3 v3.5.10 h1:6rxQOesOzDz6SgQCMDQNHaehsKF github.com/devtron-labs/argo-workflows/v3 v3.5.10/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 h1:2+Q7Jdhpo/uMiaQiZZzAh+ZX7wEJIFuMFG6DEiMuo64= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8/go.mod h1:702R6WIf5y9UzKGoCGxQ+x3l5Ws+l0fXg2xlCpSGFZI= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241010131105-e2c23f9c80da 
h1:vC6SMz6BM1doN+ZBGiDGyERJ/LphFQi5+Ab/YQkNJVo= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241010131105-e2c23f9c80da/go.mod h1:KpKnF4OSpQNDJmb4wVZq3Za88ePBw4xec2GOAGRm5UQ= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241024135802-b4888f54a136 h1:rNGxjU5L6NvObxGMt0+vNFmjkqstm7zDASiS+pakrgQ= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241024135802-b4888f54a136/go.mod h1:KpKnF4OSpQNDJmb4wVZq3Za88ePBw4xec2GOAGRm5UQ= github.com/devtron-labs/go-bitbucket v0.9.60-beta h1:VEx1jvDgdtDPS6A1uUFoaEi0l1/oLhbr+90xOwr6sDU= github.com/devtron-labs/go-bitbucket v0.9.60-beta/go.mod h1:GnuiCesvh8xyHeMCb+twm8lBR/kQzJYSKL28ZfObp1Y= github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 h1:xwbTeijNTf4/j1v+tSfwVqwLVnReas/NqEKeQHvSTys= diff --git a/internal/sql/repository/DeploymentTemplateRepository.go b/internal/sql/repository/DeploymentTemplateRepository.go index 00116eb20b4..053e7c8395e 100644 --- a/internal/sql/repository/DeploymentTemplateRepository.go +++ b/internal/sql/repository/DeploymentTemplateRepository.go @@ -38,6 +38,7 @@ type DeploymentTemplateComparisonMetadata struct { EnvironmentId int `json:"environmentId,omitempty"` EnvironmentName string `json:"environmentName,omitempty"` DeploymentTemplateHistoryId int `json:"deploymentTemplateHistoryId,omitempty"` + WfrId int `json:"wfrId,omitempty"` StartedOn *time.Time `json:"startedOn,omitempty"` FinishedOn *time.Time `json:"finishedOn,omitempty"` Status string `json:"status,omitempty"` @@ -69,7 +70,7 @@ func (impl DeploymentTemplateRepositoryImpl) FetchDeploymentHistoryWithChartRefs limit := 15 query := "select p.id as pipeline_id, dth.id as deployment_template_history_id," + - " wfr.finished_on, wfr.status, c.chart_ref_id, c.chart_version FROM cd_workflow_runner wfr" + + " wfr.id as wfr_id, wfr.finished_on, wfr.status, c.chart_ref_id, c.chart_version FROM cd_workflow_runner wfr" + " JOIN cd_workflow wf ON wf.id = wfr.cd_workflow_id JOIN pipeline p ON p.id = wf.pipeline_id" + " JOIN deployment_template_history dth ON dth.deployed_on = wfr.started_on " + "JOIN pipeline_config_override pco ON pco.cd_workflow_id = wf.id " + diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index cb81f4ca1b4..abd44441035 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -693,9 +693,10 @@ func (impl *CdWorkflowRepositoryImpl) GetLatestTriggersOfHelmPipelinesStuckInNon Where("cd_workflow_runner.cd_workflow_id in"+ " (SELECT max(cd_workflow.id) as id from cd_workflow"+ " INNER JOIN cd_workflow_runner on cd_workflow.id = cd_workflow_runner.cd_workflow_id"+ - " WHERE cd_workflow_runner.status != ?"+ + " WHERE cd_workflow_runner.workflow_type = ? "+ + " AND cd_workflow_runner.status != ?"+ " GROUP BY cd_workflow.pipeline_id"+ - " ORDER BY cd_workflow.pipeline_id desc)", cdWorkflow.WorkflowInQueue). + " ORDER BY cd_workflow.pipeline_id desc)", apiBean.CD_WORKFLOW_TYPE_DEPLOY, cdWorkflow.WorkflowInQueue). Where("(cd_workflow__pipeline.deployment_app_type=? or dc.deployment_app_type=?)", util.PIPELINE_DEPLOYMENT_TYPE_HELM, util.PIPELINE_DEPLOYMENT_TYPE_HELM). Where("cd_workflow_runner.started_on > NOW() - INTERVAL '? hours'", getPipelineDeployedWithinHours). Where("cd_workflow__pipeline.deleted=?", false). 
diff --git a/pkg/bean/configSecretData.go b/pkg/bean/configSecretData.go index d172859dc23..c945080e6f1 100644 --- a/pkg/bean/configSecretData.go +++ b/pkg/bean/configSecretData.go @@ -30,6 +30,8 @@ type SecretList struct { ConfigData []*ConfigData `json:"secrets"` } +// there is an adapter written in pkg/bean folder to convert below ConfigData struct to pipeline/bean's ConfigData + // TODO refactoring: duplicate struct of ConfigData in ConfigMapBean.go type ConfigData struct { Name string `json:"name"` @@ -49,6 +51,7 @@ type ConfigData struct { SubPath bool `json:"subPath"` ESOSubPath []string `json:"esoSubPath"` FilePermission string `json:"filePermission"` + Overridden bool `json:"overridden"` } func (c *ConfigData) IsESOExternalSecretType() bool { diff --git a/pkg/cluster/repository/EnvironmentRepository.go b/pkg/cluster/repository/EnvironmentRepository.go index 048aa83ae76..15e806e2ca9 100644 --- a/pkg/cluster/repository/EnvironmentRepository.go +++ b/pkg/cluster/repository/EnvironmentRepository.go @@ -80,6 +80,7 @@ type EnvironmentRepository interface { FindAllActiveWithFilter() ([]*Environment, error) FindEnvClusterInfosByIds([]int) ([]*EnvCluserInfo, error) FindEnvLinkedWithCiPipelines(externalCi bool, ciPipelineIds []int) ([]*Environment, error) + FindEnvByNameWithClusterDetails(envName string) (*Environment, error) } func NewEnvironmentRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger, appStatusRepository appStatus.AppStatusRepository) *EnvironmentRepositoryImpl { @@ -160,6 +161,17 @@ func (repositoryImpl EnvironmentRepositoryImpl) FindByName(name string) (*Enviro return environment, err } +func (repositoryImpl EnvironmentRepositoryImpl) FindEnvByNameWithClusterDetails(envName string) (*Environment, error) { + environment := &Environment{} + err := repositoryImpl.dbConnection. + Model(environment). + Column("environment.*", "Cluster"). + Where("environment.environment_name = ?", envName). + Where("environment.active = ?", true). + Select() + return environment, err +} + func (repositoryImpl EnvironmentRepositoryImpl) FindIdByName(name string) (int, error) { environment := &Environment{} err := repositoryImpl.dbConnection. 
diff --git a/pkg/configDiff/DeploymentConfigurationService.go b/pkg/configDiff/DeploymentConfigurationService.go index f64de5cd2f7..160e620df23 100644 --- a/pkg/configDiff/DeploymentConfigurationService.go +++ b/pkg/configDiff/DeploymentConfigurationService.go @@ -5,16 +5,30 @@ import ( "encoding/json" repository2 "github.com/devtron-labs/devtron/internal/sql/repository" appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" + bean3 "github.com/devtron-labs/devtron/pkg/bean" chartService "github.com/devtron-labs/devtron/pkg/chart" "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/configDiff/adaptor" bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" "github.com/devtron-labs/devtron/pkg/configDiff/helper" "github.com/devtron-labs/devtron/pkg/configDiff/utils" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" "github.com/devtron-labs/devtron/pkg/generateManifest" "github.com/devtron-labs/devtron/pkg/pipeline" + "github.com/devtron-labs/devtron/pkg/pipeline/adapter" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/history" + repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" + "github.com/devtron-labs/devtron/pkg/resourceQualifiers" + "github.com/devtron-labs/devtron/pkg/variables" + "github.com/devtron-labs/devtron/pkg/variables/parsers" + repository6 "github.com/devtron-labs/devtron/pkg/variables/repository" + util2 "github.com/devtron-labs/devtron/util" + "github.com/go-pg/pg" + "github.com/juju/errors" "go.uber.org/zap" "net/http" "strconv" @@ -22,16 +36,26 @@ import ( type DeploymentConfigurationService interface { ConfigAutoComplete(appId int, envId int) (*bean2.ConfigDataResponse, error) - GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfigDto, error) + GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) } type DeploymentConfigurationServiceImpl struct { - logger *zap.SugaredLogger - configMapService pipeline.ConfigMapService - appRepository appRepository.AppRepository - environmentRepository repository.EnvironmentRepository - chartService chartService.ChartService - deploymentTemplateService generateManifest.DeploymentTemplateService + logger *zap.SugaredLogger + configMapService pipeline.ConfigMapService + appRepository appRepository.AppRepository + environmentRepository repository.EnvironmentRepository + chartService chartService.ChartService + deploymentTemplateService generateManifest.DeploymentTemplateService + deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository + pipelineStrategyHistoryRepository repository3.PipelineStrategyHistoryRepository + configMapHistoryRepository repository3.ConfigMapHistoryRepository + scopedVariableManager variables.ScopedVariableCMCSManager + configMapRepository chartConfig.ConfigMapRepository + deploymentConfigService pipeline.PipelineDeploymentConfigService + chartRefService chartRef.ChartRefService + pipelineRepository pipelineConfig.PipelineRepository + deploymentTemplateHistoryService history.DeploymentTemplateHistoryService + configMapHistoryService 
history.ConfigMapHistoryService } func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, @@ -40,14 +64,34 @@ func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, environmentRepository repository.EnvironmentRepository, chartService chartService.ChartService, deploymentTemplateService generateManifest.DeploymentTemplateService, + deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository, + pipelineStrategyHistoryRepository repository3.PipelineStrategyHistoryRepository, + configMapHistoryRepository repository3.ConfigMapHistoryRepository, + scopedVariableManager variables.ScopedVariableCMCSManager, + configMapRepository chartConfig.ConfigMapRepository, + deploymentConfigService pipeline.PipelineDeploymentConfigService, + chartRefService chartRef.ChartRefService, + pipelineRepository pipelineConfig.PipelineRepository, + deploymentTemplateHistoryService history.DeploymentTemplateHistoryService, + configMapHistoryService history.ConfigMapHistoryService, ) (*DeploymentConfigurationServiceImpl, error) { deploymentConfigurationService := &DeploymentConfigurationServiceImpl{ - logger: logger, - configMapService: configMapService, - appRepository: appRepository, - environmentRepository: environmentRepository, - chartService: chartService, - deploymentTemplateService: deploymentTemplateService, + logger: logger, + configMapService: configMapService, + appRepository: appRepository, + environmentRepository: environmentRepository, + chartService: chartService, + deploymentTemplateService: deploymentTemplateService, + deploymentTemplateHistoryRepository: deploymentTemplateHistoryRepository, + pipelineStrategyHistoryRepository: pipelineStrategyHistoryRepository, + configMapHistoryRepository: configMapHistoryRepository, + scopedVariableManager: scopedVariableManager, + configMapRepository: configMapRepository, + deploymentConfigService: deploymentConfigService, + chartRefService: chartRefService, + pipelineRepository: pipelineRepository, + deploymentTemplateHistoryService: deploymentTemplateHistoryService, + configMapHistoryService: configMapHistoryService, } return deploymentConfigurationService, nil @@ -82,19 +126,23 @@ func (impl *DeploymentConfigurationServiceImpl) ConfigAutoComplete(appId int, en return configDataResp, nil } -func (impl *DeploymentConfigurationServiceImpl) GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfigDto, error) { - if !configDataQueryParams.IsValidConfigType() { - return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: strconv.Itoa(http.StatusBadRequest), InternalMessage: bean2.InvalidConfigTypeErr, UserMessage: bean2.InvalidConfigTypeErr} - } +func (impl *DeploymentConfigurationServiceImpl) GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) { var err error - var envId int - var appId int + var envId, appId, clusterId int + systemMetadata := &resourceQualifiers.SystemMetadata{ + AppName: configDataQueryParams.AppName, + } if configDataQueryParams.IsEnvNameProvided() { - envId, err = impl.environmentRepository.FindIdByName(configDataQueryParams.EnvName) + env, err := impl.environmentRepository.FindEnvByNameWithClusterDetails(configDataQueryParams.EnvName) if err != nil { impl.logger.Errorw("GetAllConfigData, error in getting environment model by envName", "envName", configDataQueryParams.EnvName, "err", err) return nil, err } + envId 
= env.Id + clusterId = env.ClusterId + systemMetadata.EnvironmentName = env.Name + systemMetadata.Namespace = env.Namespace + systemMetadata.ClusterName = env.Cluster.ClusterName } appId, err = impl.appRepository.FindAppIdByName(configDataQueryParams.AppName) if err != nil { @@ -102,10 +150,353 @@ func (impl *DeploymentConfigurationServiceImpl) GetAllConfigData(ctx context.Con return nil, err } + switch configDataQueryParams.ConfigArea { + case bean2.CdRollback.ToString(): + return impl.getConfigDataForCdRollback(ctx, configDataQueryParams, userHasAdminAccess) + case bean2.DeploymentHistory.ToString(): + return impl.getConfigDataForDeploymentHistory(ctx, configDataQueryParams, userHasAdminAccess) + } + // this would be the default case + return impl.getConfigDataForAppConfiguration(ctx, configDataQueryParams, appId, envId, clusterId, userHasAdminAccess, systemMetadata) +} + +func (impl *DeploymentConfigurationServiceImpl) getConfigDataForCdRollback(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) { + // wfrId is expected in this case to return the expected data + if configDataQueryParams.WfrId == 0 { + return nil, &util.ApiError{HttpStatusCode: http.StatusNotFound, Code: strconv.Itoa(http.StatusNotFound), InternalMessage: bean2.ExpectedWfrIdNotPassedInQueryParamErr, UserMessage: bean2.ExpectedWfrIdNotPassedInQueryParamErr} + } + return impl.getConfigDataForDeploymentHistory(ctx, configDataQueryParams, userHasAdminAccess) +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentHistoryConfig(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfig, error) { + deploymentJson := json.RawMessage{} + deploymentHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(configDataQueryParams.PipelineId, configDataQueryParams.WfrId) + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in getting deployment template history for pipelineId and wfrId", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, "err", err) + return nil, err + } else if util.IsErrNoRows(err) { + return nil, util.GetApiError(http.StatusNotFound, bean2.NoDeploymentDoneForSelectedImage, bean2.NoDeploymentDoneForSelectedImage) + } + err = deploymentJson.UnmarshalJSON([]byte(deploymentHistory.Template)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string deploymentTemplateResponse data into json Raw message", "data", deploymentHistory.Template, "err", err) + return nil, err + } + isSuperAdmin, err := util2.GetIsSuperAdminFromContext(ctx) + if err != nil { + return nil, err + } + reference := repository6.HistoryReference{ + HistoryReferenceId: deploymentHistory.Id, + HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, + } + variableSnapshotMap, resolvedTemplate, err := impl.scopedVariableManager.GetVariableSnapshotAndResolveTemplate(deploymentHistory.Template, parsers.JsonVariableTemplate, reference, isSuperAdmin, false) + if err != nil { + impl.logger.Errorw("error while resolving template from history", "deploymentHistoryId", deploymentHistory.Id, "pipelineId", configDataQueryParams.PipelineId, "err", err) + } + + deploymentConfig := bean2.NewDeploymentAndCmCsConfig(). + WithConfigData(deploymentJson). + WithResourceType(bean.DeploymentTemplate). 
+ WithVariableSnapshot(map[string]map[string]string{bean.DeploymentTemplate.ToString(): variableSnapshotMap}). + WithResolvedValue(json.RawMessage(resolvedTemplate)). + WithDeploymentConfigMetadata(deploymentHistory.TemplateVersion, deploymentHistory.IsAppMetricsEnabled) + return deploymentConfig, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getPipelineStrategyConfigHistory(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfig, error) { + pipelineStrategyJson := json.RawMessage{} + pipelineConfig := bean2.NewDeploymentAndCmCsConfig() + pipelineStrategyHistory, err := impl.pipelineStrategyHistoryRepository.GetHistoryByPipelineIdAndWfrId(ctx, configDataQueryParams.PipelineId, configDataQueryParams.WfrId) + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in checking if history exists for pipelineId and wfrId", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, "err", err) + return nil, err + } else if util.IsErrNoRows(err) { + return pipelineConfig, nil + } + err = pipelineStrategyJson.UnmarshalJSON([]byte(pipelineStrategyHistory.Config)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string pipelineStrategyHistory data into json Raw message", "pipelineStrategyHistoryConfig", pipelineStrategyHistory.Config, "err", err) + return nil, err + } + pipelineConfig.WithConfigData(pipelineStrategyJson). + WithResourceType(bean.PipelineStrategy). + WithPipelineStrategyMetadata(pipelineStrategyHistory.PipelineTriggerType, string(pipelineStrategyHistory.Strategy)) + return pipelineConfig, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getConfigDataForDeploymentHistory(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) { + // we would be expecting wfrId in case of getting data for Deployment history configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + var err error + //fetching history for deployment config starts + deploymentConfig, err := impl.getDeploymentHistoryConfig(ctx, configDataQueryParams) + if err != nil { + impl.logger.Errorw("getConfigDataForDeploymentHistory, error in getDeploymentHistoryConfig", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + configDataDto.WithDeploymentTemplateData(deploymentConfig) + // fetching for deployment config ends + + // fetching for pipeline strategy config starts + pipelineConfig, err := impl.getPipelineStrategyConfigHistory(ctx, configDataQueryParams) + if err != nil { + impl.logger.Errorw("getConfigDataForDeploymentHistory, error in getPipelineStrategyConfigHistory", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + if len(pipelineConfig.Data) > 0 { + configDataDto.WithPipelineConfigData(pipelineConfig) + } + + // fetching for pipeline strategy config ends + + // fetching for cm config starts + cmConfigData, err := impl.getCmCsConfigHistory(ctx, configDataQueryParams, repository3.CONFIGMAP_TYPE, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("getConfigDataForDeploymentHistory, error in getCmConfigHistory", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + configDataDto.WithConfigMapData(cmConfigData) + // fetching for cm config ends + + // fetching for cs config starts + secretConfigDto, err := impl.getCmCsConfigHistory(ctx, configDataQueryParams, repository3.SECRET_TYPE, 
userHasAdminAccess) + if err != nil { + impl.logger.Errorw("getConfigDataForDeploymentHistory, error in getSecretConfigHistory", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + configDataDto.WithSecretData(secretConfigDto) + // fetching for cs config ends + + return configDataDto, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getCmCsConfigHistory(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, configType repository3.ConfigType, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfig, error) { + var resourceType bean.ResourceType + history, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(configDataQueryParams.PipelineId, configDataQueryParams.WfrId, configType) + if err != nil { + impl.logger.Errorw("error in checking if cm cs history exists for pipelineId and wfrId", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, "err", err) + return nil, err + } + var configData []*bean.ConfigData + configList := bean.ConfigsList{} + secretList := bean.SecretsList{} + switch configType { + case repository3.CONFIGMAP_TYPE: + if len(history.Data) > 0 { + err = json.Unmarshal([]byte(history.Data), &configList) + if err != nil { + impl.logger.Debugw("error while Unmarshal", "err", err) + return nil, err + } + } + resourceType = bean.CM + configData = configList.ConfigData + case repository3.SECRET_TYPE: + if len(history.Data) > 0 { + err = json.Unmarshal([]byte(history.Data), &secretList) + if err != nil { + impl.logger.Debugw("error while Unmarshal", "err", err) + return nil, err + } + } + resourceType = bean.CS + configData = secretList.ConfigData + + } + + resolvedDataMap, variableSnapshotMap, err := impl.scopedVariableManager.GetResolvedCMCSHistoryDtos(ctx, configType, adaptor.ReverseConfigListConvertor(configList), history, adaptor.ReverseSecretListConvertor(secretList)) + if err != nil { + return nil, err + } + resolvedConfigDataList := make([]*bean.ConfigData, 0, len(resolvedDataMap)) + for _, resolvedConfigData := range resolvedDataMap { + resolvedConfigDataList = append(resolvedConfigDataList, adapter.ConvertConfigDataToPipelineConfigData(&resolvedConfigData)) + } + + if configType == repository3.SECRET_TYPE { + impl.encodeSecretDataFromNonAdminUsers(configData, userHasAdminAccess) + impl.encodeSecretDataFromNonAdminUsers(resolvedConfigDataList, userHasAdminAccess) + + } + + configDataReq := &bean.ConfigDataRequest{ConfigData: configData} + configDataJson, err := utils.ConvertToJsonRawMessage(configDataReq) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting config data to json raw message", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, "err", err) + return nil, err + } + resolvedConfigDataReq := &bean.ConfigDataRequest{ConfigData: resolvedConfigDataList} + resolvedConfigDataString, err := utils.ConvertToString(resolvedConfigDataReq) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting config data to json raw message", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, "err", err) + return nil, err + } + resolvedConfigDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedConfigDataString) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in ConvertToJsonRawMessage for resolvedConfigDataString", "pipelineId", configDataQueryParams.PipelineId, "wfrId", configDataQueryParams.WfrId, 
"err", err) + return nil, err + } + cmConfigData := bean2.NewDeploymentAndCmCsConfig(). + WithConfigData(configDataJson). + WithResourceType(resourceType). + WithVariableSnapshot(variableSnapshotMap). + WithResolvedValue(resolvedConfigDataStringJson) + return cmConfigData, nil +} + +func (impl *DeploymentConfigurationServiceImpl) encodeSecretDataFromNonAdminUsers(configDataList []*bean.ConfigData, userHasAdminAccess bool) { + for _, config := range configDataList { + if config.Data != nil { + if !userHasAdminAccess { + //removing keys and sending + resultMap := make(map[string]string) + resultMapFinal := make(map[string]string) + err := json.Unmarshal(config.Data, &resultMap) + if err != nil { + impl.logger.Errorw("unmarshal failed", "error", err) + return + } + for key, _ := range resultMap { + //hard-coding values to show them as hidden to user + resultMapFinal[key] = bean2.SecretMaskedValue + } + config.Data, err = utils.ConvertToJsonRawMessage(resultMapFinal) + if err != nil { + impl.logger.Errorw("error while marshaling request", "err", err) + return + } + } + } + } +} + +func (impl *DeploymentConfigurationServiceImpl) getCmCsDataForPreviousDeployments(ctx context.Context, deploymentTemplateHistoryId, pipelineId int, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) { + + configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + + deplTemplateHistory, err := impl.deploymentTemplateHistoryService.GetTemplateHistoryModelForDeployedTemplateById(deploymentTemplateHistoryId, pipelineId) + if err != nil { + impl.logger.Errorw("error in getting deployment template history", "err", err, "deploymentTemplateHistoryId", deploymentTemplateHistoryId, "pipelineId", pipelineId) + return nil, err + } + + secretConfigData, cmConfigData, err := impl.configMapHistoryService.GetConfigmapHistoryDataByDeployedOnAndPipelineId(ctx, pipelineId, deplTemplateHistory.DeployedOn, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("error in getting secretData and cmData", "err", err, "deploymentTemplateHistoryId", deploymentTemplateHistoryId, "pipelineId", pipelineId) + return nil, err + } + configDataDto.WithConfigMapData(cmConfigData).WithSecretData(secretConfigData) + return configDataDto, nil + +} +func (impl *DeploymentConfigurationServiceImpl) getPipelineStrategyForPreviousDeployments(ctx context.Context, deploymentTemplateHistoryId, pipelineId int) (*bean2.DeploymentAndCmCsConfig, error) { + pipelineStrategyJson := json.RawMessage{} + pipelineConfig := bean2.NewDeploymentAndCmCsConfig() + deplTemplateHistory, err := impl.deploymentTemplateHistoryService.GetTemplateHistoryModelForDeployedTemplateById(deploymentTemplateHistoryId, pipelineId) + if err != nil { + impl.logger.Errorw("error in getting deployment template history", "deploymentTemplateHistoryId", deploymentTemplateHistoryId, "pipelineId", pipelineId, "err", err) + return nil, err + } + pipelineStrategyHistory, err := impl.pipelineStrategyHistoryRepository.FindPipelineStrategyForDeployedOnAndPipelineId(pipelineId, deplTemplateHistory.DeployedOn) + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in FindPipelineStrategyForDeployedOnAndPipelineId", "deploymentTemplateHistoryId", deploymentTemplateHistoryId, "deployedOn", deplTemplateHistory.DeployedOn, "pipelineId", pipelineId, "err", err) + return nil, err + } else if util.IsErrNoRows(err) { + return pipelineConfig, nil + } + err = pipelineStrategyJson.UnmarshalJSON([]byte(pipelineStrategyHistory.Config)) + if err != nil { + 
impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string pipelineStrategyHistory data into json Raw message", "err", err) + return nil, err + } + pipelineConfig.WithConfigData(pipelineStrategyJson). + WithResourceType(bean.PipelineStrategy). + WithPipelineStrategyMetadata(pipelineStrategyHistory.PipelineTriggerType, string(pipelineStrategyHistory.Strategy)) + return pipelineConfig, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentsConfigForPreviousDeployments(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, + appId, envId int) (generateManifest.DeploymentTemplateResponse, error) { + deploymentTemplateRequest := generateManifest.DeploymentTemplateRequest{ + PipelineId: configDataQueryParams.PipelineId, + DeploymentTemplateHistoryId: configDataQueryParams.IdentifierId, + RequestDataMode: generateManifest.Values, + Type: repository2.DeployedOnSelfEnvironment, + } + var deploymentTemplateResponse generateManifest.DeploymentTemplateResponse + deploymentTemplateResponse, err := impl.deploymentTemplateService.GetDeploymentTemplate(ctx, deploymentTemplateRequest) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in getting deployment template for ", "deploymentTemplateRequest", deploymentTemplateRequest, "err", err) + return deploymentTemplateResponse, err + } + + return deploymentTemplateResponse, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentAndCmCsConfigDataForPreviousDeployments(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, + appId, envId int, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfigDto, error) { + + // getting DeploymentAndCmCsConfigDto obj with cm and cs data populated + configDataDto, err := impl.getCmCsDataForPreviousDeployments(ctx, configDataQueryParams.IdentifierId, configDataQueryParams.PipelineId, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("error in getting cm cs for PreviousDeployments state", "deploymentTemplateHistoryId", configDataQueryParams.IdentifierId, "pipelineId", configDataQueryParams.PipelineId, "err", err) + return nil, err + } + pipelineStrategy, err := impl.getPipelineStrategyForPreviousDeployments(ctx, configDataQueryParams.IdentifierId, configDataQueryParams.PipelineId) + if err != nil { + impl.logger.Errorw(" error in getting cm cs for PreviousDeployments state", "deploymentTemplateHistoryId", configDataQueryParams.IdentifierId, "pipelineId", configDataQueryParams.PipelineId, "err", err) + return nil, err + } + if len(pipelineStrategy.Data) > 0 { + configDataDto.WithPipelineConfigData(pipelineStrategy) + } + + deploymentTemplateData, err := impl.getDeploymentsConfigForPreviousDeployments(ctx, configDataQueryParams, appId, envId) + if err != nil { + impl.logger.Errorw("error in getting deployment config", "appName", configDataQueryParams.AppName, "envName", configDataQueryParams.EnvName, "err", err) + return nil, err + } + deploymentJson := json.RawMessage{} + err = deploymentJson.UnmarshalJSON([]byte(deploymentTemplateData.Data)) + if err != nil { + impl.logger.Errorw("error in unmarshalling string deploymentTemplateResponse data into json Raw message", "appName", configDataQueryParams.AppName, "envName", configDataQueryParams.EnvName, "err", err) + return nil, err + } + variableSnapShotMap := map[string]map[string]string{bean.DeploymentTemplate.ToString(): deploymentTemplateData.VariableSnapshot} + + deploymentConfig := bean2.NewDeploymentAndCmCsConfig(). 
+ WithDeploymentConfigMetadata(deploymentTemplateData.TemplateVersion, deploymentTemplateData.IsAppMetricsEnabled). + WithConfigData(deploymentJson). + WithResourceType(bean.DeploymentTemplate). + WithResolvedValue(json.RawMessage(deploymentTemplateData.ResolvedData)). + WithVariableSnapshot(variableSnapShotMap) + + configDataDto.WithDeploymentTemplateData(deploymentConfig) + + return configDataDto, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getConfigDataForAppConfiguration(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, + appId, envId, clusterId int, userHasAdminAccess bool, systemMetadata *resourceQualifiers.SystemMetadata) (*bean2.DeploymentAndCmCsConfigDto, error) { + configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + var err error switch configDataQueryParams.ConfigType { + case bean2.DefaultVersion.ToString(): + configDataDto, err = impl.getDeploymentAndCmCsConfigDataForDefaultVersion(ctx, configDataQueryParams) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in config data for Default version", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + //no cm or cs to send for default versions + case bean2.PreviousDeployments.ToString(): + configDataDto, err = impl.getDeploymentAndCmCsConfigDataForPreviousDeployments(ctx, configDataQueryParams, appId, envId, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in config data for Previous Deployments", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } default: // keeping default as PublishedOnly - configDataDto, err = impl.getPublishedConfigData(ctx, configDataQueryParams, appId, envId) + configDataDto, err = impl.getPublishedConfigData(ctx, configDataQueryParams, appId, envId, clusterId, userHasAdminAccess, systemMetadata) if err != nil { impl.logger.Errorw("GetAllConfigData, error in config data for PublishedOnly", "configDataQueryParams", configDataQueryParams, "err", err) return nil, err @@ -114,7 +505,40 @@ func (impl *DeploymentConfigurationServiceImpl) GetAllConfigData(ctx context.Con return configDataDto, nil } -func (impl *DeploymentConfigurationServiceImpl) getCmCsEditDataForPublishedOnly(configDataQueryParams *bean2.ConfigDataQueryParams, envId, appId int) (*bean2.DeploymentAndCmCsConfigDto, error) { +func (impl *DeploymentConfigurationServiceImpl) getDeploymentsConfigForDefaultVersion(ctx context.Context, chartRefId int) (json.RawMessage, error) { + deploymentTemplateRequest := generateManifest.DeploymentTemplateRequest{ + ChartRefId: chartRefId, + RequestDataMode: generateManifest.Values, + Type: repository2.DefaultVersions, + } + deploymentTemplateResponse, err := impl.deploymentTemplateService.GetDeploymentTemplate(ctx, deploymentTemplateRequest) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in getting deployment template for ", "deploymentTemplateRequest", deploymentTemplateRequest, "err", err) + return nil, err + } + deploymentJson := json.RawMessage{} + err = deploymentJson.UnmarshalJSON([]byte(deploymentTemplateResponse.Data)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string deploymentTemplateResponse data into json Raw message", "data", deploymentTemplateResponse.Data, "err", err) + return nil, err + } + return deploymentJson, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentAndCmCsConfigDataForDefaultVersion(ctx context.Context, configDataQueryParams 
*bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfigDto, error) { + configData := &bean2.DeploymentAndCmCsConfigDto{} + deploymentTemplateJsonData, err := impl.getDeploymentsConfigForDefaultVersion(ctx, configDataQueryParams.IdentifierId) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in getting deployment config for default version", "chartRefId", configDataQueryParams.IdentifierId, "err", err) + return nil, err + } + deploymentConfig := bean2.NewDeploymentAndCmCsConfig().WithConfigData(deploymentTemplateJsonData).WithResourceType(bean.DeploymentTemplate) + configData.WithDeploymentTemplateData(deploymentConfig) + return configData, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getCmCsEditDataForPublishedOnly(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, envId, + appId int, clusterId int, userHasAdminAccess bool, systemMetadata *resourceQualifiers.SystemMetadata) (*bean2.DeploymentAndCmCsConfigDto, error) { configDataDto := &bean2.DeploymentAndCmCsConfigDto{} var resourceType bean.ResourceType @@ -140,17 +564,35 @@ func (impl *DeploymentConfigurationServiceImpl) getCmCsEditDataForPublishedOnly( impl.logger.Errorw("getCmCsEditDataForPublishedOnly, error in converting to json raw message", "configDataQueryParams", configDataQueryParams, "err", err) return nil, err } + resolvedCmCsMetadataDto, err := impl.ResolveCmCs(ctx, envId, appId, clusterId, userHasAdminAccess, configDataQueryParams.ResourceName, resourceType, systemMetadata) + if err != nil { + impl.logger.Errorw("error in resolving cm and cs for published only config only response", "appId", appId, "envId", envId, "err", err) + return nil, err + } cmCsConfig := bean2.NewDeploymentAndCmCsConfig().WithConfigData(respJson).WithResourceType(resourceType) + if resourceType == bean.CS { + resolvedConfigDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedCmCsMetadataDto.ResolvedSecretData) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in ConvertToJsonRawMessage ", "err", err) + return nil, err + } + cmCsConfig.WithResolvedValue(resolvedConfigDataStringJson).WithVariableSnapshot(resolvedCmCsMetadataDto.VariableMapCS) configDataDto.WithSecretData(cmCsConfig) } else if resourceType == bean.CM { + resolvedConfigDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedCmCsMetadataDto.ResolvedConfigMapData) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in ConvertToJsonRawMessage for resolvedJson", "ResolvedConfigMapData", resolvedCmCsMetadataDto.ResolvedConfigMapData, "err", err) + return nil, err + } + cmCsConfig.WithResolvedValue(resolvedConfigDataStringJson).WithVariableSnapshot(resolvedCmCsMetadataDto.VariableMapCM) configDataDto.WithConfigMapData(cmCsConfig) } return configDataDto, nil } -func (impl *DeploymentConfigurationServiceImpl) getCmCsPublishedConfigResponse(envId, appId int) (*bean2.DeploymentAndCmCsConfigDto, error) { +func (impl *DeploymentConfigurationServiceImpl) getCmCsPublishedConfigResponse(ctx context.Context, envId, appId, clusterId int, userHasAdminAccess bool, systemMetadata *resourceQualifiers.SystemMetadata) (*bean2.DeploymentAndCmCsConfigDto, error) { configDataDto := &bean2.DeploymentAndCmCsConfigDto{} secretData, err := impl.getSecretConfigResponse("", 0, envId, appId) @@ -178,79 +620,277 @@ func (impl *DeploymentConfigurationServiceImpl) getCmCsPublishedConfigResponse(e return nil, err } - cmConfigData := 
bean2.NewDeploymentAndCmCsConfig().WithConfigData(cmRespJson).WithResourceType(bean.CM) - secretConfigData := bean2.NewDeploymentAndCmCsConfig().WithConfigData(secretRespJson).WithResourceType(bean.CS) + resolvedCmCsMetadataDto, err := impl.ResolveCmCs(ctx, envId, appId, clusterId, userHasAdminAccess, "", "", systemMetadata) + if err != nil { + impl.logger.Errorw("error in resolving cm and cs for published only config only response", "appId", appId, "envId", envId, "err", err) + return nil, err + } + resolvedConfigMapDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedCmCsMetadataDto.ResolvedConfigMapData) + if err != nil { + impl.logger.Errorw("error in ConvertToJsonRawMessage for resolvedConfigMapDataStringJson", "resolvedCmData", resolvedCmCsMetadataDto.ResolvedConfigMapData, "err", err) + return nil, err + } + resolvedSecretDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedCmCsMetadataDto.ResolvedSecretData) + if err != nil { + impl.logger.Errorw(" error in ConvertToJsonRawMessage for resolvedConfigDataString", "err", err) + return nil, err + } + + cmConfigData := bean2.NewDeploymentAndCmCsConfig().WithConfigData(cmRespJson).WithResourceType(bean.CM). + WithResolvedValue(resolvedConfigMapDataStringJson).WithVariableSnapshot(resolvedCmCsMetadataDto.VariableMapCM) + + secretConfigData := bean2.NewDeploymentAndCmCsConfig().WithConfigData(secretRespJson).WithResourceType(bean.CS). + WithResolvedValue(resolvedSecretDataStringJson).WithVariableSnapshot(resolvedCmCsMetadataDto.VariableMapCS) configDataDto.WithConfigMapData(cmConfigData).WithSecretData(secretConfigData) return configDataDto, nil } -func (impl *DeploymentConfigurationServiceImpl) getPublishedDeploymentConfig(ctx context.Context, appId, envId int) (json.RawMessage, error) { +func (impl *DeploymentConfigurationServiceImpl) getMergedCmCs(envId, appId int) (*bean2.CmCsMetadataDto, error) { + configAppLevel, err := impl.configMapRepository.GetByAppIdAppLevel(appId) + if err != nil && pg.ErrNoRows != err { + impl.logger.Errorw("error in getting CM/CS app level data", "appId", appId, "err", err) + return nil, err + } + var configMapAppLevel string + var secretAppLevel string + if configAppLevel != nil && configAppLevel.Id > 0 { + configMapAppLevel = configAppLevel.ConfigMapData + secretAppLevel = configAppLevel.SecretData + } + configEnvLevel, err := impl.configMapRepository.GetByAppIdAndEnvIdEnvLevel(appId, envId) + if err != nil && pg.ErrNoRows != err { + impl.logger.Errorw("error in getting CM/CS env level data", "appId", appId, "envId", envId, "err", err) + return nil, err + } + var configMapEnvLevel string + var secretEnvLevel string + if configEnvLevel != nil && configEnvLevel.Id > 0 { + configMapEnvLevel = configEnvLevel.ConfigMapData + secretEnvLevel = configEnvLevel.SecretData + } + mergedConfigMap, err := impl.deploymentConfigService.GetMergedCMCSConfigMap(configMapAppLevel, configMapEnvLevel, repository3.CONFIGMAP_TYPE) + if err != nil { + impl.logger.Errorw("error in merging app level and env level CM configs", "err", err) + return nil, err + } + + mergedSecret, err := impl.deploymentConfigService.GetMergedCMCSConfigMap(secretAppLevel, secretEnvLevel, repository3.SECRET_TYPE) + if err != nil { + impl.logger.Errorw("error in merging app level and env level CM configs", "err", err) + return nil, err + } + return &bean2.CmCsMetadataDto{ + CmMap: mergedConfigMap, + SecretMap: mergedSecret, + ConfigAppLevelId: configAppLevel.Id, + ConfigEnvLevelId: configEnvLevel.Id, + }, nil +} + +func (impl 
*DeploymentConfigurationServiceImpl) ResolveCmCs(ctx context.Context, envId, appId, clusterId int, userHasAdminAccess bool, + resourceName string, resourceType bean.ResourceType, systemMetadata *resourceQualifiers.SystemMetadata) (*bean2.ResolvedCmCsMetadataDto, error) { + scope := resourceQualifiers.Scope{ + AppId: appId, + EnvId: envId, + ClusterId: clusterId, + SystemMetadata: systemMetadata, + } + cmcsMetadataDto, err := impl.getMergedCmCs(envId, appId) + if err != nil { + impl.logger.Errorw("error in getting merged cm cs", "appId", appId, "envId", envId, "err", err) + return nil, err + } + // if resourceName is provided then, resolve cmcs request is for single resource, then remove other data from merged cmCs + if len(resourceName) > 0 { + helper.FilterOutMergedCmCsForResourceName(cmcsMetadataDto, resourceName, resourceType) + } + resolvedConfigList, resolvedSecretList, variableMapCM, variableMapCS, err := impl.scopedVariableManager.ResolveCMCS(ctx, scope, cmcsMetadataDto.ConfigAppLevelId, cmcsMetadataDto.ConfigEnvLevelId, cmcsMetadataDto.CmMap, cmcsMetadataDto.SecretMap) + if err != nil { + impl.logger.Errorw("error in resolving CM/CS", "scope", scope, "appId", appId, "envId", envId, "err", err) + return nil, err + } + + resolvedConfigString, resolvedSecretString, err := impl.getStringifiedCmCs(resolvedConfigList, resolvedSecretList, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("error in getStringifiedCmCs", "resolvedConfigList", resolvedConfigList, "err", err) + return nil, err + } + resolvedData := &bean2.ResolvedCmCsMetadataDto{ + VariableMapCM: variableMapCM, + VariableMapCS: variableMapCS, + ResolvedSecretData: resolvedSecretString, + ResolvedConfigMapData: resolvedConfigString, + } + + return resolvedData, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getStringifiedCmCs(resolvedCmMap map[string]*bean3.ConfigData, resolvedSecretMap map[string]*bean3.ConfigData, + userHasAdminAccess bool) (string, string, error) { + + resolvedConfigDataList := make([]*bean.ConfigData, 0, len(resolvedCmMap)) + resolvedSecretDataList := make([]*bean.ConfigData, 0, len(resolvedSecretMap)) + + for _, resolvedConfigData := range resolvedCmMap { + resolvedConfigDataList = append(resolvedConfigDataList, adapter.ConvertConfigDataToPipelineConfigData(resolvedConfigData)) + } + + for _, resolvedSecretData := range resolvedSecretMap { + resolvedSecretDataList = append(resolvedSecretDataList, adapter.ConvertConfigDataToPipelineConfigData(resolvedSecretData)) + } + if len(resolvedSecretMap) > 0 { + impl.encodeSecretDataFromNonAdminUsers(resolvedSecretDataList, userHasAdminAccess) + } + resolvedConfigDataReq := &bean.ConfigDataRequest{ConfigData: resolvedConfigDataList} + resolvedConfigDataString, err := utils.ConvertToString(resolvedConfigDataReq) + if err != nil { + impl.logger.Errorw(" error in converting resolved config data to string", "resolvedConfigDataReq", resolvedConfigDataReq, "err", err) + return "", "", err + } + resolvedSecretDataReq := &bean.ConfigDataRequest{ConfigData: resolvedSecretDataList} + resolvedSecretDataString, err := utils.ConvertToString(resolvedSecretDataReq) + if err != nil { + impl.logger.Errorw(" error in converting resolved config data to string", "err", err) + return "", "", err + } + return resolvedConfigDataString, resolvedSecretDataString, nil +} +func (impl *DeploymentConfigurationServiceImpl) getPublishedDeploymentConfig(ctx context.Context, appId, envId int) (*bean2.DeploymentAndCmCsConfig, error) { if envId > 0 { - return 
impl.getDeploymentTemplateForEnvLevel(ctx, appId, envId) + deplTemplateResp, err := impl.getDeploymentTemplateForEnvLevel(ctx, appId, envId) + if err != nil { + impl.logger.Errorw("error in getting deployment template env level", "err", err) + return nil, err + } + deploymentJson := json.RawMessage{} + err = deploymentJson.UnmarshalJSON([]byte(deplTemplateResp.Data)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string deploymentTemplateResponse data into json Raw message", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + variableSnapShotMap := make(map[string]map[string]string, len(deplTemplateResp.VariableSnapshot)) + variableSnapShotMap[bean.DeploymentTemplate.ToString()] = deplTemplateResp.VariableSnapshot + + return bean2.NewDeploymentAndCmCsConfig().WithConfigData(deploymentJson).WithResourceType(bean.DeploymentTemplate). + WithResolvedValue(json.RawMessage(deplTemplateResp.ResolvedData)).WithVariableSnapshot(variableSnapShotMap). + WithDeploymentConfigMetadata(deplTemplateResp.TemplateVersion, deplTemplateResp.IsAppMetricsEnabled), nil } - return impl.getBaseDeploymentTemplate(appId) + deplMetadata, err := impl.getBaseDeploymentTemplate(appId) + if err != nil { + impl.logger.Errorw("getting base depl. template", "appid", appId, "err", err) + return nil, err + } + deploymentTemplateRequest := generateManifest.DeploymentTemplateRequest{ + AppId: appId, + RequestDataMode: generateManifest.Values, + } + resolvedTemplate, variableSnapshot, err := impl.deploymentTemplateService.ResolveTemplateVariables(ctx, string(deplMetadata.DeploymentTemplateJson), deploymentTemplateRequest) + if err != nil { + impl.logger.Errorw("error in getting resolved data for base deployment template", "appid", appId, "err", err) + return nil, err + } + + variableSnapShotMap := map[string]map[string]string{bean.DeploymentTemplate.ToString(): variableSnapshot} + return bean2.NewDeploymentAndCmCsConfig().WithConfigData(deplMetadata.DeploymentTemplateJson).WithResourceType(bean.DeploymentTemplate). + WithResolvedValue(json.RawMessage(resolvedTemplate)).WithVariableSnapshot(variableSnapShotMap). 
+ WithDeploymentConfigMetadata(deplMetadata.TemplateVersion, deplMetadata.IsAppMetricsEnabled), nil } func (impl *DeploymentConfigurationServiceImpl) getPublishedConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, - appId, envId int) (*bean2.DeploymentAndCmCsConfigDto, error) { + appId, envId, clusterId int, userHasAdminAccess bool, systemMetadata *resourceQualifiers.SystemMetadata) (*bean2.DeploymentAndCmCsConfigDto, error) { if configDataQueryParams.IsRequestMadeForOneResource() { - return impl.getCmCsEditDataForPublishedOnly(configDataQueryParams, envId, appId) + return impl.getCmCsEditDataForPublishedOnly(ctx, configDataQueryParams, envId, appId, clusterId, userHasAdminAccess, systemMetadata) } //ConfigMapsData and SecretsData are populated here - configData, err := impl.getCmCsPublishedConfigResponse(envId, appId) + configData, err := impl.getCmCsPublishedConfigResponse(ctx, envId, appId, clusterId, userHasAdminAccess, systemMetadata) if err != nil { impl.logger.Errorw("getPublishedConfigData, error in getting cm cs for PublishedOnly state", "appName", configDataQueryParams.AppName, "envName", configDataQueryParams.EnvName, "err", err) return nil, err } - deploymentTemplateJsonData, err := impl.getPublishedDeploymentConfig(ctx, appId, envId) + deploymentTemplateData, err := impl.getPublishedDeploymentConfig(ctx, appId, envId) if err != nil { impl.logger.Errorw("getPublishedConfigData, error in getting publishedOnly deployment config ", "configDataQueryParams", configDataQueryParams, "err", err) return nil, err } - deploymentConfig := bean2.NewDeploymentAndCmCsConfig().WithConfigData(deploymentTemplateJsonData).WithResourceType(bean.DeploymentTemplate) + configData.WithDeploymentTemplateData(deploymentTemplateData) - configData.WithDeploymentTemplateData(deploymentConfig) + pipelineConfigData, err := impl.getPublishedPipelineStrategyConfig(ctx, appId, envId) + if err != nil { + impl.logger.Errorw("getPublishedConfigData, error in getting publishedOnly pipeline strategy ", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + if len(pipelineConfigData.Data) > 0 { + configData.WithPipelineConfigData(pipelineConfigData) + } return configData, nil } -func (impl *DeploymentConfigurationServiceImpl) getBaseDeploymentTemplate(appId int) (json.RawMessage, error) { +func (impl *DeploymentConfigurationServiceImpl) getPublishedPipelineStrategyConfig(ctx context.Context, appId int, envId int) (*bean2.DeploymentAndCmCsConfig, error) { + pipelineConfig := bean2.NewDeploymentAndCmCsConfig() + if envId == 0 { + return pipelineConfig, nil + } + pipeline, err := impl.pipelineRepository.FindActiveByAppIdAndEnvId(appId, envId) + if err != nil { + impl.logger.Errorw("error in FindActiveByAppIdAndEnvId", "appId", appId, "envId", envId, "err", err) + return nil, err + } + pipelineStrategy, err := impl.deploymentConfigService.GetLatestPipelineStrategyConfig(pipeline) + if err != nil && !errors.IsNotFound(err) { + impl.logger.Errorw("error in GetLatestPipelineStrategyConfig", "pipelineId", pipeline.Id, "err", err) + return nil, err + } else if errors.IsNotFound(err) { + return pipelineConfig, nil + } + pipelineStrategyJson := json.RawMessage{} + err = pipelineStrategyJson.UnmarshalJSON([]byte(pipelineStrategy.CodeEditorValue.Value)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string pipelineStrategyHistory data into json Raw message", "err", err) + return nil, err + } + 
pipelineConfig.WithConfigData(pipelineStrategyJson). + WithResourceType(bean.PipelineStrategy). + WithPipelineStrategyMetadata(pipelineStrategy.PipelineTriggerType, string(pipelineStrategy.Strategy)) + return pipelineConfig, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getBaseDeploymentTemplate(appId int) (*bean2.DeploymentTemplateMetadata, error) { deploymentTemplateData, err := impl.chartService.FindLatestChartForAppByAppId(appId) if err != nil { impl.logger.Errorw("error in getting base deployment template for appId", "appId", appId, "err", err) return nil, err } - return deploymentTemplateData.DefaultAppOverride, nil + _, _, version, _, err := impl.chartRefService.GetRefChart(deploymentTemplateData.ChartRefId) + if err != nil { + impl.logger.Errorw("error in getting chart ref by chartRefId ", "chartRefId", deploymentTemplateData.ChartRefId, "err", err) + return nil, err + } + return &bean2.DeploymentTemplateMetadata{ + DeploymentTemplateJson: deploymentTemplateData.DefaultAppOverride, + IsAppMetricsEnabled: deploymentTemplateData.IsAppMetricsEnabled, + TemplateVersion: version, + }, nil } -func (impl *DeploymentConfigurationServiceImpl) getDeploymentTemplateForEnvLevel(ctx context.Context, appId, envId int) (json.RawMessage, error) { +func (impl *DeploymentConfigurationServiceImpl) getDeploymentTemplateForEnvLevel(ctx context.Context, appId, envId int) (generateManifest.DeploymentTemplateResponse, error) { deploymentTemplateRequest := generateManifest.DeploymentTemplateRequest{ AppId: appId, EnvId: envId, RequestDataMode: generateManifest.Values, Type: repository2.PublishedOnEnvironments, } - deploymentTemplateResponse, err := impl.deploymentTemplateService.GetDeploymentTemplate(ctx, deploymentTemplateRequest) + var deploymentTemplateResponse generateManifest.DeploymentTemplateResponse + var err error + deploymentTemplateResponse, err = impl.deploymentTemplateService.GetDeploymentTemplate(ctx, deploymentTemplateRequest) if err != nil { impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in getting deployment template for ", "deploymentTemplateRequest", deploymentTemplateRequest, "err", err) - return nil, err - } - deploymentJson := json.RawMessage{} - err = deploymentJson.UnmarshalJSON([]byte(deploymentTemplateResponse.Data)) - if err != nil { - impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string deploymentTemplateResponse data into json Raw message", "data", deploymentTemplateResponse.Data, "err", err) - return nil, err - } - return deploymentJson, nil -} - -func (impl *DeploymentConfigurationServiceImpl) getDeploymentConfig(ctx context.Context, appId, envId int) (json.RawMessage, error) { - if envId > 0 { - return impl.getDeploymentTemplateForEnvLevel(ctx, appId, envId) + return deploymentTemplateResponse, err } - return impl.getBaseDeploymentTemplate(appId) + return deploymentTemplateResponse, nil } func (impl *DeploymentConfigurationServiceImpl) getSecretConfigResponse(resourceName string, resourceId, envId, appId int) (*bean.ConfigDataRequest, error) { diff --git a/pkg/configDiff/adaptor/adaptor.go b/pkg/configDiff/adaptor/adaptor.go index 4ab81eb2d11..6fd46129fe1 100644 --- a/pkg/configDiff/adaptor/adaptor.go +++ b/pkg/configDiff/adaptor/adaptor.go @@ -1,7 +1,9 @@ package adaptor import ( + bean3 "github.com/devtron-labs/devtron/pkg/bean" bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/adapter" "github.com/devtron-labs/devtron/pkg/pipeline/bean" ) @@ -27,3 +29,35 @@ 
func GetCmCsAppAndEnvLevelMap(cMCSNamesAppLevel, cMCSNamesEnvLevel []bean.Config } return cMCSNamesAppLevelMap, cMCSNamesEnvLevelMap } + +func ConfigListConvertor(r bean3.ConfigList) bean.ConfigsList { + pipelineConfigData := make([]*bean.ConfigData, 0, len(r.ConfigData)) + for _, item := range r.ConfigData { + pipelineConfigData = append(pipelineConfigData, adapter.ConvertConfigDataToPipelineConfigData(item)) + } + return bean.ConfigsList{ConfigData: pipelineConfigData} +} + +func SecretListConvertor(r bean3.SecretList) bean.SecretsList { + pipelineConfigData := make([]*bean.ConfigData, 0, len(r.ConfigData)) + for _, item := range r.ConfigData { + pipelineConfigData = append(pipelineConfigData, adapter.ConvertConfigDataToPipelineConfigData(item)) + } + return bean.SecretsList{ConfigData: pipelineConfigData} +} + +func ReverseConfigListConvertor(r bean.ConfigsList) bean3.ConfigList { + configData := make([]*bean3.ConfigData, 0, len(r.ConfigData)) + for _, item := range r.ConfigData { + configData = append(configData, adapter.ConvertPipelineConfigDataToConfigData(item)) + } + return bean3.ConfigList{ConfigData: configData} +} + +func ReverseSecretListConvertor(r bean.SecretsList) bean3.SecretList { + configData := make([]*bean3.ConfigData, 0, len(r.ConfigData)) + for _, item := range r.ConfigData { + configData = append(configData, adapter.ConvertPipelineConfigDataToConfigData(item)) + } + return bean3.SecretList{ConfigData: configData} +} diff --git a/pkg/configDiff/bean/bean.go b/pkg/configDiff/bean/bean.go index 2113ea81a65..da4905e29c8 100644 --- a/pkg/configDiff/bean/bean.go +++ b/pkg/configDiff/bean/bean.go @@ -4,6 +4,8 @@ import "C" import ( "encoding/json" "fmt" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + bean3 "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline/bean" ) @@ -11,6 +13,8 @@ type ConfigState string const ( PublishedConfigState ConfigState = "PublishedOnly" + PreviousDeployments ConfigState = "PreviousDeployments" + DefaultVersion ConfigState = "DefaultVersion" ) func (r ConfigState) ToString() string { @@ -25,6 +29,19 @@ const ( Overridden ConfigStage = "Overridden" ) +type ConfigArea string + +const ( + AppConfiguration ConfigArea = "AppConfiguration" + DeploymentHistory ConfigArea = "DeploymentHistory" + CdRollback ConfigArea = "CdRollback" + ResolveData ConfigArea = "ResolveData" +) + +func (r ConfigArea) ToString() string { + return string(r) +} + type ConfigProperty struct { Id int `json:"id"` Name string `json:"name"` @@ -71,8 +88,16 @@ func (r *ConfigProperty) GetIdentifier() ConfigPropertyIdentifier { } type DeploymentAndCmCsConfig struct { - ResourceType bean.ResourceType `json:"resourceType"` - Data json.RawMessage `json:"data"` + ResourceType bean.ResourceType `json:"resourceType"` + Data json.RawMessage `json:"data"` + VariableSnapshot map[string]map[string]string `json:"variableSnapshot"` // for deployment->{Deployment Template: resolvedValuesMap}, for cm->{cmComponentName: resolvedValuesMap} + ResolvedValue json.RawMessage `json:"resolvedValue"` + // for deployment template + TemplateVersion string `json:"templateVersion,omitempty"` + IsAppMetricsEnabled bool `json:"isAppMetricsEnabled,omitempty"` + //for pipeline strategy + PipelineTriggerType pipelineConfig.TriggerType `json:"pipelineTriggerType,omitempty"` + Strategy string `json:"strategy,omitempty"` } func NewDeploymentAndCmCsConfig() *DeploymentAndCmCsConfig { @@ -89,10 +114,33 @@ func (r *DeploymentAndCmCsConfig) 
WithConfigData(data json.RawMessage) *Deployme return r } +func (r *DeploymentAndCmCsConfig) WithVariableSnapshot(snapshot map[string]map[string]string) *DeploymentAndCmCsConfig { + r.VariableSnapshot = snapshot + return r +} + +func (r *DeploymentAndCmCsConfig) WithResolvedValue(resolvedValue json.RawMessage) *DeploymentAndCmCsConfig { + r.ResolvedValue = resolvedValue + return r +} + +func (r *DeploymentAndCmCsConfig) WithDeploymentConfigMetadata(templateVersion string, isAppMetricsEnabled bool) *DeploymentAndCmCsConfig { + r.TemplateVersion = templateVersion + r.IsAppMetricsEnabled = isAppMetricsEnabled + return r +} + +func (r *DeploymentAndCmCsConfig) WithPipelineStrategyMetadata(pipelineTriggerType pipelineConfig.TriggerType, strategy string) *DeploymentAndCmCsConfig { + r.PipelineTriggerType = pipelineTriggerType + r.Strategy = strategy + return r +} + type DeploymentAndCmCsConfigDto struct { DeploymentTemplate *DeploymentAndCmCsConfig `json:"deploymentTemplate"` ConfigMapsData *DeploymentAndCmCsConfig `json:"configMapData"` SecretsData *DeploymentAndCmCsConfig `json:"secretsData"` + PipelineConfigData *DeploymentAndCmCsConfig `json:"pipelineConfigData,omitempty"` IsAppAdmin bool `json:"isAppAdmin"` } @@ -112,17 +160,23 @@ func (r *DeploymentAndCmCsConfigDto) WithSecretData(data *DeploymentAndCmCsConfi r.SecretsData = data return r } +func (r *DeploymentAndCmCsConfigDto) WithPipelineConfigData(data *DeploymentAndCmCsConfig) *DeploymentAndCmCsConfigDto { + r.PipelineConfigData = data + return r +} type ConfigDataQueryParams struct { AppName string `schema:"appName"` EnvName string `schema:"envName"` ConfigType string `schema:"configType"` IdentifierId int `schema:"identifierId"` - PipelineId int `schema:"pipelineId"` // req for fetching previous deployments data - ResourceName string `schema:"resourceName"` - ResourceType string `schema:"resourceType"` - ResourceId int `schema:"resourceId"` + PipelineId int `schema:"pipelineId"` // req for fetching previous deployments data + ResourceName string `schema:"resourceName"` // used in case of cm and cs + ResourceType string `schema:"resourceType"` // used in case of cm and cs + ResourceId int `schema:"resourceId"` // used in case of cm and cs UserId int32 `schema:"-"` + WfrId int `schema:"wfrId"` + ConfigArea string `schema:"configArea"` } // FilterCriteria []string `schema:"filterCriteria"` @@ -150,3 +204,33 @@ func (r *ConfigDataQueryParams) IsRequestMadeForOneResource() bool { const ( InvalidConfigTypeErr = "invalid config type provided, please send a valid config type" ) + +type CmCsMetadataDto struct { + CmMap map[string]*bean3.ConfigData + SecretMap map[string]*bean3.ConfigData + ConfigAppLevelId int + ConfigEnvLevelId int +} + +type ResolvedCmCsMetadataDto struct { + ResolvedConfigMapData string + ResolvedSecretData string + VariableMapCM map[string]map[string]string + VariableMapCS map[string]map[string]string +} + +type ValuesDto struct { + Values string `json:"values"` +} + +type DeploymentTemplateMetadata struct { + DeploymentTemplateJson json.RawMessage + TemplateVersion string + IsAppMetricsEnabled bool +} + +const ( + NoDeploymentDoneForSelectedImage = "there were no deployments done for the selected image" + ExpectedWfrIdNotPassedInQueryParamErr = "wfrId is expected in the query param which was not passed" + SecretMaskedValue = "*****" +) diff --git a/pkg/configDiff/helper/helper.go b/pkg/configDiff/helper/helper.go index 70082a7bea6..3bf5e5ffaab 100644 --- a/pkg/configDiff/helper/helper.go +++ 
b/pkg/configDiff/helper/helper.go @@ -1,7 +1,9 @@ package helper import ( + bean3 "github.com/devtron-labs/devtron/pkg/bean" bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean" ) func GetCombinedPropertiesMap(cmcsKeyPropertyAppLevelMap, cmcsKeyPropertyEnvLevelMap map[string]*bean2.ConfigProperty) []*bean2.ConfigProperty { @@ -18,3 +20,39 @@ func GetCombinedPropertiesMap(cmcsKeyPropertyAppLevelMap, cmcsKeyPropertyEnvLeve } return combinedProperties } + +func GetKeysToDelete(cmcsData map[string]*bean3.ConfigData, resourceName string) []string { + keysToDelete := make([]string, 0, len(cmcsData)) + for key := range cmcsData { + if key != resourceName { + keysToDelete = append(keysToDelete, key) + } + } + return keysToDelete +} + +func FilterOutMergedCmCsForResourceName(cmcsMerged *bean2.CmCsMetadataDto, resourceName string, resourceType bean.ResourceType) { + for _, key := range GetKeysToDelete(cmcsMerged.SecretMap, resourceName) { + delete(cmcsMerged.SecretMap, key) + } + for _, key := range GetKeysToDelete(cmcsMerged.CmMap, resourceName) { + delete(cmcsMerged.CmMap, key) + } + + // handle the case when a cm and a cs can have the same name; in that case, check from the resource type whether the correct key is filtered out or not + if resourceType == bean.CS { + if len(cmcsMerged.CmMap) > 0 { + // delete all elements from cmMap as requested resource is of secret type + for key := range cmcsMerged.CmMap { + delete(cmcsMerged.CmMap, key) + } + } + } else if resourceType == bean.CM { + if len(cmcsMerged.SecretMap) > 0 { + // delete all elements from secretMap as requested resource is of configmap type + for key := range cmcsMerged.SecretMap { + delete(cmcsMerged.SecretMap, key) + } + } + } +} diff --git a/pkg/configDiff/utils/utils.go b/pkg/configDiff/utils/utils.go index 8185993775f..62d1272c312 100644 --- a/pkg/configDiff/utils/utils.go +++ b/pkg/configDiff/utils/utils.go @@ -14,3 +14,11 @@ func ConvertToJsonRawMessage(request interface{}) (json.RawMessage, error) { } return r, nil } + +func ConvertToString(req interface{}) (string, error) { + reqByte, err := json.Marshal(req) + if err != nil { + return "", err + } + return string(reqByte), nil +} diff --git a/pkg/deployment/deployedApp/DeployedAppService.go b/pkg/deployment/deployedApp/DeployedAppService.go index 64f0b0b1a1f..be9e9f8d623 100644 --- a/pkg/deployment/deployedApp/DeployedAppService.go +++ b/pkg/deployment/deployedApp/DeployedAppService.go @@ -110,7 +110,7 @@ func (impl *DeployedAppServiceImpl) StopStartApp(ctx context.Context, stopReques Context: ctx, ReferenceId: stopRequest.ReferenceId, } - id, err := impl.cdTriggerService.ManualCdTrigger(triggerContext, overrideRequest) + id, _, err := impl.cdTriggerService.ManualCdTrigger(triggerContext, overrideRequest) if err != nil { impl.logger.Errorw("error in stopping app", "err", err, "appId", stopRequest.AppId, "envId", stopRequest.EnvironmentId) return 0, err diff --git a/pkg/deployment/gitOps/git/GitOperationService.go b/pkg/deployment/gitOps/git/GitOperationService.go index 5ce732825de..7e3b9eb4bb5 100644 --- a/pkg/deployment/gitOps/git/GitOperationService.go +++ b/pkg/deployment/gitOps/git/GitOperationService.go @@ -56,6 +56,7 @@ type GitOperationService interface { CloneInDir(repoUrl, chartDir string) (string, error) ReloadGitOpsProvider() error UpdateGitHostUrlByProvider(request *apiBean.GitOpsConfigDto) error + GetRepoUrlWithUserName(url string) (string, error) } type GitOperationServiceImpl struct { @@ -475,3 +476,7 @@ func
(impl *GitOperationServiceImpl) addConfigFileToChart(config *ChartConfig, d } return nil } + +func (impl *GitOperationServiceImpl) GetRepoUrlWithUserName(url string) (string, error) { + return url, nil +} diff --git a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go index 010d3986d57..33e47e6b1a9 100644 --- a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go @@ -146,6 +146,57 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err return nil } +func (impl *TriggerServiceImpl) TriggerAutoCDOnPreStageSuccess(triggerContext bean.TriggerContext, cdPipelineId, ciArtifactId, workflowId int, triggerdBy int32, scanExecutionHistoryId int) error { + pipeline, err := impl.pipelineRepository.FindById(cdPipelineId) + if err != nil { + return err + } + if pipeline.TriggerType == pipelineConfig.TRIGGER_TYPE_AUTOMATIC { + ciArtifact, err := impl.ciArtifactRepository.Get(ciArtifactId) + if err != nil { + return err + } + cdWorkflow, err := impl.cdWorkflowRepository.FindById(workflowId) + if err != nil { + return err + } + // TODO : confirm about this logic used for applyAuth + + // checking if deployment is triggered already, then ignore trigger + deploymentTriggeredAlready := impl.checkDeploymentTriggeredAlready(cdWorkflow.Id) + if deploymentTriggeredAlready { + impl.logger.Warnw("deployment is already triggered, so ignoring this msg", "cdPipelineId", cdPipelineId, "ciArtifactId", ciArtifactId, "workflowId", workflowId) + return nil + } + + triggerRequest := bean.TriggerRequest{ + CdWf: cdWorkflow, + Pipeline: pipeline, + Artifact: ciArtifact, + TriggeredBy: triggerdBy, + TriggerContext: triggerContext, + } + + triggerRequest.TriggerContext.Context = context.Background() + err = impl.TriggerAutomaticDeployment(triggerRequest) + if err != nil { + return err + } + } + return nil +} +func (impl *TriggerServiceImpl) checkDeploymentTriggeredAlready(wfId int) bool { + deploymentTriggeredAlready := false + // TODO : need to check this logic for status check in case of multiple deployments requirement for same workflow + workflowRunner, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(context.Background(), wfId, bean2.CD_WORKFLOW_TYPE_DEPLOY) + if err != nil { + impl.logger.Errorw("error occurred while fetching workflow runner", "wfId", wfId, "err", err) + return deploymentTriggeredAlready + } + deploymentTriggeredAlready = workflowRunner.CdWorkflowId == wfId + return deploymentTriggeredAlready +} + func (impl *TriggerServiceImpl) createStartingWfAndRunner(request bean.TriggerRequest, triggeredAt time.Time) (*pipelineConfig.CdWorkflow, *pipelineConfig.CdWorkflowRunner, error) { triggeredBy := request.TriggeredBy artifact := request.Artifact diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index 49ad2949eca..a4ffae1c152 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -56,6 +56,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/common" bean9 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" + "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" "github.com/devtron-labs/devtron/pkg/deployment/manifest" bean5 
"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/publish" @@ -77,7 +78,7 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/workflow/cd" - util3 "github.com/devtron-labs/devtron/util" + globalUtil "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" util2 "github.com/devtron-labs/devtron/util/event" "github.com/devtron-labs/devtron/util/rbac" @@ -100,9 +101,11 @@ type TriggerService interface { TriggerPostStage(request bean.TriggerRequest) error TriggerPreStage(request bean.TriggerRequest) error + TriggerAutoCDOnPreStageSuccess(triggerContext bean.TriggerContext, cdPipelineId, ciArtifactId, workflowId int, triggerdBy int32, scanExecutionHistoryId int) error + TriggerStageForBulk(triggerRequest bean.TriggerRequest) error - ManualCdTrigger(triggerContext bean.TriggerContext, overrideRequest *bean3.ValuesOverrideRequest) (int, error) + ManualCdTrigger(triggerContext bean.TriggerContext, overrideRequest *bean3.ValuesOverrideRequest) (int, string, error) TriggerAutomaticDeployment(request bean.TriggerRequest) error TriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (releaseNo int, err error) @@ -120,7 +123,7 @@ type TriggerServiceImpl struct { chartTemplateService util.ChartTemplateService eventFactory client.EventFactory eventClient client.EventClient - globalEnvVariables *util3.GlobalEnvVariables + globalEnvVariables *globalUtil.GlobalEnvVariables workflowEventPublishService out.WorkflowEventPublishService manifestCreationService manifest.ManifestCreationService deployedConfigurationHistoryService history.DeployedConfigurationHistoryService @@ -163,8 +166,9 @@ type TriggerServiceImpl struct { K8sUtil *util5.K8sServiceImpl transactionUtilImpl *sql.TransactionUtilImpl deploymentConfigService common.DeploymentConfigService - deploymentServiceTypeConfig *util3.DeploymentServiceTypeConfig + deploymentServiceTypeConfig *globalUtil.DeploymentServiceTypeConfig ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator + gitOperationService git.GitOperationService attributeService attributes.AttributesService } @@ -197,7 +201,7 @@ func NewTriggerServiceImpl(logger *zap.SugaredLogger, helmAppClient gRPC.HelmAppClient, eventFactory client.EventFactory, eventClient client.EventClient, - envVariables *util3.EnvironmentVariables, + envVariables *globalUtil.EnvironmentVariables, appRepository appRepository.AppRepository, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, imageScanHistoryRepository security.ImageScanHistoryRepository, @@ -220,7 +224,9 @@ func NewTriggerServiceImpl(logger *zap.SugaredLogger, K8sUtil *util5.K8sServiceImpl, transactionUtilImpl *sql.TransactionUtilImpl, deploymentConfigService common.DeploymentConfigService, - ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator, attributeService attributes.AttributesService, + ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator, + gitOperationService git.GitOperationService, + attributeService attributes.AttributesService, ) (*TriggerServiceImpl, error) { impl := &TriggerServiceImpl{ logger: logger, @@ -277,6 +283,7 @@ func NewTriggerServiceImpl(logger *zap.SugaredLogger, deploymentConfigService: deploymentConfigService, deploymentServiceTypeConfig: 
envVariables.DeploymentServiceTypeConfig, ciCdPipelineOrchestrator: ciCdPipelineOrchestrator, + gitOperationService: gitOperationService, attributeService: attributeService, } config, err := types.GetCdConfig() @@ -369,54 +376,91 @@ func (impl *TriggerServiceImpl) validateDeploymentTriggerRequest(ctx context.Con } // TODO: write a wrapper to handle auto and manual trigger -func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerContext, overrideRequest *bean3.ValuesOverrideRequest) (int, error) { +func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerContext, overrideRequest *bean3.ValuesOverrideRequest) (int, string, error) { + + triggerContext.TriggerType = bean.Manual //setting triggeredAt variable to have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() releaseId := 0 ctx := triggerContext.Context - var err error cdPipeline, err := impl.getCdPipelineForManualCdTrigger(ctx, overrideRequest.PipelineId) if err != nil { - return 0, err + if overrideRequest.WfrId != 0 { + err2 := impl.cdWorkflowCommonService.MarkDeploymentFailedForRunnerId(overrideRequest.WfrId, err, overrideRequest.UserId) + if err2 != nil { + impl.logger.Errorw("error while updating current runner status to failed, ManualCdTrigger", "cdWfr", overrideRequest.WfrId, "err2", err2) + } + } + return 0, "", err } envDeploymentConfig, err := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(cdPipeline.AppId, cdPipeline.EnvironmentId) if err != nil { impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", cdPipeline.AppId, "envId", cdPipeline.EnvironmentId, "err", err) - return 0, err + return 0, "", err } adapter.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline, envDeploymentConfig) + ciArtifactId := overrideRequest.CiArtifactId + + _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") + artifact, err := impl.ciArtifactRepository.Get(ciArtifactId) + span.End() + if err != nil { + impl.logger.Errorw("error in getting CiArtifact", "CiArtifactId", overrideRequest.CiArtifactId, "err", err) + return 0, "", err + } + + // Migration of deprecated DataSource Type + if artifact.IsMigrationRequired() { + migrationErr := impl.ciArtifactRepository.MigrateToWebHookDataSourceType(artifact.Id) + if migrationErr != nil { + impl.logger.Warnw("unable to migrate deprecated DataSource", "artifactId", artifact.Id) + } + } + + _, imageTag, err := artifact.ExtractImageRepoAndTag() + if err != nil { + impl.logger.Errorw("error in getting image tag and repo", "err", err) + } + helmPackageName := fmt.Sprintf("%s-%s-%s", cdPipeline.App.AppName, cdPipeline.Environment.Name, imageTag) switch overrideRequest.CdWorkflowType { case bean3.CD_WORKFLOW_TYPE_PRE: - _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") - artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) - span.End() - if err != nil { - impl.logger.Errorw("error in getting CiArtifact", "CiArtifactId", overrideRequest.CiArtifactId, "err", err) - return 0, err - } - // Migration of deprecated DataSource Type - if artifact.IsMigrationRequired() { - migrationErr := impl.ciArtifactRepository.MigrateToWebHookDataSourceType(artifact.Id) - if migrationErr != nil { - impl.logger.Warnw("unable to migrate deprecated DataSource", "artifactId", artifact.Id) + var cdWf *pipelineConfig.CdWorkflow + if overrideRequest.CdWorkflowId == 0 { + cdWf = &pipelineConfig.CdWorkflow{ + 
CiArtifactId: artifact.Id, + PipelineId: cdPipeline.Id, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: 1, UpdatedOn: triggeredAt, UpdatedBy: 1}, + } + err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) + if err != nil { + return 0, "", err + } + } else { + cdWf, err = impl.cdWorkflowRepository.FindById(overrideRequest.CdWorkflowId) + if err != nil { + impl.logger.Errorw("error in TriggerPreStage, ManualCdTrigger", "err", err) + return 0, "", err } } + overrideRequest.CdWorkflowId = cdWf.Id + _, span = otel.Tracer("orchestrator").Start(ctx, "TriggerPreStage") triggerRequest := bean.TriggerRequest{ - CdWf: nil, + CdWf: cdWf, Artifact: artifact, Pipeline: cdPipeline, TriggeredBy: overrideRequest.UserId, ApplyAuth: false, TriggerContext: triggerContext, RefCdWorkflowRunnerId: 0, + CdWorkflowRunnerId: overrideRequest.WfrId, } err = impl.TriggerPreStage(triggerRequest) span.End() if err != nil { impl.logger.Errorw("error in TriggerPreStage, ManualCdTrigger", "err", err) - return 0, err + return 0, "", err } case bean3.CD_WORKFLOW_TYPE_DEPLOY: if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { @@ -426,7 +470,7 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte cdWf, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean3.CD_WORKFLOW_TYPE_PRE) if err != nil && !util.IsErrNoRows(err) { impl.logger.Errorw("error in getting cdWorkflow, ManualCdTrigger", "CdWorkflowId", overrideRequest.CdWorkflowId, "err", err) - return 0, err + return 0, "", err } cdWorkflowId := cdWf.CdWorkflowId @@ -439,7 +483,7 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) if err != nil { impl.logger.Errorw("error in creating cdWorkflow, ManualCdTrigger", "PipelineId", overrideRequest.PipelineId, "err", err) - return 0, err + return 0, "", err } cdWorkflowId = cdWf.Id } @@ -457,43 +501,29 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte ReferenceId: triggerContext.ReferenceId, } savedWfr, err := impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) - overrideRequest.WfrId = savedWfr.Id if err != nil { impl.logger.Errorw("err in creating cdWorkflowRunner, ManualCdTrigger", "cdWorkflowId", cdWorkflowId, "err", err) - return 0, err + return 0, "", err } runner.CdWorkflow = &pipelineConfig.CdWorkflow{ Pipeline: cdPipeline, } + overrideRequest.WfrId = savedWfr.Id overrideRequest.CdWorkflowId = cdWorkflowId // creating cd pipeline status timeline for deployment initialisation - timeline := impl.pipelineStatusTimelineService.NewDevtronAppPipelineStatusTimelineDbObject(savedWfr.Id, timelineStatus.TIMELINE_STATUS_DEPLOYMENT_INITIATED, timelineStatus.TIMELINE_DESCRIPTION_DEPLOYMENT_INITIATED, overrideRequest.UserId) - _, span := otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimelineForACDHelmApps") + timeline := impl.pipelineStatusTimelineService.NewDevtronAppPipelineStatusTimelineDbObject(runner.Id, timelineStatus.TIMELINE_STATUS_DEPLOYMENT_INITIATED, timelineStatus.TIMELINE_DESCRIPTION_DEPLOYMENT_INITIATED, overrideRequest.UserId) + _, span = otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimelineForACDHelmApps") _, err = impl.pipelineStatusTimelineService.SaveTimelineIfNotAlreadyPresent(timeline, nil) span.End() if err != nil { impl.logger.Errorw("error in creating timeline status for deployment initiation, ManualCdTrigger", "err", err, 
"timeline", timeline) } - _, span = otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") - artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) - span.End() - if err != nil { - impl.logger.Errorw("error in getting ciArtifact, ManualCdTrigger", "CiArtifactId", overrideRequest.CiArtifactId, "err", err) - return 0, err - } - // Migration of deprecated DataSource Type - if artifact.IsMigrationRequired() { - migrationErr := impl.ciArtifactRepository.MigrateToWebHookDataSourceType(artifact.Id) - if migrationErr != nil { - impl.logger.Warnw("unable to migrate deprecated DataSource", "artifactId", artifact.Id) - } - } if isNotHibernateRequest(overrideRequest.DeploymentType) { validationErr := impl.validateDeploymentTriggerRequest(ctx, runner, cdPipeline, artifact.ImageDigest, envDeploymentConfig, overrideRequest.UserId) if validationErr != nil { impl.logger.Errorw("validation error deployment request", "cdWfr", runner.Id, "err", validationErr) - return 0, validationErr + return 0, "", validationErr } } // Deploy the release @@ -505,57 +535,58 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte if err != nil { impl.logger.Errorw("error while updating current runner status to failed", "cdWfr", runner.Id, "err", err) } - return 0, releaseErr + return 0, "", releaseErr } case bean3.CD_WORKFLOW_TYPE_POST: cdWfRunner, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean3.CD_WORKFLOW_TYPE_DEPLOY) if err != nil && !util.IsErrNoRows(err) { impl.logger.Errorw("err in getting cdWorkflowRunner, ManualCdTrigger", "cdWorkflowId", overrideRequest.CdWorkflowId, "err", err) - return 0, err + return 0, "", err } var cdWf *pipelineConfig.CdWorkflow if cdWfRunner.CdWorkflowId == 0 { cdWf = &pipelineConfig.CdWorkflow{ - CiArtifactId: overrideRequest.CiArtifactId, + CiArtifactId: ciArtifactId, PipelineId: overrideRequest.PipelineId, AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, } err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) if err != nil { impl.logger.Errorw("error in creating cdWorkflow, ManualCdTrigger", "CdWorkflowId", overrideRequest.CdWorkflowId, "err", err) - return 0, err + return 0, "", err } + overrideRequest.CdWorkflowId = cdWf.Id } else { - _, span := otel.Tracer("orchestrator").Start(ctx, "cdWorkflowRepository.FindById") + _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowRepository.FindById") cdWf, err = impl.cdWorkflowRepository.FindById(overrideRequest.CdWorkflowId) span.End() if err != nil && !util.IsErrNoRows(err) { impl.logger.Errorw("error in getting cdWorkflow, ManualCdTrigger", "CdWorkflowId", overrideRequest.CdWorkflowId, "err", err) - return 0, err + return 0, "", err } } - _, span := otel.Tracer("orchestrator").Start(ctx, "TriggerPostStage") + _, span = otel.Tracer("orchestrator").Start(ctx, "TriggerPostStage") triggerRequest := bean.TriggerRequest{ CdWf: cdWf, Pipeline: cdPipeline, TriggeredBy: overrideRequest.UserId, RefCdWorkflowRunnerId: 0, TriggerContext: triggerContext, + CdWorkflowRunnerId: overrideRequest.WfrId, } err = impl.TriggerPostStage(triggerRequest) span.End() if err != nil { impl.logger.Errorw("error in TriggerPostStage, ManualCdTrigger", "CdWorkflowId", cdWf.Id, "err", err) - return 0, err + return 0, "", err } default: impl.logger.Errorw("invalid CdWorkflowType, ManualCdTrigger", "CdWorkflowType", overrideRequest.CdWorkflowType, "err", err) - 
return 0, fmt.Errorf("invalid CdWorkflowType %s for the trigger request", string(overrideRequest.CdWorkflowType)) + return 0, "", fmt.Errorf("invalid CdWorkflowType %s for the trigger request", string(overrideRequest.CdWorkflowType)) } - - return releaseId, err + return releaseId, helmPackageName, err } func isNotHibernateRequest(deploymentType models.DeploymentType) bool { @@ -643,8 +674,12 @@ func (impl *TriggerServiceImpl) TriggerAutomaticDeployment(request bean.TriggerR func (impl *TriggerServiceImpl) TriggerCD(ctx context.Context, artifact *repository3.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, envDeploymentConfig *bean9.DeploymentConfig, triggeredAt time.Time) error { impl.logger.Debugw("automatic pipeline trigger attempt async", "artifactId", artifact.Id) - - return impl.triggerReleaseAsync(ctx, artifact, cdWorkflowId, wfrId, pipeline, envDeploymentConfig, triggeredAt) + err := impl.triggerReleaseAsync(ctx, artifact, cdWorkflowId, wfrId, pipeline, envDeploymentConfig, triggeredAt) + if err != nil { + impl.logger.Errorw("error in cd trigger", "err", err) + return err + } + return err } func (impl *TriggerServiceImpl) triggerReleaseAsync(ctx context.Context, artifact *repository3.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, envDeploymentConfig *bean9.DeploymentConfig, triggeredAt time.Time) error { @@ -841,10 +876,10 @@ func (impl *TriggerServiceImpl) performGitOps(ctx context.Context, impl.logger.Errorw("error in building manifest push template", "err", err) return err } - manifestPushService := impl.getManifestPushService(triggerEvent) + manifestPushService := impl.getManifestPushService(triggerEvent.ManifestStorageType) manifestPushResponse := manifestPushService.PushChart(newCtx, manifestPushTemplate) if manifestPushResponse.Error != nil { - impl.logger.Errorw("error in pushing manifest to git", "err", manifestPushResponse.Error, "git_repo_url", manifestPushTemplate.RepoUrl) + impl.logger.Errorw("error in pushing manifest to git/helm", "err", manifestPushResponse.Error, "git_repo_url", manifestPushTemplate.RepoUrl) return manifestPushResponse.Error } if manifestPushResponse.IsNewGitRepoConfigured() { @@ -907,7 +942,7 @@ func (impl *TriggerServiceImpl) triggerPipeline(overrideRequest *bean3.ValuesOve } } - go impl.writeCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id) + go impl.writeCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id, overrideRequest.WfrId) _ = impl.markImageScanDeployed(newCtx, overrideRequest.AppId, overrideRequest.EnvId, overrideRequest.ClusterId, valuesOverrideResponse.Artifact.ImageDigest, valuesOverrideResponse.Artifact.ScanEnabled, valuesOverrideResponse.Artifact.Image) @@ -961,9 +996,9 @@ func (impl *TriggerServiceImpl) buildManifestPushTemplate(overrideRequest *bean3 return manifestPushTemplate, err } -func (impl *TriggerServiceImpl) getManifestPushService(triggerEvent bean.TriggerEvent) publish.ManifestPushService { +func (impl *TriggerServiceImpl) getManifestPushService(storageType string) publish.ManifestPushService { var manifestPushService publish.ManifestPushService - if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { + if storageType == bean2.ManifestStorageGit { manifestPushService = impl.gitOpsManifestPushService } return 
manifestPushService @@ -972,23 +1007,31 @@ func (impl *TriggerServiceImpl) getManifestPushService(triggerEvent bean.Trigger func (impl *TriggerServiceImpl) deployApp(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, triggerEvent bean.TriggerEvent) error { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.deployApp") defer span.End() + var referenceChartByte []byte + var err error + if util.IsAcdApp(overrideRequest.DeploymentAppType) && triggerEvent.DeployArgoCdApp { - err := impl.deployArgoCdApp(newCtx, overrideRequest, valuesOverrideResponse) + err = impl.deployArgoCdApp(newCtx, overrideRequest, valuesOverrideResponse) if err != nil { impl.logger.Errorw("error in deploying app on ArgoCd", "err", err) return err } } else if util.IsHelmApp(overrideRequest.DeploymentAppType) { - _, err := impl.createHelmAppForCdPipeline(newCtx, overrideRequest, valuesOverrideResponse) + _, referenceChartByte, err = impl.createHelmAppForCdPipeline(newCtx, overrideRequest, valuesOverrideResponse) if err != nil { impl.logger.Errorw("error in creating or updating helm application for cd pipeline", "err", err) return err } } + impl.postDeployHook(overrideRequest, valuesOverrideResponse, referenceChartByte, err) return nil } -func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse) (bool, error) { +func (impl *TriggerServiceImpl) postDeployHook(overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, referenceChartByte []byte, err error) { + impl.logger.Debugw("no post deploy hook registered") +} + +func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse) (bool, []byte, error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.createHelmAppForCdPipeline") defer span.End() pipelineModel := valuesOverrideResponse.Pipeline @@ -1001,7 +1044,7 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, } referenceTemplate := envOverride.Chart.ReferenceTemplate referenceTemplatePath := path.Join(bean5.RefChartDirPath, referenceTemplate) - + var referenceChartByte []byte if util.IsHelmApp(valuesOverrideResponse.DeploymentConfig.DeploymentAppType) { var sanitizedK8sVersion string //handle specific case for all cronjob charts from cronjob-chart_1-2-0 to cronjob-chart_1-5-0 where semverCompare @@ -1012,17 +1055,17 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, k8sServerVersion, err := impl.K8sUtil.GetKubeVersion() if err != nil { impl.logger.Errorw("exception caught in getting k8sServerVersion", "err", err) - return false, err + return false, nil, err } sanitizedK8sVersion = k8s2.StripPrereleaseFromK8sVersion(k8sServerVersion.String()) } - referenceChartByte := envOverride.Chart.ReferenceChart + referenceChartByte = envOverride.Chart.ReferenceChart // here updating reference chart into database. 
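Note: sanitizedK8sVersion above exists so that semverCompare in the older cronjob charts (cronjob-chart_1-2-0 to cronjob-chart_1-5-0) is not tripped up by prerelease suffixes such as v1.27.4-gke.900; the real helper is k8s2.StripPrereleaseFromK8sVersion. A standard-library-only sketch of the same idea (illustrative, not the actual implementation):

package main

import (
	"fmt"
	"strings"
)

// stripPrerelease drops everything after the first '-' or '+' in a version
// string, e.g. "v1.27.4-gke.900" -> "v1.27.4". This mirrors the intent of the
// helper used above, not its exact behaviour.
func stripPrerelease(version string) string {
	if i := strings.IndexAny(version, "-+"); i >= 0 {
		return version[:i]
	}
	return version
}

func main() {
	for _, v := range []string{"v1.27.4-gke.900", "v1.29.7", "v1.25.16-eks-1234"} {
		fmt.Printf("%s -> %s\n", v, stripPrerelease(v))
	}
}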
if len(envOverride.Chart.ReferenceChart) == 0 { refChartByte, err := impl.chartTemplateService.GetByteArrayRefChart(chartMetaData, referenceTemplatePath) if err != nil { impl.logger.Errorw("ref chart commit error on cd trigger", "err", err, "req", overrideRequest) - return false, err + return false, nil, err } ch := envOverride.Chart ch.ReferenceChart = refChartByte @@ -1031,7 +1074,7 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, err = impl.chartRepository.Update(ch) if err != nil { impl.logger.Errorw("chart update error", "err", err, "req", overrideRequest) - return false, err + return false, nil, err } referenceChartByte = refChartByte } @@ -1074,15 +1117,15 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, if err != nil { impl.logger.Errorw("error in updating helm application for cd pipelineModel", "err", err) if util.IsErrorContextCancelled(err) { - return false, cdWorkflow.ErrorDeploymentSuperseded + return false, nil, cdWorkflow.ErrorDeploymentSuperseded } else if util.IsErrorContextDeadlineExceeded(err) { - return false, context.DeadlineExceeded + return false, nil, context.DeadlineExceeded } apiError := clientErrors.ConvertToApiError(err) if apiError != nil { - return false, apiError + return false, nil, apiError } - return false, err + return false, nil, err } else { impl.logger.Debugw("updated helm application", "response", updateApplicationResponse, "isSuccess", updateApplicationResponse.Success) } @@ -1095,7 +1138,7 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, // For connection related errors, no need to update the db if err != nil && strings.Contains(err.Error(), "connection error") { impl.logger.Errorw("error in helm install custom chart", "err", err) - return false, err + return false, nil, err } // IMP: update cd pipelineModel to mark deployment app created, even if helm install fails @@ -1109,20 +1152,20 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, impl.logger.Errorw("failed to update deployment app created flag in pipelineModel table", "err", err) } if util.IsErrorContextCancelled(err) { - return false, cdWorkflow.ErrorDeploymentSuperseded + return false, nil, cdWorkflow.ErrorDeploymentSuperseded } else if util.IsErrorContextDeadlineExceeded(err) { - return false, context.DeadlineExceeded + return false, nil, context.DeadlineExceeded } apiError := clientErrors.ConvertToApiError(err) if apiError != nil { - return false, apiError + return false, nil, apiError } - return false, err + return false, nil, err } if pgErr != nil { impl.logger.Errorw("failed to update deployment app created flag in pipelineModel table", "err", err) - return false, err + return false, nil, err } impl.logger.Debugw("received helm release response", "helmResponse", helmResponse, "isSuccess", helmResponse.Success) @@ -1132,10 +1175,10 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(ctx context.Context, err := impl.cdWorkflowCommonService.UpdateNonTerminalStatusInRunner(newCtx, overrideRequest.WfrId, overrideRequest.UserId, cdWorkflow.WorkflowInProgress) if err != nil { impl.logger.Errorw("error in updating the workflow runner status, createHelmAppForCdPipeline", "err", err) - return false, err + return false, nil, err } } - return true, nil + return true, referenceChartByte, nil } func (impl *TriggerServiceImpl) deployArgoCdApp(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, @@ -1208,6 +1251,11 @@ func (impl 
*TriggerServiceImpl) updateArgoPipeline(ctx context.Context, pipeline TargetRevision: bean7.TargetRevisionMaster, PatchType: bean7.PatchTypeMerge, } + url, err := impl.gitOperationService.GetRepoUrlWithUserName(deploymentConfig.RepoURL) + if err != nil { + return false, err + } + patchRequestDto.GitRepoUrl = url err = impl.argoClientWrapperService.PatchArgoCdApp(newCtx, patchRequestDto) if err != nil { impl.logger.Errorw("error in patching argo pipeline", "err", err, "req", patchRequestDto) @@ -1270,6 +1318,10 @@ func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(ctx context.Cont RepoUrl: chart.GitRepoUrl, AutoSyncEnabled: impl.ACDConfig.ArgoCDAutoSyncEnabled, } + appRequest.RepoUrl, err = impl.gitOperationService.GetRepoUrlWithUserName(appRequest.RepoUrl) + if err != nil { + return "", err + } argoAppName, err := impl.argoK8sClient.CreateAcdApp(newCtx, appRequest, argocdServer.ARGOCD_APPLICATION_TEMPLATE) if err != nil { return "", err @@ -1313,11 +1365,19 @@ func (impl *TriggerServiceImpl) helmInstallReleaseWithCustomChart(ctx context.Co return impl.helmAppClient.InstallReleaseWithCustomChart(newCtx, &helmInstallRequest) } -func (impl *TriggerServiceImpl) writeCDTriggerEvent(overrideRequest *bean3.ValuesOverrideRequest, artifact *repository3.CiArtifact, releaseId, pipelineOverrideId int) { +func (impl *TriggerServiceImpl) getEnrichedWorkflowRunner(overrideRequest *bean3.ValuesOverrideRequest, artifact *repository3.CiArtifact, wfrId int) *pipelineConfig.CdWorkflowRunner { + return nil +} + +func (impl *TriggerServiceImpl) writeCDTriggerEvent(overrideRequest *bean3.ValuesOverrideRequest, artifact *repository3.CiArtifact, releaseId, pipelineOverrideId, wfrId int) { - event, _ := impl.eventFactory.Build(util2.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util2.CD) - impl.logger.Debugw("event writeCDTriggerEvent", "event", event) - event = impl.eventFactory.BuildExtraCDData(event, nil, pipelineOverrideId, bean3.CD_WORKFLOW_TYPE_DEPLOY) + event, err := impl.eventFactory.Build(util2.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util2.CD) + if err != nil { + impl.logger.Errorw("error in building cd trigger event", "cdPipelineId", overrideRequest.PipelineId, "err", err) + } + impl.logger.Debugw("event WriteCDTriggerEvent", "event", event) + wfr := impl.getEnrichedWorkflowRunner(overrideRequest, artifact, wfrId) + event = impl.eventFactory.BuildExtraCDData(event, wfr, pipelineOverrideId, bean3.CD_WORKFLOW_TYPE_DEPLOY) _, evtErr := impl.eventClient.WriteNotificationEvent(event) if evtErr != nil { impl.logger.Errorw("CD trigger event not sent", "error", evtErr) @@ -1355,6 +1415,7 @@ func (impl *TriggerServiceImpl) markImageScanDeployed(ctx context.Context, appId imageDigest string, isScanEnabled bool, image string) error { _, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.markImageScanDeployed") defer span.End() + // TODO KB: send NATS event for self consumption impl.logger.Debugw("mark image scan deployed for devtron app, from cd auto or manual trigger", "imageDigest", imageDigest) executionHistory, err := impl.imageScanHistoryRepository.FindByImageAndDigest(imageDigest, image) if err != nil && !errors.Is(err, pg.ErrNoRows) { diff --git a/pkg/deployment/trigger/devtronApps/bean/bean.go b/pkg/deployment/trigger/devtronApps/bean/bean.go index c4f4a5ca5cb..bd4b5f13ab0 100644 --- a/pkg/deployment/trigger/devtronApps/bean/bean.go +++ b/pkg/deployment/trigger/devtronApps/bean/bean.go @@ -48,6 
+48,7 @@ type TriggerRequest struct { RefCdWorkflowRunnerId int RunStageInEnvNamespace string WorkflowType bean.WorkflowType + CdWorkflowRunnerId int TriggerContext } @@ -58,8 +59,18 @@ type TriggerContext struct { // ReferenceId is a unique identifier for the workflow runner // refer pipelineConfig.CdWorkflowRunner ReferenceId *string + + // manual or automatic + TriggerType TriggerType } +type TriggerType int + +const ( + Automatic TriggerType = 1 + Manual TriggerType = 2 +) + type DeploymentType = string const ( diff --git a/pkg/eventProcessor/in/CDPipelineEventProcessorService.go b/pkg/eventProcessor/in/CDPipelineEventProcessorService.go index 669ce12ebdb..108f36c3941 100644 --- a/pkg/eventProcessor/in/CDPipelineEventProcessorService.go +++ b/pkg/eventProcessor/in/CDPipelineEventProcessorService.go @@ -86,7 +86,7 @@ func (impl *CDPipelineEventProcessorImpl) SubscribeCDBulkTriggerTopic() error { ReferenceId: pointer.String(msg.MsgId), Context: ctx, } - _, err = impl.cdTriggerService.ManualCdTrigger(triggerContext, event.ValuesOverrideRequest) + _, _, err = impl.cdTriggerService.ManualCdTrigger(triggerContext, event.ValuesOverrideRequest) if err != nil { impl.logger.Errorw("Error triggering CD", "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, "msg", msg.Data, "err", err) } diff --git a/pkg/generateManifest/DeploymentTemplateService.go b/pkg/generateManifest/DeploymentTemplateService.go index d4a7a2270d0..fadd1927261 100644 --- a/pkg/generateManifest/DeploymentTemplateService.go +++ b/pkg/generateManifest/DeploymentTemplateService.go @@ -62,6 +62,8 @@ type DeploymentTemplateService interface { GetDeploymentTemplate(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) GenerateManifest(ctx context.Context, request *DeploymentTemplateRequest, valuesYaml string) (*openapi2.TemplateChartResponse, error) GetRestartWorkloadData(ctx context.Context, appIds []int, envId int) (*RestartPodResponse, error) + GetDeploymentTemplateWithResolvedData(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) + ResolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error) } type DeploymentTemplateServiceImpl struct { Logger *zap.SugaredLogger @@ -201,6 +203,7 @@ func (impl DeploymentTemplateServiceImpl) FetchDeploymentsWithChartRefs(appId in func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) { var result DeploymentTemplateResponse + var response *DeploymentTemplateResponse var values, resolvedValue string var err error var variableSnapshot map[string]string @@ -217,9 +220,9 @@ func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Cont _, values, err = impl.chartRefService.GetAppOverrideForDefaultTemplate(request.ChartRefId) resolvedValue = values case repository.PublishedOnEnvironments: - values, resolvedValue, variableSnapshot, err = impl.fetchResolvedTemplateForPublishedEnvs(ctx, request) + response, err = impl.fetchResolvedTemplateForPublishedEnvs(ctx, request) case repository.DeployedOnSelfEnvironment, repository.DeployedOnOtherEnvironment: - values, resolvedValue, variableSnapshot, err = impl.fetchTemplateForDeployedEnv(ctx, request) + response, err = impl.fetchTemplateForDeployedEnv(ctx, request) } if err != nil { impl.Logger.Errorw("error in getting values", "err", err) @@ -227,6 +230,42 @@ func (impl DeploymentTemplateServiceImpl) 
GetDeploymentTemplate(ctx context.Cont } } + if request.RequestDataMode == Values { + result.Data = values + result.ResolvedData = resolvedValue + result.VariableSnapshot = variableSnapshot + if response != nil { + result = ConvertPointerDeploymentTemplateResponseToNonPointer(response) + } + return result, nil + } + if variableSnapshot != nil { + result.VariableSnapshot = variableSnapshot + } + request = impl.setRequestMetadata(&request) + manifest, err := impl.GenerateManifest(ctx, &request, resolvedValue) + if err != nil { + return result, err + } + if manifest != nil { + result.Data = *manifest.Manifest + } + return result, nil +} + +func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplateWithResolvedData(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) { + var result DeploymentTemplateResponse + var values, resolvedValue string + var err error + var variableSnapshot map[string]string + + if request.Values != "" { + values = request.Values + resolvedValue, variableSnapshot, err = impl.resolveTemplateVariables(ctx, request.Values, request) + if err != nil { + return result, err + } + } if request.RequestDataMode == Values { result.Data = values result.ResolvedData = resolvedValue @@ -281,7 +320,7 @@ func (impl DeploymentTemplateServiceImpl) setRequestMetadata(request *Deployment return *request } -func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { +func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(ctx context.Context, request DeploymentTemplateRequest) (*DeploymentTemplateResponse, error) { var values string override, err := impl.propertiesConfigService.GetEnvironmentProperties(request.AppId, request.EnvId, request.ChartRefId) if err == nil && override.GlobalConfig != nil { @@ -292,24 +331,47 @@ func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs( } } else { impl.Logger.Errorw("error in getting overridden values", "err", err) - return "", "", nil, err + return nil, err + } + // handle here for chart ref id in case + chartRefId := override.EnvironmentConfig.ChartRefId + if chartRefId == 0 { + chartRefId = override.GlobalChartRefId + } + _, _, version, _, err := impl.chartRefService.GetRefChart(chartRefId) + if err != nil { + impl.Logger.Errorw("error in getting chart ref by chartRefId ", "chartRefId", request.ChartRefId, "err", err) + return nil, err } resolvedTemplate, variableSnapshot, err := impl.resolveTemplateVariables(ctx, values, request) if err != nil { - return values, values, variableSnapshot, err + impl.Logger.Errorw("error in resolving template variables for env override ", "deploymentTemplateRequest", request, "err", err) + return nil, err } - return values, resolvedTemplate, variableSnapshot, nil + return &DeploymentTemplateResponse{ + Data: values, + ResolvedData: resolvedTemplate, + VariableSnapshot: variableSnapshot, + TemplateVersion: version, + IsAppMetricsEnabled: *override.AppMetrics, + }, nil } -func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { +func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(ctx context.Context, request DeploymentTemplateRequest) (*DeploymentTemplateResponse, error) { historyObject, err := impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(ctx, 
request.DeploymentTemplateHistoryId, request.PipelineId) if err != nil { impl.Logger.Errorw("error in getting deployment template history", "err", err, "id", request.DeploymentTemplateHistoryId, "pipelineId", request.PipelineId) - return "", "", nil, err + return nil, err } //todo Subhashish solve variable leak - return historyObject.CodeEditorValue.Value, historyObject.CodeEditorValue.ResolvedValue, historyObject.CodeEditorValue.VariableSnapshot, nil + return &DeploymentTemplateResponse{ + Data: historyObject.CodeEditorValue.Value, + ResolvedData: historyObject.CodeEditorValue.ResolvedValue, + VariableSnapshot: historyObject.CodeEditorValue.VariableSnapshot, + TemplateVersion: historyObject.TemplateVersion, + IsAppMetricsEnabled: *historyObject.IsAppMetricsEnabled, + }, nil } func (impl DeploymentTemplateServiceImpl) resolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error) { @@ -557,3 +619,21 @@ func (impl DeploymentTemplateServiceImpl) GetRestartWorkloadData(ctx context.Con } return podResp, nil } + +func (impl DeploymentTemplateServiceImpl) ResolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error) { + + isSuperAdmin, err := util2.GetIsSuperAdminFromContext(ctx) + if err != nil { + return values, nil, err + } + scope, err := impl.extractScopeData(request) + if err != nil { + return values, nil, err + } + maskUnknownVariableForHelmGenerate := request.RequestDataMode == Manifest + resolvedTemplate, variableSnapshot, err := impl.scopedVariableManager.ExtractVariablesAndResolveTemplate(scope, values, parsers.JsonVariableTemplate, isSuperAdmin, maskUnknownVariableForHelmGenerate) + if err != nil { + return values, variableSnapshot, err + } + return resolvedTemplate, variableSnapshot, nil +} diff --git a/pkg/generateManifest/adapter.go b/pkg/generateManifest/adapter.go new file mode 100644 index 00000000000..9528ec2d8b2 --- /dev/null +++ b/pkg/generateManifest/adapter.go @@ -0,0 +1,8 @@ +package generateManifest + +func ConvertPointerDeploymentTemplateResponseToNonPointer(r *DeploymentTemplateResponse) DeploymentTemplateResponse { + if r != nil { + return *r + } + return DeploymentTemplateResponse{} +} diff --git a/pkg/generateManifest/bean.go b/pkg/generateManifest/bean.go index 124a0cf0831..26a465dd3f4 100644 --- a/pkg/generateManifest/bean.go +++ b/pkg/generateManifest/bean.go @@ -69,9 +69,11 @@ var ReleaseIdentifier = &gRPC.ReleaseIdentifier{ } type DeploymentTemplateResponse struct { - Data string `json:"data"` - ResolvedData string `json:"resolvedData"` - VariableSnapshot map[string]string `json:"variableSnapshot"` + Data string `json:"data"` + ResolvedData string `json:"resolvedData"` + VariableSnapshot map[string]string `json:"variableSnapshot"` + TemplateVersion string `json:"-"` + IsAppMetricsEnabled bool `json:"-"` } type RestartPodResponse struct { diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index 68900146147..e62d080bee5 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/adapter/cdWorkflow" + bean2 "github.com/devtron-labs/devtron/pkg/bean" common2 "github.com/devtron-labs/devtron/pkg/deployment/common" util2 "github.com/devtron-labs/devtron/pkg/pipeline/util" "os" @@ -64,7 +65,7 @@ type CdHandler interface { FetchCdWorkflowDetails(appId int, environmentId int, pipelineId int, 
buildId int) (types.WorkflowResponse, error) DownloadCdWorkflowArtifacts(buildId int) (*os.File, error) FetchCdPrePostStageStatus(pipelineId int) ([]pipelineBean.CdWorkflowWithArtifact, error) - CancelStage(workflowRunnerId int, userId int32) (int, error) + CancelStage(workflowRunnerId int, forceAbort bool, userId int32) (int, error) FetchAppWorkflowStatusForTriggerView(appId int) ([]*pipelineConfig.CdWorkflowStatus, error) FetchAppWorkflowStatusForTriggerViewForEnvironment(request resourceGroup2.ResourceGroupingRequest, token string) ([]*pipelineConfig.CdWorkflowStatus, error) FetchAppDeploymentStatusForEnvironments(request resourceGroup2.ResourceGroupingRequest, token string) ([]*pipelineConfig.AppDeploymentStatus, error) @@ -133,16 +134,12 @@ func NewCdHandlerImpl(Logger *zap.SugaredLogger, userService user.UserService, return cdh } -func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, error) { +func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, forceAbort bool, userId int32) (int, error) { workflowRunner, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(workflowRunnerId) if err != nil { impl.Logger.Errorw("err", "err", err) return 0, err } - if !(string(v1alpha1.NodePending) == workflowRunner.Status || string(v1alpha1.NodeRunning) == workflowRunner.Status) { - impl.Logger.Info("cannot cancel stage, stage not in progress") - return 0, errors.New("cannot cancel stage, stage not in progress") - } pipeline, err := impl.pipelineRepository.FindById(workflowRunner.CdWorkflow.PipelineId) if err != nil { impl.Logger.Errorw("error while fetching cd pipeline", "err", err) @@ -175,11 +172,35 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, } } // Terminate workflow - err = impl.workflowService.TerminateWorkflow(workflowRunner.ExecutorType, workflowRunner.Name, workflowRunner.Namespace, restConfig, isExtCluster, nil) - if err != nil { + cancelWfDtoRequest := &types.CancelWfRequestDto{ + ExecutorType: workflowRunner.ExecutorType, + WorkflowName: workflowRunner.Name, + Namespace: workflowRunner.Namespace, + RestConfig: restConfig, + IsExt: isExtCluster, + Environment: nil, + } + err = impl.workflowService.TerminateWorkflow(cancelWfDtoRequest) + if err != nil && forceAbort { + impl.Logger.Errorw("error in terminating workflow, with force abort flag as true", "workflowName", workflowRunner.Name, "err", err) + cancelWfDtoRequest.WorkflowGenerateName = fmt.Sprintf("%d-%s", workflowRunnerId, workflowRunner.Name) + err1 := impl.workflowService.TerminateDanglingWorkflows(cancelWfDtoRequest) + if err1 != nil { + impl.Logger.Errorw("error in terminating dangling workflows", "cancelWfDtoRequest", cancelWfDtoRequest, "err", err1) + // ignoring error here in case of force abort, confirmed from product + } + } else if err != nil { impl.Logger.Error("cannot terminate wf runner", "err", err) return 0, err } + if forceAbort { + err = impl.handleForceAbortCaseForCdStage(workflowRunner, forceAbort) + if err != nil { + impl.Logger.Errorw("error in handleForceAbortCaseForCdStage", "forceAbortFlag", forceAbort, "workflowRunner", workflowRunner, "err", err) + return 0, err + } + return workflowRunner.Id, nil + } if len(workflowRunner.ImagePathReservationIds) > 0 { err := impl.customTagService.DeactivateImagePathReservationByImageIds(workflowRunner.ImagePathReservationIds) if err != nil { @@ -198,6 +219,34 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, return workflowRunner.Id, nil } +func (impl
*CdHandlerImpl) updateWorkflowRunnerForForceAbort(workflowRunner *pipelineConfig.CdWorkflowRunner) error { + workflowRunner.Status = executors.WorkflowCancel + workflowRunner.PodStatus = string(bean2.Failed) + workflowRunner.Message = FORCE_ABORT_MESSAGE_AFTER_STARTING_STAGE + err := impl.cdWorkflowRepository.UpdateWorkFlowRunner(workflowRunner) + if err != nil { + impl.Logger.Errorw("error in updating workflow status in cd workflow runner in force abort case", "err", err) + return err + } + return nil +} + +func (impl *CdHandlerImpl) handleForceAbortCaseForCdStage(workflowRunner *pipelineConfig.CdWorkflowRunner, forceAbort bool) error { + isWorkflowInNonTerminalStage := workflowRunner.Status == string(v1alpha1.NodePending) || workflowRunner.Status == string(v1alpha1.NodeRunning) + if !isWorkflowInNonTerminalStage { + if forceAbort { + return impl.updateWorkflowRunnerForForceAbort(workflowRunner) + } else { + return &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: "cannot cancel stage, stage not in progress"} + } + } + //this arises when someone deletes the workflow in resource browser and wants to force abort a cd stage(pre/post) + if workflowRunner.Status == string(v1alpha1.NodeRunning) && forceAbort { + return impl.updateWorkflowRunnerForForceAbort(workflowRunner) + } + return nil +} + func (impl *CdHandlerImpl) UpdateWorkflow(workflowStatus v1alpha1.WorkflowStatus) (int, string, error) { wfStatusRs := impl.extractWorkfowStatus(workflowStatus) workflowName, status, podStatus, message, podName := wfStatusRs.WorkflowName, wfStatusRs.Status, wfStatusRs.PodStatus, wfStatusRs.Message, wfStatusRs.PodName diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 1488002b366..b247a2f9919 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -163,7 +163,7 @@ const Running = "Running" const Starting = "Starting" const POD_DELETED_MESSAGE = "pod deleted" const TERMINATE_MESSAGE = "workflow shutdown with strategy: Terminate" -const ABORT_MESSAGE_AFTER_STARTING_STAGE = "workflow shutdown with strategy: Force Abort" +const FORCE_ABORT_MESSAGE_AFTER_STARTING_STAGE = "workflow shutdown with strategy: Force Abort" func (impl *CiHandlerImpl) CheckAndReTriggerCI(workflowStatus v1alpha1.WorkflowStatus) error { @@ -592,20 +592,9 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int func (impl *CiHandlerImpl) CancelBuild(workflowId int, forceAbort bool) (int, error) { workflow, err := impl.ciWorkflowRepository.FindById(workflowId) if err != nil { - impl.Logger.Errorw("err", "err", err) + impl.Logger.Errorw("error in finding ci-workflow by workflow id", "ciWorkflowId", workflowId, "err", err) return 0, err } - if !(string(v1alpha1.NodePending) == workflow.Status || string(v1alpha1.NodeRunning) == workflow.Status) { - if forceAbort { - return impl.cancelBuildAfterStartWorkflowStage(workflow) - } else { - return 0, &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: "cannot cancel build, build not in progress"} - } - } - //this arises when someone deletes the workflow in resource browser and wants to force abort a ci - if workflow.Status == string(v1alpha1.NodeRunning) && forceAbort { - return impl.cancelBuildAfterStartWorkflowStage(workflow) - } isExt := workflow.Namespace != DefaultCiWorkflowNamespace var env *repository3.Environment var restConfig *rest.Config @@ -615,15 +604,40 @@ func (impl *CiHandlerImpl) CancelBuild(workflowId int, forceAbort bool) (int, er return 0, err } } - // Terminate workflow - err = 
impl.workflowService.TerminateWorkflow(workflow.ExecutorType, workflow.Name, workflow.Namespace, restConfig, isExt, env) - if err != nil && strings.Contains(err.Error(), "cannot find workflow") { + cancelWfDtoRequest := &types.CancelWfRequestDto{ + ExecutorType: workflow.ExecutorType, + WorkflowName: workflow.Name, + Namespace: workflow.Namespace, + RestConfig: restConfig, + IsExt: isExt, + Environment: env, + } + // Terminate workflow + err = impl.workflowService.TerminateWorkflow(cancelWfDtoRequest) + if err != nil && forceAbort { + impl.Logger.Errorw("error in terminating workflow, with force abort flag flag as true", "workflowName", workflow.Name, "err", err) + + cancelWfDtoRequest.WorkflowGenerateName = fmt.Sprintf("%d-%s", workflowId, workflow.Name) + err1 := impl.workflowService.TerminateDanglingWorkflows(cancelWfDtoRequest) + if err1 != nil { + impl.Logger.Errorw("error in terminating dangling workflows", "cancelWfDtoRequest", cancelWfDtoRequest, "err", err) + // ignoring error here in case of force abort, confirmed from product + } + } else if err != nil && strings.Contains(err.Error(), "cannot find workflow") { return 0, &util.ApiError{Code: "200", HttpStatusCode: http.StatusBadRequest, UserMessage: err.Error()} } else if err != nil { impl.Logger.Errorw("cannot terminate wf", "err", err) return 0, err } + if forceAbort { + err = impl.handleForceAbortCaseForCi(workflow, forceAbort) + if err != nil { + impl.Logger.Errorw("error in handleForceAbortCaseForCi", "forceAbortFlag", forceAbort, "workflow", workflow, "err", err) + return 0, err + } + return workflow.Id, nil + } workflow.Status = executors.WorkflowCancel if workflow.ExecutorType == cdWorkflow.WORKFLOW_EXECUTOR_TYPE_SYSTEM { @@ -652,16 +666,32 @@ func (impl *CiHandlerImpl) CancelBuild(workflowId int, forceAbort bool) (int, er return workflow.Id, nil } -func (impl *CiHandlerImpl) cancelBuildAfterStartWorkflowStage(workflow *pipelineConfig.CiWorkflow) (int, error) { +func (impl *CiHandlerImpl) handleForceAbortCaseForCi(workflow *pipelineConfig.CiWorkflow, forceAbort bool) error { + isWorkflowInNonTerminalStage := workflow.Status == string(v1alpha1.NodePending) || workflow.Status == string(v1alpha1.NodeRunning) + if !isWorkflowInNonTerminalStage { + if forceAbort { + return impl.updateWorkflowForForceAbort(workflow) + } else { + return &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: "cannot cancel build, build not in progress"} + } + } + //this arises when someone deletes the workflow in resource browser and wants to force abort a ci + if workflow.Status == string(v1alpha1.NodeRunning) && forceAbort { + return impl.updateWorkflowForForceAbort(workflow) + } + return nil +} + +func (impl *CiHandlerImpl) updateWorkflowForForceAbort(workflow *pipelineConfig.CiWorkflow) error { workflow.Status = executors.WorkflowCancel workflow.PodStatus = string(bean.Failed) - workflow.Message = ABORT_MESSAGE_AFTER_STARTING_STAGE + workflow.Message = FORCE_ABORT_MESSAGE_AFTER_STARTING_STAGE err := impl.ciWorkflowRepository.UpdateWorkFlow(workflow) if err != nil { impl.Logger.Errorw("error in updating workflow status", "err", err) - return 0, err + return err } - return workflow.Id, nil + return nil } func (impl *CiHandlerImpl) getRestConfig(workflow *pipelineConfig.CiWorkflow) (*rest.Config, error) { diff --git a/pkg/pipeline/ConfigMapService.go b/pkg/pipeline/ConfigMapService.go index d2e0bb883c9..bbfb74148bf 100644 --- a/pkg/pipeline/ConfigMapService.go +++ b/pkg/pipeline/ConfigMapService.go @@ -47,10 +47,6 @@ const ( 
HashiCorpVault string = "HashiCorpVault" ) -type ConfigsList struct { - ConfigData []*bean.ConfigData `json:"maps"` -} - type ConfigMapService interface { CMGlobalAddUpdate(configMapRequest *bean.ConfigDataRequest) (*bean.ConfigDataRequest, error) CMGlobalFetch(appId int) (*bean.ConfigDataRequest, error) @@ -165,7 +161,7 @@ func (impl ConfigMapServiceImpl) CMGlobalAddUpdate(configMapRequest *bean.Config impl.logger.Errorw("error while fetching from db", "error", err) return nil, err } - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -208,7 +204,7 @@ func (impl ConfigMapServiceImpl) CMGlobalAddUpdate(configMapRequest *bean.Config } else { //creating config map record for first time - configsList := &ConfigsList{ + configsList := &bean.ConfigsList{ ConfigData: configMapRequest.ConfigData, } configDataByte, err := json.Marshal(configsList) @@ -254,7 +250,7 @@ func (impl ConfigMapServiceImpl) CMGlobalFetch(appId int) (*bean.ConfigDataReque impl.logger.Debugw("no config map data found for this request", "appId", appId) } - configMapGlobalList := &ConfigsList{} + configMapGlobalList := &bean.ConfigsList{} if len(configMapGlobal.ConfigMapData) > 0 { err = json.Unmarshal([]byte(configMapGlobal.ConfigMapData), configMapGlobalList) if err != nil { @@ -301,7 +297,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentAddUpdate(configMapRequest *bean.C return nil, err } if err == nil && model.Id > 0 { - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -345,7 +341,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentAddUpdate(configMapRequest *bean.C } else if err == pg.ErrNoRows { //creating config map record for first time - configsList := &ConfigsList{ + configsList := &bean.ConfigsList{ ConfigData: configMapRequest.ConfigData, } configDataByte, err := json.Marshal(configsList) @@ -391,7 +387,7 @@ func (impl ConfigMapServiceImpl) CMGlobalFetchForEdit(name string, id int) (*bea impl.logger.Debugw("no config map data found for this request", "id", id) } - configMapGlobalList := &ConfigsList{} + configMapGlobalList := &bean.ConfigsList{} if len(configMapGlobal.ConfigMapData) > 0 { err = json.Unmarshal([]byte(configMapGlobal.ConfigMapData), configMapGlobalList) if err != nil { @@ -439,7 +435,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentFetch(appId int, envId int) (*bean if pg.ErrNoRows == err { impl.logger.Debugw("no config map data found for this request", "appId", appId) } - configMapGlobalList := &ConfigsList{} + configMapGlobalList := &bean.ConfigsList{} if len(configMapGlobal.ConfigMapData) > 0 { err = json.Unmarshal([]byte(configMapGlobal.ConfigMapData), configMapGlobalList) if err != nil { @@ -454,7 +450,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentFetch(appId int, envId int) (*bean if pg.ErrNoRows == err { impl.logger.Debugw("no config map data found for this request", "appId", appId) } - configsListEnvLevel := &ConfigsList{} + configsListEnvLevel := &bean.ConfigsList{} if len(configMapEnvLevel.ConfigMapData) > 0 { err = json.Unmarshal([]byte(configMapEnvLevel.ConfigMapData), configsListEnvLevel) if err != nil { @@ -918,7 +914,7 @@ func (impl ConfigMapServiceImpl) CMGlobalDelete(name string, id int, userId int3 impl.logger.Errorw("error while fetching from db", "error", err) return false, err } - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs 
[]*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -974,7 +970,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentDelete(name string, id int, userId impl.logger.Errorw("error while fetching from db", "error", err) return false, err } - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -1140,7 +1136,7 @@ func (impl ConfigMapServiceImpl) CMGlobalDeleteByAppId(name string, appId int, u impl.logger.Errorw("error while fetching from db", "error", err) return false, err } - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -1190,7 +1186,7 @@ func (impl ConfigMapServiceImpl) CMEnvironmentDeleteByAppIdAndEnvId(name string, impl.logger.Errorw("error while fetching from db", "error", err) return false, err } - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} found := false var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { @@ -1540,7 +1536,7 @@ func (impl ConfigMapServiceImpl) ConfigSecretGlobalBulkPatch(bulkPatchRequest *b continue } if bulkPatchRequest.Type == "CM" { - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { err = json.Unmarshal([]byte(model.ConfigMapData), configsList) @@ -1645,7 +1641,7 @@ func (impl ConfigMapServiceImpl) ConfigSecretEnvironmentBulkPatch(bulkPatchReque continue } if bulkPatchRequest.Type == "CM" { - configsList := &ConfigsList{} + configsList := &bean.ConfigsList{} var configs []*bean.ConfigData if len(model.ConfigMapData) > 0 { err = json.Unmarshal([]byte(model.ConfigMapData), configsList) diff --git a/pkg/pipeline/DeploymentConfigService.go b/pkg/pipeline/DeploymentConfigService.go index 798f7da2c4d..bb35e4fc41d 100644 --- a/pkg/pipeline/DeploymentConfigService.go +++ b/pkg/pipeline/DeploymentConfigService.go @@ -38,6 +38,8 @@ import ( type PipelineDeploymentConfigService interface { GetLatestDeploymentConfigurationByPipelineId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) + GetMergedCMCSConfigMap(appLevelConfig, envLevelConfig string, configType repository2.ConfigType) (map[string]*bean.ConfigData, error) + GetLatestPipelineStrategyConfig(pipeline *pipelineConfig.Pipeline) (*history.HistoryDetailDto, error) } type PipelineDeploymentConfigServiceImpl struct { diff --git a/pkg/pipeline/PropertiesConfig.go b/pkg/pipeline/PropertiesConfig.go index 26c387897ef..ad4e2806d67 100644 --- a/pkg/pipeline/PropertiesConfig.go +++ b/pkg/pipeline/PropertiesConfig.go @@ -129,6 +129,9 @@ func (impl PropertiesConfigServiceImpl) GetEnvironmentProperties(appId, environm IsBasicViewLocked: envOverride.IsBasicViewLocked, CurrentViewEditor: envOverride.CurrentViewEditor, } + if chartRefId == 0 && envOverride.Chart != nil { + environmentProperties.ChartRefId = envOverride.Chart.ChartRefId + } if environmentPropertiesResponse.Namespace == "" { environmentPropertiesResponse.Namespace = envOverride.Namespace @@ -140,8 +143,10 @@ func (impl PropertiesConfigServiceImpl) GetEnvironmentProperties(appId, environm } if errors.IsNotFound(err) { environmentProperties.Id = 0 - environmentProperties.ChartRefId = chartRefId environmentProperties.IsOverride = false + if chartRefId > 0 { + environmentProperties.ChartRefId = chartRefId + } } else { environmentProperties.Id = ecOverride.Id environmentProperties.Latest = 
ecOverride.Latest @@ -153,6 +158,9 @@ func (impl PropertiesConfigServiceImpl) GetEnvironmentProperties(appId, environm environmentProperties.Active = ecOverride.Active environmentProperties.IsBasicViewLocked = ecOverride.IsBasicViewLocked environmentProperties.CurrentViewEditor = ecOverride.CurrentViewEditor + if chartRefId == 0 && ecOverride.Chart != nil { + environmentProperties.ChartRefId = ecOverride.Chart.ChartRefId + } } environmentPropertiesResponse.ChartRefId = chartRefId environmentPropertiesResponse.EnvironmentConfig = *environmentProperties diff --git a/pkg/pipeline/WorkflowService.go b/pkg/pipeline/WorkflowService.go index 81f312058c6..c51b74b0181 100644 --- a/pkg/pipeline/WorkflowService.go +++ b/pkg/pipeline/WorkflowService.go @@ -22,6 +22,7 @@ import ( "errors" v1alpha12 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/util" + "github.com/devtron-labs/common-lib/utils" "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/devtron/api/bean" @@ -39,6 +40,8 @@ import ( v12 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/rest" + "net/http" + "strconv" "strings" ) @@ -51,7 +54,8 @@ type WorkflowService interface { GetWorkflowStatus(executorType cdWorkflow.WorkflowExecutorType, name string, namespace string, restConfig *rest.Config) (*types.WorkflowStatus, error) // ListAllWorkflows(namespace string) (*v1alpha1.WorkflowList, error) // UpdateWorkflow(wf *v1alpha1.Workflow) (*v1alpha1.Workflow, error) - TerminateWorkflow(executorType cdWorkflow.WorkflowExecutorType, name string, namespace string, restConfig *rest.Config, isExt bool, environment *repository.Environment) error + TerminateWorkflow(cancelWfDtoRequest *types.CancelWfRequestDto) error + TerminateDanglingWorkflows(cancelWfDtoRequest *types.CancelWfRequestDto) error } type WorkflowServiceImpl struct { @@ -157,12 +161,10 @@ func (impl *WorkflowServiceImpl) createWorkflowTemplate(workflowRequest *types.W } workflowMainContainer, err := workflowRequest.GetWorkflowMainContainer(impl.ciCdConfig, infraConfiguration, workflowJson, &workflowTemplate, workflowConfigMaps, workflowSecrets) - if err != nil { impl.Logger.Errorw("error occurred while getting workflow main container", "err", err) return bean3.WorkflowTemplate{}, err } - workflowTemplate.Containers = []v12.Container{workflowMainContainer} impl.updateBlobStorageConfig(workflowRequest, &workflowTemplate) if workflowRequest.Type == bean3.CI_WORKFLOW_PIPELINE_TYPE || workflowRequest.Type == bean3.JOB_WORKFLOW_PIPELINE_TYPE { @@ -352,27 +354,42 @@ func (impl *WorkflowServiceImpl) GetWorkflowStatus(executorType cdWorkflow.Workf return wfStatus, err } -func (impl *WorkflowServiceImpl) TerminateWorkflow(executorType cdWorkflow.WorkflowExecutorType, name string, namespace string, restConfig *rest.Config, isExt bool, environment *repository.Environment) error { - impl.Logger.Debugw("terminating wf", "name", name) +func (impl *WorkflowServiceImpl) TerminateWorkflow(cancelWfDtoRequest *types.CancelWfRequestDto) error { + impl.Logger.Debugw("terminating wf", "name", cancelWfDtoRequest.WorkflowName) var err error - if executorType != "" { - workflowExecutor := impl.getWorkflowExecutor(executorType) + if cancelWfDtoRequest.ExecutorType != "" { + workflowExecutor := impl.getWorkflowExecutor(cancelWfDtoRequest.ExecutorType) if workflowExecutor == nil { return errors.New("workflow 
executor not found") } - if restConfig == nil { - restConfig = impl.config + if cancelWfDtoRequest.RestConfig == nil { + cancelWfDtoRequest.RestConfig = impl.config } - err = workflowExecutor.TerminateWorkflow(name, namespace, restConfig) + err = workflowExecutor.TerminateWorkflow(cancelWfDtoRequest.WorkflowName, cancelWfDtoRequest.Namespace, cancelWfDtoRequest.RestConfig) } else { - wfClient, err := impl.getWfClient(environment, namespace, isExt) + wfClient, err := impl.getWfClient(cancelWfDtoRequest.Environment, cancelWfDtoRequest.Namespace, cancelWfDtoRequest.IsExt) if err != nil { return err } - err = util.TerminateWorkflow(context.Background(), wfClient, name) + err = util.TerminateWorkflow(context.Background(), wfClient, cancelWfDtoRequest.WorkflowName) + } + return err +} + +func (impl *WorkflowServiceImpl) TerminateDanglingWorkflows(cancelWfDtoRequest *types.CancelWfRequestDto) error { + impl.Logger.Debugw("terminating dangling wf", "name", cancelWfDtoRequest.WorkflowName) + var err error + workflowExecutor := impl.getWorkflowExecutor(cancelWfDtoRequest.ExecutorType) + if workflowExecutor == nil { + return &utils.ApiError{HttpStatusCode: http.StatusNotFound, Code: strconv.Itoa(http.StatusNotFound), InternalMessage: "workflow executor not found", UserMessage: "workflow executor not found"} } + if cancelWfDtoRequest.RestConfig == nil { + cancelWfDtoRequest.RestConfig = impl.config + } + err = workflowExecutor.TerminateDanglingWorkflow(cancelWfDtoRequest.WorkflowGenerateName, cancelWfDtoRequest.Namespace, cancelWfDtoRequest.RestConfig) return err } + func (impl *WorkflowServiceImpl) getRuntimeEnvClientInstance(environment *repository.Environment) (v1alpha12.WorkflowInterface, error) { restConfig, err, _ := impl.k8sCommonService.GetRestConfigByClusterId(context.Background(), environment.ClusterId) if err != nil { diff --git a/pkg/pipeline/adapter/adapter.go b/pkg/pipeline/adapter/adapter.go index ed179e14588..e587a119bc4 100644 --- a/pkg/pipeline/adapter/adapter.go +++ b/pkg/pipeline/adapter/adapter.go @@ -21,6 +21,7 @@ import ( dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/ciPipeline" + "github.com/devtron-labs/devtron/pkg/bean" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/types" @@ -225,3 +226,130 @@ func GetSourceCiDownStreamResponse(linkedCIDetails []ciPipeline.LinkedCIDetails, } return response } + +func ConvertConfigDataToPipelineConfigData(r *bean.ConfigData) *pipelineConfigBean.ConfigData { + if r != nil { + return &pipelineConfigBean.ConfigData{ + Name: r.Name, + Type: r.Type, + External: r.External, + MountPath: r.MountPath, + Data: r.Data, + DefaultData: r.DefaultData, + DefaultMountPath: r.DefaultMountPath, + Global: r.Global, + ExternalSecretType: r.ExternalSecretType, + ESOSecretData: ConvertESOSecretDataToPipelineESOSecretData(r.ESOSecretData), + DefaultESOSecretData: ConvertESOSecretDataToPipelineESOSecretData(r.DefaultESOSecretData), + ExternalSecret: ConvertExternalSecretToPipelineExternalSecret(r.ExternalSecret), + DefaultExternalSecret: ConvertExternalSecretToPipelineExternalSecret(r.DefaultExternalSecret), + RoleARN: r.RoleARN, + SubPath: r.SubPath, + ESOSubPath: r.ESOSubPath, + FilePermission: r.FilePermission, + Overridden: 
r.Overridden, + } + } + return &pipelineConfigBean.ConfigData{} +} + +func ConvertESOSecretDataToPipelineESOSecretData(r bean.ESOSecretData) pipelineConfigBean.ESOSecretData { + return pipelineConfigBean.ESOSecretData{ + SecretStore: r.SecretStore, + SecretStoreRef: r.SecretStoreRef, + ESOData: ConvertEsoDataToPipelineEsoData(r.ESOData), + RefreshInterval: r.RefreshInterval, + } +} + +func ConvertExternalSecretToPipelineExternalSecret(r []bean.ExternalSecret) []pipelineConfigBean.ExternalSecret { + extSec := make([]pipelineConfigBean.ExternalSecret, 0, len(r)) + for _, item := range r { + newItem := pipelineConfigBean.ExternalSecret{ + Key: item.Key, + Name: item.Name, + Property: item.Property, + IsBinary: item.IsBinary, + } + extSec = append(extSec, newItem) + } + return extSec +} + +func ConvertEsoDataToPipelineEsoData(r []bean.ESOData) []pipelineConfigBean.ESOData { + newEsoData := make([]pipelineConfigBean.ESOData, 0, len(r)) + for _, item := range r { + newItem := pipelineConfigBean.ESOData{ + SecretKey: item.SecretKey, + Key: item.Key, + Property: item.Property, + } + newEsoData = append(newEsoData, newItem) + } + return newEsoData +} + +// reverse adapter for the above adapters + +func ConvertPipelineConfigDataToConfigData(r *pipelineConfigBean.ConfigData) *bean.ConfigData { + if r != nil { + return &bean.ConfigData{ + Name: r.Name, + Type: r.Type, + External: r.External, + MountPath: r.MountPath, + Data: r.Data, + DefaultData: r.DefaultData, + DefaultMountPath: r.DefaultMountPath, + Global: r.Global, + ExternalSecretType: r.ExternalSecretType, + ESOSecretData: ConvertPipelineESOSecretDataToESOSecretData(r.ESOSecretData), + DefaultESOSecretData: ConvertPipelineESOSecretDataToESOSecretData(r.DefaultESOSecretData), + ExternalSecret: ConvertPipelineExternalSecretToExternalSecret(r.ExternalSecret), + DefaultExternalSecret: ConvertPipelineExternalSecretToExternalSecret(r.DefaultExternalSecret), + RoleARN: r.RoleARN, + SubPath: r.SubPath, + ESOSubPath: r.ESOSubPath, + FilePermission: r.FilePermission, + Overridden: r.Overridden, + } + } + return &bean.ConfigData{} + +} + +func ConvertPipelineESOSecretDataToESOSecretData(r pipelineConfigBean.ESOSecretData) bean.ESOSecretData { + return bean.ESOSecretData{ + SecretStore: r.SecretStore, + SecretStoreRef: r.SecretStoreRef, + ESOData: ConvertPipelineEsoDataToEsoData(r.ESOData), + RefreshInterval: r.RefreshInterval, + } +} + +func ConvertPipelineExternalSecretToExternalSecret(r []pipelineConfigBean.ExternalSecret) []bean.ExternalSecret { + extSec := make([]bean.ExternalSecret, 0, len(r)) + for _, item := range r { + newItem := bean.ExternalSecret{ + Key: item.Key, + Name: item.Name, + Property: item.Property, + IsBinary: item.IsBinary, + } + extSec = append(extSec, newItem) + } + return extSec +} + +func ConvertPipelineEsoDataToEsoData(r []pipelineConfigBean.ESOData) []bean.ESOData { + newEsoData := make([]bean.ESOData, 0, len(r)) + for _, item := range r { + newItem := bean.ESOData{ + SecretKey: item.SecretKey, + Key: item.Key, + Property: item.Property, + } + newEsoData = append(newEsoData, newItem) + } + return newEsoData +} diff --git a/pkg/pipeline/bean/ConfigMapBean.go b/pkg/pipeline/bean/ConfigMapBean.go index 8a9e78a5ee3..65cb8af637b 100644 --- a/pkg/pipeline/bean/ConfigMapBean.go +++ b/pkg/pipeline/bean/ConfigMapBean.go @@ -44,6 +44,8 @@ type ESOData struct { Property string `json:"property,omitempty"` } +// there is an adapter written in pkg/bean folder to convert below ConfigData struct to pkg/bean's ConfigData + type ConfigData 
struct { Name string `json:"name"` Type string `json:"type"` @@ -117,6 +119,10 @@ type SecretsList struct { ConfigData []*ConfigData `json:"secrets"` } +type ConfigsList struct { + ConfigData []*ConfigData `json:"maps"` +} + type ConfigNameAndType struct { Id int Name string @@ -129,6 +135,7 @@ const ( CM ResourceType = "ConfigMap" CS ResourceType = "Secret" DeploymentTemplate ResourceType = "Deployment Template" + PipelineStrategy ResourceType = "Pipeline Strategy" ) func (r ResourceType) ToString() string { diff --git a/pkg/pipeline/bean/WorkflowTemplate.go b/pkg/pipeline/bean/WorkflowTemplate.go index 0245dec4697..dc2476ecb3c 100644 --- a/pkg/pipeline/bean/WorkflowTemplate.go +++ b/pkg/pipeline/bean/WorkflowTemplate.go @@ -50,13 +50,14 @@ type WorkflowTemplate struct { } const ( - CI_WORKFLOW_NAME = "ci" - CI_WORKFLOW_WITH_STAGES = "ci-stages-with-env" - CiStage = "CI" - JobStage = "JOB" - CdStage = "CD" - CD_WORKFLOW_NAME = "cd" - CD_WORKFLOW_WITH_STAGES = "cd-stages-with-env" + CI_WORKFLOW_NAME = "ci" + CI_WORKFLOW_WITH_STAGES = "ci-stages-with-env" + CiStage = "CI" + JobStage = "JOB" + CdStage = "CD" + CD_WORKFLOW_NAME = "cd" + CD_WORKFLOW_WITH_STAGES = "cd-stages-with-env" + WorkflowGenerateNamePrefix = "devtron.ai/generate-name-prefix" ) func (workflowTemplate *WorkflowTemplate) GetEntrypoint() string { @@ -72,17 +73,20 @@ func (workflowTemplate *WorkflowTemplate) GetEntrypoint() string { func (workflowTemplate *WorkflowTemplate) CreateObjectMetadata() *v12.ObjectMeta { + workflowLabels := map[string]string{WorkflowGenerateNamePrefix: workflowTemplate.WorkflowNamePrefix} switch workflowTemplate.WorkflowType { case CI_WORKFLOW_NAME: + workflowLabels["devtron.ai/workflow-purpose"] = "ci" return &v12.ObjectMeta{ GenerateName: workflowTemplate.WorkflowNamePrefix + "-", - Labels: map[string]string{"devtron.ai/workflow-purpose": "ci"}, + Labels: workflowLabels, } case CD_WORKFLOW_NAME: + workflowLabels["devtron.ai/workflow-purpose"] = "cd" return &v12.ObjectMeta{ GenerateName: workflowTemplate.WorkflowNamePrefix + "-", Annotations: map[string]string{"workflows.argoproj.io/controller-instanceid": workflowTemplate.WfControllerInstanceID}, - Labels: map[string]string{"devtron.ai/workflow-purpose": "cd"}, + Labels: workflowLabels, } default: return nil diff --git a/pkg/pipeline/executors/ArgoWorkflowExecutor.go b/pkg/pipeline/executors/ArgoWorkflowExecutor.go index 0f622717f6c..c84a903e06d 100644 --- a/pkg/pipeline/executors/ArgoWorkflowExecutor.go +++ b/pkg/pipeline/executors/ArgoWorkflowExecutor.go @@ -59,6 +59,7 @@ type WorkflowExecutor interface { TerminateWorkflow(workflowName string, namespace string, clusterConfig *rest.Config) error GetWorkflow(workflowName string, namespace string, clusterConfig *rest.Config) (*unstructured.UnstructuredList, error) GetWorkflowStatus(workflowName string, namespace string, clusterConfig *rest.Config) (*types.WorkflowStatus, error) + TerminateDanglingWorkflow(workflowGenerateName string, namespace string, clusterConfig *rest.Config) error } type ArgoWorkflowExecutor interface { @@ -89,6 +90,29 @@ func (impl *ArgoWorkflowExecutorImpl) TerminateWorkflow(workflowName string, nam return err } +func (impl *ArgoWorkflowExecutorImpl) TerminateDanglingWorkflow(workflowGenerateName string, namespace string, clusterConfig *rest.Config) error { + impl.logger.Debugw("terminating dangling wf", "workflowGenerateName", workflowGenerateName) + wfClient, err := impl.getClientInstance(namespace, clusterConfig) + if err != nil { + impl.logger.Errorw("cannot build wf 
client", "workflowGenerateName", workflowGenerateName, "err", err) + return err + } + jobSelectorLabel := fmt.Sprintf("%s=%s", bean.WorkflowGenerateNamePrefix, workflowGenerateName) + wfList, err := wfClient.List(context.Background(), v1.ListOptions{LabelSelector: jobSelectorLabel}) + if err != nil { + impl.logger.Errorw("error in fetching list of workflows", "namespace", namespace, "err", err) + return err + } + for _, wf := range wfList.Items { + err = util.TerminateWorkflow(context.Background(), wfClient, wf.Name) + if err != nil { + impl.logger.Errorw("error in terminating argo executor workflow", "name", wf.Name, "err", err) + return err + } + } + return nil +} + func (impl *ArgoWorkflowExecutorImpl) ExecuteWorkflow(workflowTemplate bean.WorkflowTemplate) (*unstructured.UnstructuredList, error) { entryPoint := workflowTemplate.WorkflowType diff --git a/pkg/pipeline/executors/SystemWorkflowExecutor.go b/pkg/pipeline/executors/SystemWorkflowExecutor.go index 69cf5bb7d4c..6d587384678 100644 --- a/pkg/pipeline/executors/SystemWorkflowExecutor.go +++ b/pkg/pipeline/executors/SystemWorkflowExecutor.go @@ -114,6 +114,31 @@ func (impl *SystemWorkflowExecutorImpl) TerminateWorkflow(workflowName string, n return err } +func (impl *SystemWorkflowExecutorImpl) TerminateDanglingWorkflow(workflowGenerateName string, namespace string, clusterConfig *rest.Config) error { + _, clientset, err := impl.k8sUtil.GetK8sConfigAndClientsByRestConfig(clusterConfig) + if err != nil { + impl.logger.Errorw("error occurred while creating k8s client", "workflowGenerateName", workflowGenerateName, "namespace", namespace, "err", err) + return err + } + jobSelectorLabel := fmt.Sprintf("%s=%s", bean.WorkflowGenerateNamePrefix, workflowGenerateName) + jobList, err := clientset.BatchV1().Jobs(namespace).List(context.Background(), v12.ListOptions{LabelSelector: jobSelectorLabel}) + if err != nil { + impl.logger.Errorw("error occurred while fetching jobs list for terminating dangling workflows", "namespace", namespace, "err", err) + return err + } + for _, job := range jobList.Items { + err = clientset.BatchV1().Jobs(namespace).Delete(context.Background(), job.Name, v12.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + err = fmt.Errorf("cannot find job workflow %s", job.Name) + } + impl.logger.Errorw("error occurred while deleting workflow", "workflowName", job.Name, "namespace", namespace, "err", err) + return err + } + } + return nil +} + func (impl *SystemWorkflowExecutorImpl) GetWorkflow(workflowName string, namespace string, clusterConfig *rest.Config) (*unstructured.UnstructuredList, error) { templatesList := &unstructured.UnstructuredList{} _, clientset, err := impl.k8sUtil.GetK8sConfigAndClientsByRestConfig(clusterConfig) @@ -160,8 +185,7 @@ func (impl *SystemWorkflowExecutorImpl) GetWorkflowStatus(workflowName string, n } func (impl *SystemWorkflowExecutorImpl) getJobTemplate(workflowTemplate bean.WorkflowTemplate) *v1.Job { - - workflowLabels := map[string]string{DEVTRON_WORKFLOW_LABEL_KEY: DEVTRON_WORKFLOW_LABEL_VALUE, "devtron.ai/purpose": "workflow", "workflowType": workflowTemplate.WorkflowType} + workflowLabels := GetWorkflowLabelsForSystemExecutor(workflowTemplate) //setting TerminationGracePeriodSeconds in PodSpec //which ensures Pod has enough time to execute cleanup on SIGTERM event diff --git a/pkg/pipeline/executors/WorkflowUtils.go b/pkg/pipeline/executors/WorkflowUtils.go index 6593658255b..2933356f295 100644 --- a/pkg/pipeline/executors/WorkflowUtils.go +++ 
b/pkg/pipeline/executors/WorkflowUtils.go @@ -256,3 +256,12 @@ func CheckIfReTriggerRequired(status, message, workflowRunnerStatus string) bool const WorkflowCancel = "CANCELLED" const POD_DELETED_MESSAGE = "pod deleted" + +func GetWorkflowLabelsForSystemExecutor(workflowTemplate bean.WorkflowTemplate) map[string]string { + return map[string]string{ + DEVTRON_WORKFLOW_LABEL_KEY: DEVTRON_WORKFLOW_LABEL_VALUE, + "devtron.ai/purpose": "workflow", + "workflowType": workflowTemplate.WorkflowType, + bean.WorkflowGenerateNamePrefix: workflowTemplate.WorkflowNamePrefix, + } +} diff --git a/pkg/pipeline/history/ConfigMapHistoryService.go b/pkg/pipeline/history/ConfigMapHistoryService.go index 4f56673bd1e..107f486dea4 100644 --- a/pkg/pipeline/history/ConfigMapHistoryService.go +++ b/pkg/pipeline/history/ConfigMapHistoryService.go @@ -20,6 +20,11 @@ import ( "context" "encoding/json" "errors" + "github.com/devtron-labs/devtron/pkg/configDiff/adaptor" + bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/pkg/configDiff/utils" + "github.com/devtron-labs/devtron/pkg/pipeline/adapter" + bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" globalUtil "github.com/devtron-labs/devtron/util" "time" @@ -48,6 +53,8 @@ type ConfigMapHistoryService interface { CheckIfTriggerHistoryExistsForPipelineIdOnTime(pipelineId int, deployedOn time.Time) (cmId int, csId int, exists bool, err error) GetDeployedHistoryDetailForCMCSByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int, configType repository.ConfigType, userHasAdminAccess bool) ([]*ComponentLevelHistoryDetailDto, error) ConvertConfigDataToComponentLevelDto(config *bean.ConfigData, configType repository.ConfigType, userHasAdminAccess bool) (*ComponentLevelHistoryDetailDto, error) + + GetConfigmapHistoryDataByDeployedOnAndPipelineId(ctx context.Context, pipelineId int, deployedOn time.Time, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfig, *bean2.DeploymentAndCmCsConfig, error) } type ConfigMapHistoryServiceImpl struct { @@ -691,3 +698,129 @@ func (impl ConfigMapHistoryServiceImpl) CheckIfTriggerHistoryExistsForPipelineId } return cmId, csId, exists, nil } + +func (impl ConfigMapHistoryServiceImpl) GetConfigmapHistoryDataByDeployedOnAndPipelineId(ctx context.Context, pipelineId int, deployedOn time.Time, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfig, *bean2.DeploymentAndCmCsConfig, error) { + secretConfigData, err := impl.getResolvedConfigData(ctx, pipelineId, deployedOn, repository.SECRET_TYPE, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("error in getting resolved secret config data in case of previous deployments ", "pipelineId", pipelineId, "deployedOn", deployedOn, "err", err) + return nil, nil, err + } + cmConfigData, err := impl.getResolvedConfigData(ctx, pipelineId, deployedOn, repository.CONFIGMAP_TYPE, userHasAdminAccess) + if err != nil { + impl.logger.Errorw("error in getting resolved cm config data in case of previous deployments ", "pipelineId", pipelineId, "deployedOn", deployedOn, "err", err) + return nil, nil, err + } + + return secretConfigData, cmConfigData, nil +} + +func (impl *ConfigMapHistoryServiceImpl) getResolvedConfigData(ctx context.Context, pipelineId int, deployedOn time.Time, configType repository.ConfigType, userHasAdminAccess bool) (*bean2.DeploymentAndCmCsConfig, error) { + configsList := &bean3.ConfigsList{} + secretsList := &bean3.SecretsList{} + var err error + history, err := 
impl.configMapHistoryRepository.GetDeployedHistoryByPipelineIdAndDeployedOn(pipelineId, deployedOn, configType) + if err != nil { + impl.logger.Errorw("error in getting deployed history by pipeline id and deployed on", "pipelineId", pipelineId, "deployedOn", deployedOn, "err", err) + return nil, err + } + if configType == repository.SECRET_TYPE { + _, secretsList, err = impl.getConfigDataRequestForHistory(history) + if err != nil { + impl.logger.Errorw("error in getting config data request for history", "err", err) + return nil, err + } + } else if configType == repository.CONFIGMAP_TYPE { + configsList, _, err = impl.getConfigDataRequestForHistory(history) + if err != nil { + impl.logger.Errorw("error in getting config data request for history", "cmCsHistory", history, "err", err) + return nil, err + } + } + + resolvedDataMap, variableSnapshotMap, err := impl.scopedVariableManager.GetResolvedCMCSHistoryDtos(ctx, configType, adaptor.ReverseConfigListConvertor(*configsList), history, adaptor.ReverseSecretListConvertor(*secretsList)) + if err != nil { + return nil, err + } + resolvedConfigDataList := make([]*bean3.ConfigData, 0, len(resolvedDataMap)) + for _, resolvedConfigData := range resolvedDataMap { + resolvedConfigDataList = append(resolvedConfigDataList, adapter.ConvertConfigDataToPipelineConfigData(&resolvedConfigData)) + } + configDataReq := &bean3.ConfigDataRequest{} + var resourceType bean3.ResourceType + if configType == repository.SECRET_TYPE { + impl.encodeSecretDataFromNonAdminUsers(secretsList.ConfigData, userHasAdminAccess) + impl.encodeSecretDataFromNonAdminUsers(resolvedConfigDataList, userHasAdminAccess) + configDataReq.ConfigData = secretsList.ConfigData + resourceType = bean3.CS + } else if configType == repository.CONFIGMAP_TYPE { + configDataReq.ConfigData = configsList.ConfigData + resourceType = bean3.CM + } + + configDataJson, err := utils.ConvertToJsonRawMessage(configDataReq) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting config data to json raw message", "pipelineId", pipelineId, "deployedOn", deployedOn, "err", err) + return nil, err + } + resolvedConfigDataReq := &bean3.ConfigDataRequest{ConfigData: resolvedConfigDataList} + resolvedConfigDataString, err := utils.ConvertToString(resolvedConfigDataReq) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting config data to json raw message", "pipelineId", pipelineId, "deployedOn", deployedOn, "err", err) + return nil, err + } + resolvedConfigDataStringJson, err := utils.ConvertToJsonRawMessage(resolvedConfigDataString) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in ConvertToJsonRawMessage for resolvedJson", "resolvedJson", resolvedConfigDataStringJson, "err", err) + return nil, err + } + return bean2.NewDeploymentAndCmCsConfig().WithConfigData(configDataJson).WithResourceType(resourceType). 
+ WithVariableSnapshot(variableSnapshotMap).WithResolvedValue(resolvedConfigDataStringJson), nil +} + +func (impl *ConfigMapHistoryServiceImpl) encodeSecretDataFromNonAdminUsers(configDataList []*bean3.ConfigData, userHasAdminAccess bool) { + for _, config := range configDataList { + if config.Data != nil { + if !userHasAdminAccess { + //removing keys and sending + resultMap := make(map[string]string) + resultMapFinal := make(map[string]string) + err := json.Unmarshal(config.Data, &resultMap) + if err != nil { + impl.logger.Errorw("unmarshal failed", "error", err) + return + } + for key, _ := range resultMap { + //hard-coding values to show them as hidden to user + resultMapFinal[key] = "*****" + } + config.Data, err = utils.ConvertToJsonRawMessage(resultMapFinal) + if err != nil { + impl.logger.Errorw("error while marshaling request", "err", err) + return + } + } + } + } +} + +func (impl ConfigMapHistoryServiceImpl) getConfigDataRequestForHistory(history *repository.ConfigmapAndSecretHistory) (*bean3.ConfigsList, *bean3.SecretsList, error) { + + configsList := &bean3.ConfigsList{} + secretsList := &bean3.SecretsList{} + if history.IsConfigmapHistorySecretType() { + err := json.Unmarshal([]byte(history.Data), secretsList) + if err != nil { + impl.logger.Errorw("error while Unmarshal in secret history data", "error", err) + return configsList, secretsList, err + } + return configsList, secretsList, nil + } else if history.IsConfigmapHistoryConfigMapType() { + err := json.Unmarshal([]byte(history.Data), configsList) + if err != nil { + impl.logger.Errorw("error while Unmarshal in config history data", "historyData", history.Data, "error", err) + return configsList, secretsList, err + } + return configsList, secretsList, nil + } + return configsList, secretsList, nil +} diff --git a/pkg/pipeline/history/DeployedConfigurationHistoryService.go b/pkg/pipeline/history/DeployedConfigurationHistoryService.go index 26124a6df73..241574a2f18 100644 --- a/pkg/pipeline/history/DeployedConfigurationHistoryService.go +++ b/pkg/pipeline/history/DeployedConfigurationHistoryService.go @@ -152,6 +152,7 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedConfigurationByW impl.logger.Errorw("error in checking if history exists for deployment template", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) return nil, err } + deploymentTemplateConfiguration := &DeploymentConfigurationDto{ Name: DEPLOYMENT_TEMPLATE_TYPE_HISTORY_COMPONENT, } @@ -161,6 +162,7 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedConfigurationByW deployedConfigurations = append(deployedConfigurations, deploymentTemplateConfiguration) //checking if pipeline strategy configuration for this pipelineId and wfrId exists or not + strategyHistoryId, exists, err := impl.strategyHistoryService.CheckIfHistoryExistsForPipelineIdAndWfrId(newCtx, pipelineId, wfrId) if err != nil { impl.logger.Errorw("error in checking if history exists for pipeline strategy", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) diff --git a/pkg/pipeline/history/DeploymentTemplateHistoryService.go b/pkg/pipeline/history/DeploymentTemplateHistoryService.go index 276a0905002..d2eac61f809 100644 --- a/pkg/pipeline/history/DeploymentTemplateHistoryService.go +++ b/pkg/pipeline/history/DeploymentTemplateHistoryService.go @@ -50,6 +50,8 @@ type DeploymentTemplateHistoryService interface { // used for rollback GetDeployedHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*HistoryDetailDto, error) + + 
GetTemplateHistoryModelForDeployedTemplateById(deploymentTemplateHistoryId, pipelineId int) (*repository.DeploymentTemplateHistory, error) } type DeploymentTemplateHistoryServiceImpl struct { @@ -407,3 +409,12 @@ func (impl DeploymentTemplateHistoryServiceImpl) CheckIfTriggerHistoryExistsForP exists = true return deploymentTemplateHistoryId, exists, err } + +func (impl DeploymentTemplateHistoryServiceImpl) GetTemplateHistoryModelForDeployedTemplateById(deploymentTemplateHistoryId, pipelineId int) (*repository.DeploymentTemplateHistory, error) { + history, err := impl.deploymentTemplateHistoryRepository.GetHistoryForDeployedTemplateById(deploymentTemplateHistoryId, pipelineId) + if err != nil { + impl.logger.Errorw("error in getting deployment template history", "err", err, "deploymentTemplateHistoryId", deploymentTemplateHistoryId, "pipelineId", pipelineId) + return nil, err + } + return history, nil +} diff --git a/pkg/pipeline/history/repository/ConfigMapHistoryRepository.go b/pkg/pipeline/history/repository/ConfigMapHistoryRepository.go index e3a6918ee66..ebf45afe844 100644 --- a/pkg/pipeline/history/repository/ConfigMapHistoryRepository.go +++ b/pkg/pipeline/history/repository/ConfigMapHistoryRepository.go @@ -39,6 +39,7 @@ type ConfigMapHistoryRepository interface { GetHistoryByPipelineIdAndWfrId(pipelineId, wfrId int, configType ConfigType) (*ConfigmapAndSecretHistory, error) GetDeployedHistoryForPipelineIdOnTime(pipelineId int, deployedOn time.Time, configType ConfigType) (*ConfigmapAndSecretHistory, error) GetDeployedHistoryList(pipelineId, baseConfigId int, configType ConfigType, componentName string) ([]*ConfigmapAndSecretHistory, error) + GetDeployedHistoryByPipelineIdAndDeployedOn(pipelineId int, deployedOn time.Time, configType ConfigType) (*ConfigmapAndSecretHistory, error) } type ConfigMapHistoryRepositoryImpl struct { @@ -71,6 +72,13 @@ type ConfigmapAndSecretHistory struct { DeployedByEmailId string `sql:"-"` } +func (r *ConfigmapAndSecretHistory) IsConfigmapHistorySecretType() bool { + return r.DataType == SECRET_TYPE +} + +func (r *ConfigmapAndSecretHistory) IsConfigmapHistoryConfigMapType() bool { + return r.DataType == CONFIGMAP_TYPE +} func (impl ConfigMapHistoryRepositoryImpl) CreateHistory(tx *pg.Tx, model *ConfigmapAndSecretHistory) (*ConfigmapAndSecretHistory, error) { var err error if tx != nil { @@ -149,3 +157,14 @@ func (impl ConfigMapHistoryRepositoryImpl) GetDeployedHistoryForPipelineIdOnTime Select() return &history, err } + +func (impl ConfigMapHistoryRepositoryImpl) GetDeployedHistoryByPipelineIdAndDeployedOn(pipelineId int, deployedOn time.Time, configType ConfigType) (*ConfigmapAndSecretHistory, error) { + var history ConfigmapAndSecretHistory + err := impl.dbConnection.Model(&history). + Where("pipeline_id = ?", pipelineId). + Where("data_type = ?", configType). + Where("deployed_on = ?", deployedOn). + Where("deployed = ?", true). 
+ Select() + return &history, err +} diff --git a/pkg/pipeline/history/repository/PipelineStrategyHistoryRepository.go b/pkg/pipeline/history/repository/PipelineStrategyHistoryRepository.go index 092a81239ec..755d38686cc 100644 --- a/pkg/pipeline/history/repository/PipelineStrategyHistoryRepository.go +++ b/pkg/pipeline/history/repository/PipelineStrategyHistoryRepository.go @@ -35,6 +35,7 @@ type PipelineStrategyHistoryRepository interface { GetHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*PipelineStrategyHistory, error) CheckIfTriggerHistoryExistsForPipelineIdOnTime(pipelineId int, deployedOn time.Time) (bool, error) GetDeployedHistoryList(pipelineId, baseConfigId int) ([]*PipelineStrategyHistory, error) + FindPipelineStrategyForDeployedOnAndPipelineId(pipelineId int, deployedOn time.Time) (PipelineStrategyHistory, error) } type PipelineStrategyHistoryRepositoryImpl struct { @@ -145,3 +146,11 @@ func (impl PipelineStrategyHistoryRepositoryImpl) CheckIfTriggerHistoryExistsFor Exists() return exists, err } + +func (impl PipelineStrategyHistoryRepositoryImpl) FindPipelineStrategyForDeployedOnAndPipelineId(pipelineId int, deployedOn time.Time) (PipelineStrategyHistory, error) { + var history PipelineStrategyHistory + err := impl.dbConnection.Model(&history). + Where("pipeline_strategy_history.deployed_on = ?", deployedOn). + Where("pipeline_strategy_history.pipeline_id = ?", pipelineId).Select() + return history, err +} diff --git a/pkg/pipeline/types/CiCdConfig.go b/pkg/pipeline/types/CiCdConfig.go index 91a48f7d952..f5bcadd57ec 100644 --- a/pkg/pipeline/types/CiCdConfig.go +++ b/pkg/pipeline/types/CiCdConfig.go @@ -25,6 +25,7 @@ import ( blob_storage "github.com/devtron-labs/common-lib/blob-storage" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" + "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" v12 "k8s.io/api/core/v1" "k8s.io/client-go/rest" @@ -37,6 +38,17 @@ import ( "time" ) +type CancelWfRequestDto struct { + ExecutorType cdWorkflow.WorkflowExecutorType + WorkflowName string + Namespace string + RestConfig *rest.Config + IsExt bool + Environment *repository.Environment + ForceAbort bool + WorkflowGenerateName string +} + // build infra configurations like ciTimeout,ciCpuLimit,ciMemLimit,ciCpuReq,ciMemReq are being managed by infraConfig service type CiCdConfig struct { diff --git a/pkg/pipeline/types/Workflow.go b/pkg/pipeline/types/Workflow.go index 23fa62023dc..1f9f3704b6b 100644 --- a/pkg/pipeline/types/Workflow.go +++ b/pkg/pipeline/types/Workflow.go @@ -589,13 +589,12 @@ func updateContainerEnvs(isCM bool, workflowMainContainer *v1.Container, configS } } -const PRE = "PRE" - -const POST = "POST" - -const CI_NODE_PVC_ALL_ENV = "devtron.ai/ci-pvc-all" - -const CI_NODE_PVC_PIPELINE_PREFIX = "devtron.ai/ci-pvc" +const ( + PRE = "PRE" + POST = "POST" + CI_NODE_PVC_ALL_ENV = "devtron.ai/ci-pvc-all" + CI_NODE_PVC_PIPELINE_PREFIX = "devtron.ai/ci-pvc" +) type CiArtifactDTO struct { Id int `json:"id"` diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 671400fedde..849c5d9e1b3 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -549,12 +549,6 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t } util4.MergeMaps(pluginArtifacts, 
cdStageCompleteEvent.PluginRegistryArtifactDetails) - err = impl.deactivateUnusedPaths(wfRunner.ImagePathReservationIds, pluginArtifacts) - if err != nil { - impl.logger.Errorw("error in deactiving unusedImagePaths", "err", err) - return err - } - pipeline, err := impl.pipelineRepository.FindById(cdStageCompleteEvent.CdPipelineId) if err != nil { return err @@ -575,32 +569,16 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t impl.logger.Errorw("error in saving plugin artifacts", "err", err) return err } - if pipeline.TriggerType == pipelineConfig.TRIGGER_TYPE_AUTOMATIC { - if len(PreCDArtifacts) > 0 { - ciArtifact = PreCDArtifacts[0] // deployment will be trigger with artifact copied by plugin - } - cdWorkflow, err := impl.cdWorkflowRepository.FindById(cdStageCompleteEvent.WorkflowId) - if err != nil { - return err - } - //passing applyAuth as false since this event is for auto trigger and user who already has access to this cd can trigger pre cd also - applyAuth := false - if cdStageCompleteEvent.TriggeredBy != 1 { - applyAuth = true - } - triggerRequest := triggerBean.TriggerRequest{ - CdWf: cdWorkflow, - Pipeline: pipeline, - Artifact: ciArtifact, - ApplyAuth: applyAuth, - TriggeredBy: cdStageCompleteEvent.TriggeredBy, - TriggerContext: triggerContext, - } - triggerRequest.TriggerContext.Context = context.Background() - err = impl.cdTriggerService.TriggerAutomaticDeployment(triggerRequest) - if err != nil { - return err - } + ciArtifactId := 0 + if len(PreCDArtifacts) > 0 { + ciArtifactId = PreCDArtifacts[len(PreCDArtifacts)-1].Id // deployment will be trigger with artifact copied by plugin + } else { + ciArtifactId = cdStageCompleteEvent.CiArtifactDTO.Id + } + err = impl.cdTriggerService.TriggerAutoCDOnPreStageSuccess(triggerContext, cdStageCompleteEvent.CdPipelineId, ciArtifactId, cdStageCompleteEvent.WorkflowId, cdStageCompleteEvent.TriggeredBy, 0) + if err != nil { + impl.logger.Errorw("error in triggering cd on pre cd succcess", "err", err) + return err } } return nil @@ -673,13 +651,6 @@ func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(triggerContext return err } if len(pluginRegistryImageDetails) > 0 { - if wfr != nil { - err = impl.deactivateUnusedPaths(wfr.ImagePathReservationIds, pluginRegistryImageDetails) - if err != nil { - impl.logger.Errorw("error in deactivation images", "err", err) - return err - } - } PostCDArtifacts, err := impl.commonArtifactService.SavePluginArtifacts(ciArtifact, pluginRegistryImageDetails, cdPipelineId, repository.POST_CD, triggeredBy) if err != nil { impl.logger.Errorw("error in saving plugin artifacts", "err", err) @@ -735,11 +706,6 @@ func (impl *WorkflowDagExecutorImpl) UpdateCiWorkflowForCiSuccess(request *bean2 return err } - err = impl.deactivateUnusedPaths(savedWorkflow.ImagePathReservationIds, request.PluginRegistryArtifactDetails) - if err != nil { - impl.logger.Errorw("error in deactivation images", "err", err) - return err - } return nil } @@ -910,37 +876,6 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext trigger return buildArtifact.Id, err } -func (impl *WorkflowDagExecutorImpl) deactivateUnusedPaths(reserveImagePathIds []int, pluginRegistryArtifactDetails map[string][]string) error { - // for copy container image plugin if images reserved are not equal to actual copird - reservedImagePaths, err := impl.customTagService.GetImagePathsByIds(reserveImagePathIds) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting imagePaths by ids", 
"ImagePathReservationIds", reserveImagePathIds, "err", err) - return err - } - - copiedImagesMapping := make(map[string]bool) - for _, savedImages := range pluginRegistryArtifactDetails { - for _, image := range savedImages { - copiedImagesMapping[image] = true - } - } - - unusedPaths := make([]string, 0, len(reservedImagePaths)) - for _, reservedImage := range reservedImagePaths { - if _, ok := copiedImagesMapping[reservedImage.ImagePath]; !ok { - unusedPaths = append(unusedPaths, reservedImage.ImagePath) - } - } - - err = impl.customTagService.DeactivateImagePathReservationByImagePath(unusedPaths) - if err != nil { - impl.logger.Errorw("error in deactivating unused image paths", "imagePathReservationIds", reserveImagePathIds, "err", err) - return err - } - - return nil -} - func (impl *WorkflowDagExecutorImpl) WriteCiSuccessEvent(request *bean2.CiArtifactWebhookRequest, pipeline *pipelineConfig.CiPipeline, artifact *repository.CiArtifact) { event, _ := impl.eventFactory.Build(util2.Success, &pipeline.Id, pipeline.AppId, nil, util2.CI) event.CiArtifactId = artifact.Id diff --git a/scripts/casbin/10_terminal.down.sql b/scripts/casbin/10_terminal.down.sql new file mode 100644 index 00000000000..e19c73b16c3 --- /dev/null +++ b/scripts/casbin/10_terminal.down.sql @@ -0,0 +1 @@ +DELETE FROM casbin_rule where v0='role:super-admin___' and v1='terminal'; \ No newline at end of file diff --git a/scripts/casbin/10_terminal.up.sql b/scripts/casbin/10_terminal.up.sql new file mode 100644 index 00000000000..09e46eede25 --- /dev/null +++ b/scripts/casbin/10_terminal.up.sql @@ -0,0 +1,2 @@ +INSERT INTO "public"."casbin_rule" ("p_type", "v0", "v1", "v2", "v3", "v4", "v5") VALUES +('p','role:super-admin___','terminal','*','*','allow',''); \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.helmignore b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.helmignore new file mode 100644 index 00000000000..50af0317254 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.image_descriptor_template.json new file mode 100644 index 00000000000..8a99a95664c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/Chart.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/Chart.yaml new file mode 100644 index 00000000000..4d7d3386052 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: deployment-chart_4-20-0 +version: 4.20.0 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/README.md b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/README.md new file mode 100644 index 00000000000..57ee0cefa1c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/README.md @@ -0,0 +1,991 @@ + +# Deployment Chart - v4.20.0 + +## 1. Yaml File - + +### Container Ports + +This defines ports on which application services will be exposed to other services + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true + protocol: TCP +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | +| `protocol` | Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP"| + +### EnvVariables +```yaml +EnvVariables: [] +``` +To set environment variables for the containers that run in the Pod. +### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable. + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. This should return error code in case of non-recoverable error. 
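For instance, a minimal HTTP liveness configuration might look like the sketch below. The `/health` path and the threshold values are illustrative assumptions, not chart defaults; the full set of supported keys follows.

```yaml
# Minimal illustrative liveness probe: poll an HTTP endpoint on the
# container port and restart the pod after three consecutive failures.
LivenessProbe:
  Path: /health          # hypothetical health endpoint exposed by the app
  port: 8080
  initialDelaySeconds: 20
  periodSeconds: 10
  failureThreshold: 3
```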
+ +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true + grpc: + port: 8080 + service: "" +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveliness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | +| `grpc` | GRPC specifies an action involving a GRPC port. Port is a required field if using gRPC service for health probes. Number must be in the range 1 to 65535. Service (optional) is the name of the service to place in the gRPC HealthCheckRequest. | + + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. + +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or percentage of the replicas count. +The default value of "MaxSurge: " is 25%. + +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. This should return error code in case of errors which can be recovered from if traffic is stopped. + +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true + grpc: + port: 8080 + service: "" +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. 
| +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | +| `grpc` | GRPC specifies an action involving a GRPC port. Port is a required field if using gRPC service for health probes. Number must be in the range 1 to 65535. Service (optional) is the name of the service to place in the gRPC HealthCheckRequest. | + + +### Pod Disruption Budget + +You can create `PodDisruptionBudget` for each application. A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions. For example, an application would like to ensure the number of replicas running is never brought below the certain number. + +```yaml +podDisruptionBudget: + minAvailable: 1 +``` + +or + +```yaml +podDisruptionBudget: + maxUnavailable: 50% +``` + +You can specify either `maxUnavailable` or `minAvailable` in a PodDisruptionBudget and it can be expressed as integers or as a percentage + +| Key | Description | +| :--- | :--- | +| `minAvailable` | Evictions are allowed as long as they leave behind 1 or more healthy pods of the total number of desired replicas. | +| `maxUnavailable` | Evictions are allowed as long as at most 1 unhealthy replica among the total number of desired replicas. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. | +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. | +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. 
+ +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + containerResource: + enabled: true + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | +| `containerResource` | Used to scale resource as per container resource. | + +### Flagger + +You can use flagger for canary releases with deployment objects. It supports flexible traffic routing with istio service mesh as well. + +```yaml +flaggerCanary: + addOtherGateways: [] + addOtherHosts: [] + analysis: + interval: 15s + maxWeight: 50 + stepWeight: 5 + threshold: 5 + annotations: {} + appProtocol: http + corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + createIstioGateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + enabled: false + gatewayRefs: null + headers: + request: + add: + x-some-header: value + labels: {} + loadtest: + enabled: true + url: http://flagger-loadtester.istio-system/ + match: + - uri: + prefix: / + port: 8080 + portDiscovery: true + retries: null + rewriteUri: / + targetPort: 8080 + thresholds: + latency: 500 + successRate: 90 + timeout: null +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable canary releases using flagger else set false.| +| `addOtherGateways` | To provide multiple istio gateways for flagger. | +| `addOtherHosts` | Add multiple hosts for istio service mesh with flagger. | +| `analysis` | Define how the canary release should progresss and at what interval. | +| `annotations` | Annotation to add on flagger resource. | +| `labels` | Labels to add on flagger resource. | +| `appProtocol` | Protocol to use for canary. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `createIstioGateway` | Set to true if you want to create istio gateway as well with flagger. | +| `headers` | Add headers if any. | +| `loadtest` | Enable load testing for your canary release. | + + + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. By default, devtron uses {app-name}-{environment-name} as release fullname. + +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. 
Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . + +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Ingress Internal + +This allows private access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### additionalBackends + +This defines additional backend path in the ingress . + +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + args: + - sleep 300 + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. 
+ +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + +### Resources + +These define minimum and maximum RAM and CPU available to the application. + +```yaml +resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "0.10" + memory: "100Mi" +``` + +Resources are required to set CPU and memory usage. + +#### Limits + +Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted. + +#### Requests + +Requests are what the container is guaranteed to get. + +### Service + +This defines annotations and the type of service, optionally can define name also. + +Supports "ClientIP" and "None". Used to maintain session affinity. Enable + client IP based session affinity. + +```yaml + service: + type: ClusterIP + annotations: {} + sessionAffinity: + enabled: true + sessionAffinityConfig: {} +``` + +### Volumes + +```yaml +volumes: + - name: log-volume + emptyDir: {} + - name: logpv + persistentVolumeClaim: + claimName: logpvc +``` + +It is required when some values need to be read from or written to an external disk. + +### Volume Mounts + +```yaml +volumeMounts: + - mountPath: /var/log/nginx/ + name: log-volume + - mountPath: /mnt/logs + name: logpvc + subPath: employee +``` + +It is used to provide mounts to the volume. + +### Affinity and anti-affinity + +```yaml +Spec: + Affinity: + Key: + Values: +``` + +Spec is used to define the desire state of the given container. + +Node Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node. + +Inter-pod affinity allow you to constrain which nodes your pod is eligible to be scheduled based on labels on pods. + +#### Key + +Key part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +#### Values + +Value part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +### Tolerations + +```yaml +tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +``` + +Taints are the opposite, they allow a node to repel a set of pods. + +A given pod can access the given node and avoid the given taint only if the given pod satisfies a given taint. + +Taints and tolerations are a mechanism which work together that allows you to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it. 
+ +### Arguments + +```yaml +args: + enabled: false + value: [] +``` + +This is used to give arguments to command. + +### Command + +```yaml +command: + enabled: false + value: [] +``` + +It contains the commands for the server. + +| Key | Description | +| :--- | :--- | +| `enabled` | To enable or disable the command. | +| `value` | It contains the commands. | + + +### Containers +Containers section can be used to run side-car containers along with your main container within same pod. Containers running within same pod can share volumes and IP Address and can address each other @localhost. We can use base image inside container by setting the reuseContainerImage flag to `true`. + +```yaml + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate +``` + +### Prometheus + +```yaml + prometheus: + release: monitoring +``` + +It is a kubernetes monitoring tool and the name of the file to be monitored as monitoring in the given case.It describes the state of the prometheus. + +### rawYaml + +```yaml +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + name: my-service + spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + type: ClusterIP +``` +Accepts an array of Kubernetes objects. You can specify any kubernetes yaml here and it will be applied when your app gets deployed. + +### Grace Period + +```yaml +GracePeriod: 30 +``` +Kubernetes waits for the specified time called the termination grace period before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`. + +A Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it. This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. If a node runs out of resources, Kubernetes terminates pods to free those resources. It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. 
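The Grace Period section above notes that the application itself must catch SIGTERM and drain before the grace period expires. A minimal, illustrative Go sketch of such a shutdown path is given below (not part of this chart — the HTTP server, port, and the 25-second drain timeout are assumptions chosen to finish inside the default 30-second `GracePeriod`):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080"}

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("server error: %v", err)
		}
	}()

	// Kubernetes sends SIGTERM first and waits up to GracePeriod (30s by
	// default) before sending SIGKILL.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGTERM, os.Interrupt)
	<-stop

	// Stop accepting new connections and drain in-flight requests, staying
	// comfortably inside the configured GracePeriod.
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("forced shutdown: %v", err)
	}
}
```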
+ + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler (HPA). + +An example of autoscaling with KEDA using Prometheus metrics is given below:
+```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: + restoreToOriginalReplicaCount: true + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: prometheus + metadata: + serverAddress: http://:9090 + metricName: http_request_total + query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",} + threshold: "50" + triggerAuthentication: + enabled: false + name: + spec: {} + authenticationRef: {} +```
+An example of autoscaling with KEDA based on Kafka is given below:
+```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: {} + triggers: + - type: kafka + metadata: + bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092 + topic: Orders-Service-ESP.info + lagThreshold: "100" + consumerGroup: oders-remove-delivered-packages + allowIdleConsumers: "true" + triggerAuthentication: + enabled: true + name: keda-trigger-auth-kafka-credential + spec: + secretTargetRef: + - parameter: sasl + name: keda-kafka-secrets + key: sasl + - parameter: username + name: keda-kafka-secrets + key: username + authenticationRef: + name: keda-trigger-auth-kafka-credential +```
+ +### Winter-Soldier +Winter Soldier can be used to: +- clean up (delete) Kubernetes resources +- reduce workload pods to 0 + +**_NOTE:_** After deploying this, you can create the Hibernator object and provide the custom configuration that decides which workloads are deleted, put to sleep, and so on. For more information, check [the main repo](https://github.com/devtron-labs/winter-soldier). + +Given below are the template values you can provide for winter-soldier:
+```yaml +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + action: sleep + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + targetReplicas: [] + fieldSelector: [] +```
+Here, +| Key | values | Description | +| :--- | :--- | :--- | +| `enabled` | `false`,`true` | Enables or disables Winter-Soldier. | +| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | Specific API version. | +| `action` | `sleep`,`delete`, `scale` | Specifies the action to be performed. | +| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | Used to specify the time zone. (It uses the standard format; please refer to [this list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).) | +| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | Used to define the time periods/ranges in which the specified action should be performed. You can have multiple timeRanges. These settings will take `action` on Sat and Sun from 00:00 to 23:59:59. | +| `targetReplicas` | `[n]` : n - number of replicas to scale. | This is a mandatory field when the `action` is `scale`. Default value is `[]`. | +| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | Takes a list of methods to select the resources on which the specified `action` is performed. | + + +Here is an example:
+```yaml +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now()) +```
+The above settings will take action on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and 20:00 to 23:59:59. If `action: sleep`, workloads are hibernated at `timeFrom` and un-hibernated at `timeTo`. If `action: delete`, workloads are deleted at `timeFrom` and `timeTo`. Here the action is `scale`, so it scales the number of resource replicas to `targetReplicas: [1,1,1]`. Each element of the `targetReplicas` array is mapped to the corresponding element of the `timeRangesWithZone/timeRanges` array, so make sure both arrays have equal length; otherwise the changes cannot be observed. + +The above example will select the application objects which were created more than 10 hours ago, across all namespaces excluding the application's namespace. Winter Soldier exposes the following functions to handle time, CPU and memory. + +- ParseTime - This function can be used to parse time. For e.g., to parse creationTimestamp, use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z') +- AddTime - This can be used to add a duration to a time. For e.g., AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') will shift the time back by 10h. Use d for days, h for hours, m for minutes and s for seconds; use a negative value to get an earlier time. +- Now - This can be used to get the current time. +- CpuToNumber / MemoryToNumber - These can be used to compare CPU and memory quantities. For e.g., any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) will check whether any resource request is less than 60Mi. + +
+### Security Context +A security context defines privilege and access control settings for a Pod or Container. + +To add a security context for the main container:
+```yaml +containerSecurityContext: + allowPrivilegeEscalation: false +```
+ +To add a security context at the pod level:
+```yaml +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 +```
+ +### Topology Spread Constraints +You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. +
+```yaml +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: {} + minDomains: 1 + nodeAffinityPolicy: Ignore +```
+ +### Persistent Volume Claim +You can use a persistent volume claim to mount a volume as per your use case.
+ +```yaml +persistentVolumeClaim: + name: my-pvc + storageClassName: default + accessMode: + - ReadWriteOnce + mountPath: /tmp +```
+ +### Vertical Pod Autoscaling +This is connected to VPA and automatically scales the pod's resource requests and limits up and down in response to actual usage.
+```yaml +verticalPodScaling: + enabled: true + resourcePolicy: {} + updatePolicy: {} +```
+ +### Scheduler Name + +You can provide your own custom scheduler to schedule your application. + +```yaml +schedulerName: "" +```
+ +### Deployment Metrics + +It gives the real-time metrics of the deployed applications. + +| Key | Description | +| :--- | :--- | +| `Deployment Frequency` | It shows how often this app is deployed to production. | +| `Change Failure Rate` | It shows how often the respective pipeline fails. | +| `Mean Lead Time` | It shows the average time taken to deliver a change to production. | +| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. | + +## 2. Show application metrics + +If you want to see application metrics such as HTTP status code counts, application throughput, latency, and response time, enable Application metrics from below the Save button of the deployment template. After enabling it, you should be able to see all metrics on the App Details page. By default it remains disabled. +![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png) + +Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create a [Workflow](workflow/) to do CI/CD. + +### Helm Chart Json Schema + +The Helm chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values. + +### Other Validations in Json Schema + +The values of CPU and memory in limits must be greater than or equal to the corresponding values in requests. Similarly, in the case of envoyproxy, the values of limits must be greater than or equal to requests, as shown below. 
+``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/app-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/app-values.yaml new file mode 100644 index 00000000000..5f8216c0a71 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/app-values.yaml @@ -0,0 +1,530 @@ +# Mandatory configs + +podDisruptionBudget: {} +deploymentLabels: {} +deploymentAnnotations: {} + +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +restartPolicy: Always +service: + type: ClusterIP + # enabled: true + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + grpc: {} + + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + grpc: {} + + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + grpc: {} + + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: example.com + tls: + enabled: false + secretName: example-secret + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: {} + labels: {} + type: Deployment + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: + tls: + enabled: false + secretName: + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: istio-system + #service port + serviceport: 8080 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: true + # application protocol (optional) + appProtocol: http + # Istio retry 
policy (optional) + retries: + # attempts: 3 + # perTryTimeout: 1s + # retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: / + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # - example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 15s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.istio-system/ + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: "" + # Key: kops.k8s.io/instancegroup + Values: "" + +affinity: + enabled: false + values: {} + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +# kedaHttpScaledObject: +# enabled: false +# minReplicaCount: 1 +# maxReplicaCount: 2 +# targetPendingRequests: +# scaledownPeriod: +# servicePort: 80 # port of the service (required) + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. 
+ ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/env-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/env-values.yaml new file mode 100644 index 00000000000..48b794e8f28 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + Key: "" + Values: "" + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/pipeline-values.yaml new file mode 100644 index 00000000000..dbe4db3e8ec --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/pipeline-values.yaml @@ -0,0 +1,6 @@ +deployment: + strategy: + recreate: {} + rolling: + maxSurge: "25%" + maxUnavailable: 1 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/release-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/release-values.yaml new file mode 100644 index 00000000000..48eb3f482c1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/schema.json new file mode 100644 index 00000000000..6a332631a93 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/schema.json @@ -0,0 +1,1368 @@ + +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs":{ + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + }, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the 
containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath":{ + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name":{ + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath":{ + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "networkPolicy": { + "type": "object", + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "title": "Network Policy", + "properties": { + "enabled":{ + "type":"boolean", + "description": "used to enable or disable NetworkPolicy" + }, + "annotations":{ + "type": "object", + "description": "Annotations for NetworkPolicy" + }, + "labels":{ + "type":"object", + "description": "Labels for NetworkPolicy" + }, + "podSelector":{ + "type": "object", + "description": "Selects the pods to which this NetworkPolicy object applies", + "properties": { + "matchExpressions":{ + "type":"array", + "description": "list of label selector" + }, + "matchLabels":{ + "type":"object", + "description": "map of {key,value} pairs" + } + } + }, + "policyTypes":{ + "type":"array", + "description": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress,Egress." + }, + "ingress":{ + "type":"array", + "description": "List of ingress rules to be applied to the selected pods" + }, + "egress":{ + "type":"array", + "description": "List of egress rules to be applied to the selected pods" + } + } + }, + "istio": { + "type": "object", + "description": "Istio Service mesh", + "title": "Istio" + }, + "flaggerCanary":{ + "type": "object", + "description": "Flagger for canary release with istio service mesh", + "title": "Flagger Canary Release" + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": "object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": 
"used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": { + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + "TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + 
"title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + "items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle":{ + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases":{ + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip":{ + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames":{ + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": ["IfNotPresent", "Always"] + } + } + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined 
conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + 
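As a rough illustration of the winterSoldier block defined above (a sketch only: the time zone, weekday window and replica count are made-up values, and the action string assumes the controller's supported actions; apiVersion is the schema default):

winterSoldier:
  enabled: true
  apiVersion: pincher.devtron.ai/v1alpha1
  type: Deployment
  action: sleep
  timeRangesWithZone:
    timeZone: "Asia/Kolkata"
    timeRanges:
      - timeFrom: "00:00"
        timeTo: "06:00"
        weekdayFrom: Sat
        weekdayTo: Sun
  targetReplicas: [1]
  fieldSelector: []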
"tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. 
of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type":"object", + "description": "defines the container-specific configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created. You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + "pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines how long to wait before switching the container to active", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type":"object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable":{ + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable":{ + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod."
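A small values sketch for the podDisruptionBudget and containerSpec.lifecycle blocks above; the preStop handler follows the standard Kubernetes lifecycle-hook shape, and the sleep duration is only an example:

podDisruptionBudget:
  minAvailable: "1"
containerSpec:
  lifecycle:
    enabled: true
    preStop:
      exec:
        command: ["sh", "-c", "sleep 10"]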
+ } + } + }, + "deploymentLabels": { + "type": "object", + "description": "deploymentLabels is an object used to define labels on the deployment.", + "title": "DeploymentLabels" + }, + "deploymentAnnotations": { + "type": "object", + "description": "deploymentAnnotations is an object used to define annotations on the deployment.", + "title": "DeploymentAnnotations" + }, + "podExtraSpecs":{ + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type":"object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type":"object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. One can specify any Kubernetes YAML here and it will be applied when an app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of pod replicas", + "title": "Replica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", +
"LoadBalancer", + "NodePort", + "ExternalName" + ] + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" + }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} + diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/secrets-test-values.yaml new file mode 100644 index 00000000000..4a20404db87 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/NOTES.txt new file mode 100644 index 00000000000..2b144781688 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/_helpers.tpl new file mode 100644 index 00000000000..8fdc4daa201 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/_helpers.tpl @@ -0,0 +1,150 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
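To make the naming helpers above concrete, a hypothetical values fragment and the names it would render to (the override string is invented for illustration):

fullnameOverride: orders-api
service:
  name: ""
# ".Chart.Name .fullname" renders the workload name as "orders-api",
# ".servicename" falls through to the fullnameOverride branch and renders "orders-api-service",
# and ".previewservicename" renders "orders-api-preview-service".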
+*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . -}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ambassador.yaml new file mode 100644 index 00000000000..9d4a431c26d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ambassador.yaml @@ -0,0 +1,94 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + {{- if .name }} + name: {{ .name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }}-mapping + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} + hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/configmap.yaml new file mode 100644 index 00000000000..4e7879665e4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/deployment.yaml 
b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/deployment.yaml new file mode 100644 index 00000000000..95f796b3398 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/deployment.yaml @@ -0,0 +1,1249 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasPVCExists := false -}} + {{- if .Values.persistentVolumeClaim.name }} + {{- $hasPVCExists = true }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.deploymentLabels }} +{{ toYaml .Values.deploymentLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + +{{- if .Values.deploymentAnnotations }} + annotations: +{{ toYaml .Values.deploymentAnnotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: +{{- if .Values.customMatchLabels }} +{{ toYaml .Values.customMatchLabels | indent 6 }} +{{- end }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} + restartPolicy: Always +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- else if $.Values.affinity.enabled }} + affinity: +{{ toYaml .Values.affinity.values | indent 8 }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . 
}} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.topologySpreadConstraint }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraint }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . 
| indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: {{ $.Chart.Name }} + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . 
| sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end}} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp $.Values.LivenessProbe.grpc }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} + {{- if $.Values.LivenessProbe.grpc }} + grpc: +{{ toYaml .Values.LivenessProbe.grpc | indent 14 }} + {{- end }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp $.Values.ReadinessProbe.grpc }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ $.Values.ReadinessProbe.scheme }} + {{- if 
$.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} + {{- if $.Values.ReadinessProbe.grpc }} + grpc: +{{ toYaml .Values.ReadinessProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp $.Values.StartupProbe.grpc }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} + {{- if $.Values.StartupProbe.grpc }} + grpc: +{{ toYaml .Values.StartupProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . 
}} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .env }} + env: +{{ toYaml .env | indent 12 }} +{{- end }} + {{- if .envFrom }} + envFrom: +{{ toYaml .env | indent 12 }} +{{- end }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resziePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + + + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- with .Values.volumes }} +{{ toYaml . 
| trim | indent 8 }} +{{- end }} +{{- if .Values.persistentVolumeClaim.name }} + - name: {{.Values.persistentVolumeClaim.name}}-vol + persistentVolumeClaim: + claimName: {{.Values.persistentVolumeClaim.name }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} + + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "ROLLING" }} + type: "RollingUpdate" + rollingUpdate: + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- end }} + {{- if eq .Values.deploymentType "RECREATE" }} + type: "Recreate" + {{- end }} +{{- if $.Values.secondaryWorkload.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.deploymentLabels }} +{{ toYaml .Values.deploymentLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + +{{- if .Values.deploymentAnnotations }} + annotations: +{{ toYaml .Values.deploymentAnnotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: +{{- if .Values.customMatchLabels }} +{{ toYaml .Values.customMatchLabels | indent 6 }} +{{- end }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + replicas: {{ $.Values.secondaryWorkload.replicaCount | default 1 }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .Values.customPodLabels }} +{{ toYaml 
.Values.customPodLabels | indent 8 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} + restartPolicy: Always +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- with $.Values.secondaryWorkload }} +{{- if and .Spec.Affinity.Key .Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Spec.Affinity.Key }} + operator: In + values: + - {{ .Spec.Affinity.Values | default "nodes" }} +{{- else if .affinity.enabled }} + affinity: +{{ toYaml .affinity.values | indent 8 }} +{{- end }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . }} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if $.Values.secondaryWorkload.tolerations }} + tolerations: +{{ toYaml $.Values.secondaryWorkload.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.topologySpreadConstraint }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraint }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ 
toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: {{ $.Chart.Name }} + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . 
| sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end}} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp $.Values.LivenessProbe.grpc }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} + {{- if $.Values.LivenessProbe.grpc }} + grpc: +{{ toYaml .Values.LivenessProbe.grpc | indent 14 }} + {{- end }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp $.Values.ReadinessProbe.grpc }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ $.Values.ReadinessProbe.scheme }} + {{- if 
$.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} + {{- if $.Values.ReadinessProbe.grpc }} + grpc: +{{ toYaml .Values.ReadinessProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp $.Values.StartupProbe.grpc }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} + {{- if $.Values.StartupProbe.grpc }} + grpc: +{{ toYaml .Values.StartupProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . 
}} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for other secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .env }} + env: +{{ toYaml .env | indent 12 }} +{{- end }} + {{- if .envFrom }} + envFrom: +{{ toYaml .envFrom | indent 12 }} +{{- end }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resizePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + + + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- with .Values.volumes }} +{{ toYaml . 
| trim | indent 8 }} +{{- end }} +{{- if .Values.persistentVolumeClaim.name }} + - name: {{.Values.persistentVolumeClaim.name}}-vol + persistentVolumeClaim: + claimName: {{.Values.persistentVolumeClaim.name }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} + + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "ROLLING" }} + type: "RollingUpdate" + rollingUpdate: + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- end }} + {{- if eq .Values.deploymentType "RECREATE" }} + type: "Recreate" + {{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/externalsecrets.yaml new file mode 100644 index 00000000000..efd291af5d2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/externalsecrets.yaml @@ -0,0 +1,76 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: {{ .name}} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ 
$.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} + creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/flagger.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/flagger.yaml new file mode 100644 index 00000000000..766098fb618 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/flagger.yaml @@ -0,0 +1,164 @@ +{{- if .Values.flaggerCanary.enabled }} +{{ if .Values.flaggerCanary.createIstioGateway.enabled -}} +{{- with .Values.flaggerCanary.createIstioGateway }} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .labels }} +{{ toYaml .labels | indent 4 }} + {{- end }} +{{- if .annotations }} + annotations: +{{ toYaml .annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - {{ .host | quote -}} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - {{ .host | quote }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} +{{ end }} +--- +{{- if .Values.flaggerCanary.enabled }} +{{- with .Values.flaggerCanary }} +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-canary + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .labels }} +{{ toYaml .labels | indent 4 }} + {{- end }} +{{- if .annotations }} + annotations: +{{ toYaml .annotations | indent 4 }} +{{- end }} +spec: + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.autoscaling.enabled }} + autoscalerRef: + apiVersion: autoscaling/v1 + kind: HorizontalPodAutoscaler + name: {{ template ".Chart.Name .fullname" $ }}-hpa +{{- end }} + service: + portDiscovery: {{ .portDiscovery }} + port: 
{{ .serviceport }} + targetPort: {{ .targetPort }} + {{- if .appProtocol }} + appProtocol: {{ .appProtocol }} + {{- end }} +{{- if $.Values.flaggerCanary.gatewayRefs }} + gatewayRefs: +{{ toYaml $.Values.flaggerCanary.gatewayRefs | indent 6 }} +{{- end }} + {{- if or .createIstioGateway.enabled .addOtherGateways }} + gateways: + {{- if .createIstioGateway.enabled }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- if .addOtherGateways }} + {{- range .addOtherGateways }} + - {{ . }} + {{- end }} + {{- end }} + {{- end }} + {{- if or .createIstioGateway.enabled .addOtherHosts }} + hosts: + {{- if .createIstioGateway.enabled }} + - {{ .createIstioGateway.host | quote }} + {{- end }} + {{- if .addOtherHosts }} + {{- range .addOtherHosts }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .retries }} + retries: +{{ toYaml .retries | indent 6 }} + {{- end }} + {{- if .match }} + match: + {{- range .match }} + - uri: + prefix: {{ .uri.prefix }} + {{- end }} + {{- end }} + {{- if .rewriteUri }} + rewrite: + uri: {{ .rewriteUri }} + {{- end }} + {{- if .timeout }} + timeout: {{ .timeout }} + {{- end }} +{{- if $.Values.flaggerCanary.headers }} + headers: +{{ toYaml $.Values.flaggerCanary.headers | indent 6 }} +{{- end }} +{{- if $.Values.flaggerCanary.corsPolicy }} + corsPolicy: +{{ toYaml $.Values.flaggerCanary.corsPolicy | indent 6 }} +{{- end }} + analysis: + interval: {{ .analysis.interval }} + threshold: {{ .analysis.threshold }} + maxWeight: {{ .analysis.maxWeight }} + stepWeight: {{ .analysis.stepWeight }} + metrics: + - name: request-success-rate + threshold: {{ .thresholds.successRate }} + interval: 1m + - name: request-duration + threshold: {{ .thresholds.latency }} + interval: 1m + webhooks: + {{- if .loadtest.enabled }} + - name: load-test + url: {{ .loadtest.url }} + timeout: 5s + metadata: + cmd: "hey -z 1m -q 10 -c 2 http://{{ include ".Chart.Name .fullname" $ }}-canary.{{ $.Release.Namespace }}:{{ $.Values.flaggerCanary.targetPort }}/" + {{- end }} +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/generic.yaml new file mode 100644 index 00000000000..db95e842670 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . 
}} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/hpa.yaml new file mode 100644 index 00000000000..91553a09f57 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/hpa.yaml @@ -0,0 +1,170 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.autoscaling.labels }} +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and $.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} +{{- if and $.Values.secondaryWorkload.enabled $.Values.secondaryWorkload.autoscaling.enabled }} +--- +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" 
.Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }}-hpa + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + {{- if .Values.autoscaling.labels }} + labels: +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }} + {{- with $.Values.secondaryWorkload }} + minReplicas: {{ .autoscaling.MinReplicas }} + maxReplicas: {{ .autoscaling.MaxReplicas }} + metrics: + {{- if .autoscaling.containerResource.enabled }} + {{- with .autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if .autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ .autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if .autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ .autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and .autoscaling.extraMetrics (semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion) }} + {{- toYaml .autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and .autoscaling.behavior (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml .autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ingress.yaml new file mode 100644 index 00000000000..d9a2543e98d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/ingress.yaml @@ -0,0 +1,188 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingress.name }} + name: {{ $.Values.ingress.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingressInternal.name }} + name: {{ $.Values.ingressInternal.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress-internal + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-authorizationpolicy.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-authorizationpolicy.yaml new file mode 100644 index 00000000000..ac7b456ec5b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-authorizationpolicy.yaml @@ -0,0 +1,37 @@ +{{- with .Values.istio }} +{{- if and .enable .authorizationPolicy.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .authorizationPolicy.labels }} +{{ toYaml .authorizationPolicy.labels | indent 4 }} + {{- end }} +{{- if .authorizationPolicy.annotations }} + annotations: +{{ toYaml .authorizationPolicy.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} + action: {{ .authorizationPolicy.action }} +{{- if $.Values.istio.authorizationPolicy.provider }} + provider: +{{ toYaml $.Values.istio.authorizationPolicy.provider | indent 4 }} +{{- end }} +{{- if $.Values.istio.authorizationPolicy.rules }} + rules: +{{ toYaml $.Values.istio.authorizationPolicy.rules | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-destinationrule.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-destinationrule.yaml new file mode 100644 index 00000000000..47bef9a828e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-destinationrule.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .destinationRule.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-destinationrule + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .destinationRule.labels }} +{{ toYaml .destinationRule.labels | indent 4 }} + {{- end }} +{{- if .destinationRule.annotations }} + annotations: +{{ toYaml .destinationRule.annotations | indent 4 }} +{{- end }} +spec: + host: "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- if 
$.Values.istio.destinationRule.subsets }} + subsets: +{{ toYaml $.Values.istio.destinationRule.subsets | indent 4 }} +{{- end }} +{{- if $.Values.istio.destinationRule.trafficPolicy }} + trafficPolicy: +{{ toYaml $.Values.istio.destinationRule.trafficPolicy | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-gateway.yaml new file mode 100644 index 00000000000..d6579590100 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-gateway.yaml @@ -0,0 +1,50 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - {{ .Values.istio.gateway.host | quote -}} +{{ with .Values.istio.gateway }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - {{ .host | quote }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} + + + diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-peerauthentication.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-peerauthentication.yaml new file mode 100644 index 00000000000..481f8a96474 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-peerauthentication.yaml @@ -0,0 +1,36 @@ +{{- with .Values.istio }} +{{- if and .enable .peerAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .peerAuthentication.labels }} +{{ toYaml .peerAuthentication.labels | indent 4 }} + {{- end }} +{{- if .peerAuthentication.annotations }} + annotations: +{{ toYaml .peerAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .peerAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} + mtls: + mode: {{ .peerAuthentication.mtls.mode }} +{{- if $.Values.istio.peerAuthentication.portLevelMtls }} + portLevelMtls: +{{ toYaml $.Values.istio.peerAuthentication.portLevelMtls | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git 
a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-requestauthentication.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-requestauthentication.yaml new file mode 100644 index 00000000000..3429cee1462 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-requestauthentication.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .requestAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: RequestAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .requestAuthentication.labels }} +{{ toYaml .requestAuthentication.labels | indent 4 }} + {{- end }} +{{- if .requestAuthentication.annotations }} + annotations: +{{ toYaml .requestAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .requestAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} +{{- if $.Values.istio.requestAuthentication.jwtRules }} + jwtRules: +{{ toYaml $.Values.istio.requestAuthentication.jwtRules | indent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-virtualservice.yaml new file mode 100644 index 00000000000..af61039b8db --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/istio-virtualservice.yaml @@ -0,0 +1,50 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} + gateways: + {{- if .gateway.enabled }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + - {{ .gateway.host | quote }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . 
| quote }} + {{- end }} +{{- else }} + hosts: + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/keda-autoscaling.yaml new file mode 100644 index 00000000000..371363ab1a8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/keda-autoscaling.yaml @@ -0,0 +1,76 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + {{- if $.Values.kedaAutoscaling.name }} + name: {{ $.Values.kedaAutoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-keda + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/metrics-service-monitor.yaml 
b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/metrics-service-monitor.yaml new file mode 100644 index 00000000000..fa5321d3034 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_pod_label_pod_template_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - devtron_app_hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/networkpolicy.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/networkpolicy.yaml new file mode 100644 index 00000000000..350232a23b6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/networkpolicy.yaml @@ -0,0 +1,50 @@ +{{- if .Values.networkPolicy.enabled -}} +{{- with .Values.networkPolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-networkpolicy + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.networkPolicy.labels }} +{{ toYaml $.Values.networkPolicy.labels | indent 4 }} + {{- end }} +{{- if $.Values.networkPolicy.annotations }} + annotations: +{{ toYaml $.Values.networkPolicy.annotations | indent 4 }} +{{- end }} +spec: + podSelector: +{{- if .podSelector.matchExpressions }} + matchExpressions: +{{ toYaml $.Values.networkPolicy.podSelector.matchExpressions | indent 6 }} +{{- end }} +{{- if .podSelector.matchLabels }} + matchLabels: +{{ toYaml $.Values.networkPolicy.podSelector.matchLabels | indent 6 }} +{{- else }} + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} +{{- end }} +{{- if .policyTypes }} + policyTypes: +{{ toYaml $.Values.networkPolicy.policyTypes | indent 4 }} +{{- end }} +{{- if .ingress }} + ingress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4 }} +{{- end }} +{{- if .egress }} + egress: +{{ toYaml $.Values.networkPolicy.egress | indent 4}} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/persistent-volume-claim.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/persistent-volume-claim.yaml new file mode 100644 index 00000000000..bf4e6dfb712 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistentVolumeClaim.name }} +kind: 
PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{.Values.persistentVolumeClaim.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- with .Values.persistentVolumeClaim }} +spec: + accessModes: +{{- range .accessMode }} + - {{ . }} +{{- end }} + resources: + requests: + storage: {{ .storage | default "5Gi" }} + storageClassName: {{ .storageClassName | default "default" }} + volumeMode: {{ .volumeMode | default "Filesystem" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000000..2736332531c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,35 @@ +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- if .Values.podDisruptionBudget.name }} + name: {{ .Values.podDisruptionBudget.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 6 }} + {{- else }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/pre-sync-job.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/pre-sync-job.yaml new file mode 100644 index 00000000000..54c9f636eed --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/pre-sync-job.yaml @@ -0,0 +1,29 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/prometheusrules.yaml new file mode 100644 index 00000000000..c285de13883 
--- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/prometheusrules.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + {{- if .Values.prometheusRule.name }} + name: {{ .Values.prometheusRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }} + {{- end }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + {{- if $.Values.prometheusRule.name }} + - name: {{ $.Values.prometheusRule.name }} + {{- else }} + - name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + rules: {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/secret.yaml new file mode 100644 index 00000000000..5ac3ae14101 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/secret.yaml @@ -0,0 +1,84 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .roleARN }} + roleArn: {{ .roleARN }} + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: 
{{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/service.yaml new file mode 100644 index 00000000000..17c96b2ec72 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/service.yaml @@ -0,0 +1,98 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges )}} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . }} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + protocol: {{ .protocol }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort ) }} + nodePort: {{ .nodePort }} + {{- end }} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} +{{- if eq .Values.deploymentType "BLUE-GREEN" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + targetPort: {{ .name }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . 
}} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/serviceaccount.yaml new file mode 100644 index 00000000000..f337548e942 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/servicemonitor.yaml new file mode 100644 index 00000000000..3cdacf236d5 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/servicemonitor.yaml @@ -0,0 +1,114 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + {{- if .Values.servicemonitor.name }} + name: {{ .Values.servicemonitor.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-sm + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote }} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/sidecar-configmap.yaml new file mode 100644 index 00000000000..cf32679409a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/sidecar-configmap.yaml @@ -0,0 +1,169 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": 
"cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + "endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} +{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }} + "timeout": "{{ $element.envoyTimeout }}", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/vertical-pod-autoscaler.yaml new file mode 100644 index 00000000000..16933579793 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/vertical-pod-autoscaler.yaml @@ -0,0 +1,27 @@ +{{ $VerticalPodAutoScalingEnabled := include "VerticalPodAutoScalingEnabled" . 
}} +{{- if eq "true" $VerticalPodAutoScalingEnabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" . }}-vpa + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: +{{- if .Values.verticalPodScaling.resourcePolicy }} + resourcePolicy: +{{ toYaml .Values.verticalPodScaling.resourcePolicy}} +{{- end }} +{{- if .Values.verticalPodScaling.updatePolicy }} + updatePolicy: +{{ toYaml .Values.verticalPodScaling.updatePolicy}} +{{- end }} + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/winter-soldier.yaml new file mode 100644 index 00000000000..314f0c6db0c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/templates/winter-soldier.yaml @@ -0,0 +1,45 @@ +{{- if .Values.winterSoldier.enabled }} +apiVersion: {{ $.Values.winterSoldier.apiVersion }} +kind: Hibernator +metadata: + {{- if .Values.winterSoldier.name }} + name: {{ .Values.winterSoldier.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hibernator + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.winterSoldier.labels }} +{{ toYaml .Values.winterSoldier.labels | indent 4 }} + {{- end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + name: {{ include ".Chart.Name .fullname" $ }} + type: {{ .Values.winterSoldier.type | quote }} + fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14 }} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test-values.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test-values.json new file mode 100644 index 00000000000..a26806cb912 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test-values.json @@ -0,0 +1,292 @@ +{ + "ConfigMaps": { + "enabled": true, + "maps": [ + { + "data": { + "a": "b" + }, + "esoSecretData": {}, + "external": false, + "externalType": "", + "filePermission": "", + "mountPath": "", + "name": "abc", + "roleARN": "", + "subPath": false, + "type": "environment" + } + ] + }, + "ConfigSecrets": { + "enabled": true, + "secrets": [ + { + "data": { + "access-key": "QUtJQVdQVENFV0w1Wk4zVFBSRzY=", + "secret-access-key": "dkJ1bXRJL1YyZFUrQmVrSnM4QkVsblJnQzlRbEZueVZqL0dEdUc4Ng==" + }, + "esoSecretData": {}, + "external": false, + 
"externalType": "", + "filePermission": "", + "mountPath": "", + "name": "auth-aws", + "roleARN": "", + "subPath": false, + "type": "environment" + }, + { + "esoSecretData": { + "esoData": [ + { + "key": "ajay-secret-aws", + "property": "mob", + "secretKey": "mymob" + }, + { + "key": "ajay-secret-aws", + "property": "pin", + "secretKey": "mypin" + } + ], + "secretStore": { + "aws": { + "auth": { + "secretRef": { + "accessKeyIDSecretRef": { + "key": "access-key", + "name": "auth-aws-1" + }, + "secretAccessKeySecretRef": { + "key": "secret-access-key", + "name": "auth-aws-1" + } + } + }, + "region": "ap-south-1", + "service": "SecretsManager" + } + } + }, + "external": true, + "externalType": "ESO_AWSSecretsManager", + "filePermission": "", + "mountPath": "", + "name": "external-secret-aws", + "roleARN": "", + "subPath": false, + "type": "environment" + } + ] + }, + "ContainerPort": [ + { + "envoyPort": 8799, + "idleTimeout": "1800s", + "name": "app", + "port": 80, + "servicePort": 80, + "supportStreaming": false, + "useHTTP2": false + } + ], + "EnvVariables": [], + "GracePeriod": 30, + "LivenessProbe": { + "Path": "", + "command": [], + "failureThreshold": 3, + "httpHeaders": [], + "initialDelaySeconds": 20, + "periodSeconds": 10, + "port": 8080, + "scheme": "", + "successThreshold": 1, + "tcp": false, + "timeoutSeconds": 5 + }, + "MaxSurge": 1, + "MaxUnavailable": 0, + "MinReadySeconds": 60, + "ReadinessProbe": { + "Path": "", + "command": [], + "failureThreshold": 3, + "httpHeaders": [], + "initialDelaySeconds": 20, + "periodSeconds": 10, + "port": 8080, + "scheme": "", + "successThreshold": 1, + "tcp": false, + "timeoutSeconds": 5 + }, + "Spec": { + "Affinity": { + "Values": "nodes", + "key": "" + } + }, + "app": "1", + "appLabels": {}, + "appMetrics": false, + "args": { + "enabled": false, + "value": [ + "/bin/sh", + "-c", + "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600" + ] + }, + "autoscaling": { + "MaxReplicas": 2, + "MinReplicas": 1, + "TargetCPUUtilizationPercentage": 90, + "TargetMemoryUtilizationPercentage": 80, + "annotations": {}, + "behavior": {}, + "enabled": false, + "extraMetrics": [], + "labels": {} + }, + "command": { + "enabled": false, + "value": [], + "workingDir": {} + }, + "containerSecurityContext": {}, + "containers": [], + "dbMigrationConfig": { + "enabled": false + }, + "deployment": { + "strategy": { + "blueGreen": { + "autoPromotionEnabled": false, + "autoPromotionSeconds": 30, + "previewReplicaCount": 1, + "scaleDownDelaySeconds": 30 + } + } + }, + "deploymentType": "BLUE-GREEN", + "env": "1", + "envoyproxy": { + "configMapName": "", + "image": "quay.io/devtron/envoy:v1.14.1", + "resources": { + "limits": { + "cpu": "50m", + "memory": "50Mi" + }, + "requests": { + "cpu": "50m", + "memory": "50Mi" + } + } + }, + "hostAliases": [], + "image": { + "pullPolicy": "IfNotPresent" + }, + "imagePullSecrets": [], + "ingress": { + "annotations": {}, + "className": "", + "enabled": false, + "hosts": [ + { + "host": "chart-example1.local", + "pathType": "ImplementationSpecific", + "paths": [ + "/example1" + ] + } + ], + "labels": {}, + "tls": [] + }, + "ingressInternal": { + "annotations": {}, + "className": "", + "enabled": false, + "hosts": [ + { + "host": "chart-example1.internal", + "pathType": "ImplementationSpecific", + "paths": [ + "/example1" + ] + }, + { + "host": "chart-example2.internal", + "pathType": "ImplementationSpecific", + "paths": [ + "/example2", + "/example2/healthz" + ] + } + ], + "tls": [] + }, + "initContainers": [], + 
"kedaAutoscaling": { + "advanced": {}, + "authenticationRef": {}, + "enabled": false, + "envSourceContainerName": "", + "maxReplicaCount": 2, + "minReplicaCount": 1, + "triggerAuthentication": { + "enabled": false, + "name": "", + "spec": {} + }, + "triggers": [] + }, + "pauseForSecondsBeforeSwitchActive": 30, + "pipelineName": "cd-1-fpji", + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "prometheus": { + "release": "monitoring" + }, + "rawYaml": [], + "releaseVersion": "6", + "replicaCount": 1, + "resources": { + "limits": { + "cpu": "0.05", + "memory": "50Mi" + }, + "requests": { + "cpu": "0.01", + "memory": "10Mi" + } + }, + "secret": { + "data": {}, + "enabled": false + }, + "server": { + "deployment": { + "image": "aju121/test12", + "image_tag": "63118bf2-1-1" + } + }, + "service": { + "annotations": {}, + "loadBalancerSourceRanges": [], + "type": "ClusterIP" + }, + "servicemonitor": { + "additionalLabels": {} + }, + "tolerations": [], + "topologySpreadConstraints": [], + "volumeMounts": [], + "volumes": [], + "waitForSecondsBeforeScalingDown": 30 +} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test_values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test_values.yaml new file mode 100644 index 00000000000..dd0395f97f8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/test_values.yaml @@ -0,0 +1,779 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +kedaAutoscaling: + enabled: true + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: + - type: kubernetes-workload + name: trig_one + metadata: + podSelector: 'pod=workload-test' + - type: metrics-api + name: trig_two + metadata: + url: "https://mockbin.org/bin/336a8d99-9e09-4f1f-979d-851a6d1b1423" + valueLocation: "tasks" + + triggerAuthentication: + enabled: true + name: "trigger-test" + spec: {} + authenticationRef: {} + +deploymentLabels: + name: kunalverma + Company: Devtron + Job: DevRel + +deploymentAnnotations: + name: kunalverma + Company: Devtron + Job: DevRel + +containerSpec: + lifecycle: + enabled: true + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +imagePullSecrets: + - test1 + - test2 +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyTimeout: 15 + targetPort: 8080 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + + - name: app1 + port: 8090 + targetPort: 1234 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +secret: + enabled: false + +service: + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: example.com + tls: + enabled: false + secretName: example-tls-secret + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # rewriteUri: / + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + # route: + # - destination: + # host: service1 + # port: 80 + # - route: + # - destination: + # host: service2 + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: example.com + tls: + enabled: false + secretName: example-tls-secret + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + 
addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: istio-system + #service port + port: 80 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: false + # application protocol (optional) + appProtocol: + # Istio retry policy (optional) + retries: + # attempts: 3 + # perTryTimeout: 1s + # retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # - example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 5s + # max number of failed metric checks before rollback + threshold: 10 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.test/ + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +StartupProbe: + Path: "/" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + + +ingress: + enabled: true + className: nginx + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: 
"10" +# Old Ingress Format +# host: "ingress-example.com" +# path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: true + className: nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + additionalBackends: + - path: /internal + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + - path: /internal-01 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + type: Deployment + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + +dbMigrationConfig: + enabled: false + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# filePermission: 0400 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: true + secrets: + - name: config-secret-1 + type: environment + external: false + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + date: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: + # Additional init containers to run before the Scheduler pods. + # for example, be used to run a sidecar that chown Logs storage . 
+ - name: volume-mount-hack + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + + +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + annotations: + labels: + app: sample-metrics-app + name: sample-metrics-app + namespace: default + spec: + ports: + - name: web + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: sample-metrics-app + sessionAffinity: None + type: ClusterIP + - apiVersion: v1 + kind: Service + metadata: + annotations: + labels: + app: sample-metrics-app + name: sample-metrics-app + namespace: default + spec: + ports: + - name: web + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: sample-metrics-app + sessionAffinity: None + type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +podDisruptionBudget: + minAvailable: 1 + maxUnavailable: 1 + + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +# + +tolerations: + - key: "key" + operator: "Equal|Exists" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: true +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" + + +affinity: + enabled: false + values: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S1 + topologyKey: topology.kubernetes.io/zone + +secondaryWorkload: + enabled: true + postfix: "od" + replicaCount: 1 + affinity: {} + tolerations: [] + autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/values.yaml new file mode 100644 index 00000000000..2d5215d050e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-20-0/values.yaml @@ -0,0 +1,736 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: "" +# Key: kops.k8s.io/instancegroup + Values: "" + +affinity: {} + +image: + pullPolicy: IfNotPresent + +restartPolicy: Always + +ambassadorMapping: + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +# kedaHttpScaledObject: +# enabled: false +# minReplicaCount: 1 +# maxReplicaCount: 2 +# targetPendingRequests: +# scaledownPeriod: +# servicePort: 80 # port of the service (required) + +secret: + enabled: false + +service: + type: ClusterIP + enabled: true +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + sessionAffinity: + enabled: false + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: istio-system + #service port + serviceport: 8080 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: true + # application protocol (optional) + appProtocol: + # Istio retry policy (optional) + retries: + attempts: 3 + perTryTimeout: 1s + retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: / + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # - example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 15s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.istio-system/ + + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromSecretKeys: [] + # 
- name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromConfigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + grpc: {} + + +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + grpc: {} + + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + grpc: {} + + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + type: Deployment + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] 
+ +args: + enabled: false + value: [] + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: true + secrets: + - name: config-secret-1 + type: volume + filePermission: "420" + externalType: ESO_GoogleSecretsManager + mountPath: /etc/config/2 + esoSecretData: + esoData: + - key": kushagra-test + property: test1 + secretKey: SECRET_KUSHAGRA + # data: + # key1: key1value-1 + # key2: key2value-1 + # key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. + # # This will assign your application's docker image to init container. + # reuseContainerImage: true + +containers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ - name: volume-mount-hack + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + envFrom: + - configMapRef: + name: kamal + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # autoLabelSelector: true + # minDomain: 1 + # nodeTaintsPolicy: Honor + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 +persistentVolumeClaim: {} + +verticalPodScaling: + enabled: false + +secondaryWorkload: + enabled: false + Spec: + Affinity: + Key: "" + Values: "" + replicaCount: 1 + affinity: {} + tolerations: [] + autoscaling: + enabled: false + containerResource: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.helmignore b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.helmignore new file mode 100644 index 00000000000..50af0317254 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.image_descriptor_template.json new file mode 100644 index 00000000000..bd2472da075 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/Chart.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/Chart.yaml new file mode 100644 index 00000000000..5335b9923f4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: reference-chart_4-19-0 +version: 4.19.0 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/README.md b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/README.md new file mode 100644 index 00000000000..968eac6bb5a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/README.md @@ -0,0 +1,866 @@ + +# Rollout Deployment Chart - v4.19.0 + +## 1. Yaml File - + +### Container Ports + +This defines the ports on which application services will be exposed to other services. + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | + +### EnvVariables +```yaml +EnvVariables: [] +``` +To set environment variables for the containers that run in the Pod. + +### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is used to specify the environment variable name, the Secret name, and the key within that Secret whose value is used for the corresponding environment variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is used to specify the environment variable name, the ConfigMap name, and the key within that ConfigMap whose value is used for the corresponding environment variable. + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. This should return an error code in case of a non-recoverable error.
+ +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveliness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. + +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or percentage of the replicas count. +The default value of "MaxSurge: " is 25%. + +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. This should return error code in case of errors which can be recovered from if traffic is stopped. + +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. 
+| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + +### Pod Disruption Budget + +You can create `PodDisruptionBudget` for each application. A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions. For example, an application would like to ensure the number of replicas running is never brought below the certain number. + +```yaml +podDisruptionBudget: + minAvailable: 1 +``` + +or + +```yaml +podDisruptionBudget: + maxUnavailable: 50% +``` + +You can specify either `maxUnavailable` or `minAvailable` in a PodDisruptionBudget and it can be expressed as integers or as a percentage + +| Key | Description | +| :--- | :--- | +| `minAvailable` | Evictions are allowed as long as they leave behind 1 or more healthy pods of the total number of desired replicas. | +| `maxUnavailable` | Evictions are allowed as long as at most 1 unhealthy replica among the total number of desired replicas. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. | +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. | +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. + +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. 
By default, devtron uses {app-name}-{environment-name} as release fullname. + +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . + +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Ingress Internal + +This allows private access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + + +### additionalBackends + +This defines additional backend path in the ingress . 
+ +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. + +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + + +### Winter-Soldier +Winter Soldier can be used to +- cleans up (delete) Kubernetes resources +- reduce workload pods to 0 + +**_NOTE:_** After deploying this we can create the Hibernator object and provide the custom configuration by which workloads going to delete, sleep and many more. for more information check [the main repo](https://github.com/devtron-labs/winter-soldier) + +Given below is template values you can give in winter-soldier: +```yaml +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + action: sleep + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + targetReplicas: [] + fieldSelector: [] +``` +Here, +| Key | values | Description | +| :--- | :--- | :--- | +| `enabled` | `fasle`,`true` | decide the enabling factor | +| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | specific api version | +| `action` | `sleep`,`delete`, `scale` | This specify the action need to perform. | +| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | It use to specify the timeZone used. (It uses standard format. please refer [this](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) | +| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | It use to define time period/range on which the user need to perform the specified action. you can have multiple timeRanges.
For example, the first range in the example below takes the `action` on Sat and Sun from 00:00 to 23:59:59. |
+| `targetReplicas` | `[n]` : n - number of replicas to scale. | This field is mandatory when the `action` is `scale`. Default value is `[]`. |
+| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | A list of expressions used to select the resources on which the specified `action` is performed. |
+
+
+Here is an example:
+```yaml
+winterSoldier:
+  apiVersion: pincher.devtron.ai/v1alpha1
+  enabled: true
+  annotations: {}
+  labels: {}
+  timeRangesWithZone:
+    timeZone: "Asia/Kolkata"
+    timeRanges:
+      - timeFrom: 00:00
+        timeTo: 23:59:59
+        weekdayFrom: Sat
+        weekdayTo: Sun
+      - timeFrom: 00:00
+        timeTo: 08:00
+        weekdayFrom: Mon
+        weekdayTo: Fri
+      - timeFrom: 20:00
+        timeTo: 23:59:59
+        weekdayFrom: Mon
+        weekdayTo: Fri
+  action: scale
+  targetReplicas: [1,1,1]
+  fieldSelector:
+    - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now())
+```
+The above settings take the `action` on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and from 20:00 to 23:59:59. If `action: sleep`, workloads are hibernated at `timeFrom` and unhibernated at `timeTo`. If `action: delete`, workloads are deleted at `timeFrom` and `timeTo`. Here `action: scale` is used, so the number of resource replicas is scaled to `targetReplicas: [1,1,1]`. Each element of the `targetReplicas` array is mapped to the corresponding element of `timeRangesWithZone/timeRanges`, so make sure both arrays have the same length; otherwise the changes will not be applied.
+
+The above example selects the application objects that were created 10 hours ago, across all namespaces excluding the application's namespace. Winter Soldier exposes the following functions to handle time, CPU and memory.
+
+- ParseTime - Parses a time value. For example, to parse `creationTimestamp`, use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z').
+- AddTime - Adds a duration to a time. For example, AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') adds -10h, i.e. moves the time 10 hours earlier. Use d for days, h for hours, m for minutes and s for seconds; use a negative duration to get an earlier time.
+- Now - Returns the current time.
+- CpuToNumber / MemoryToNumber - These can be used to compare CPU and memory values. For example, any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) checks whether any resource request is less than 60Mi.
+
+
+
+### Resources
+
+These define minimum and maximum RAM and CPU available to the application.
+
+```yaml
+resources:
+  limits:
+    cpu: "1"
+    memory: "200Mi"
+  requests:
+    cpu: "0.10"
+    memory: "100Mi"
+```
+
+Resources are required to set CPU and memory usage.
+
+#### Limits
+
+Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted.
+
+#### Requests
+
+Requests are what the container is guaranteed to get.
+
+### Service
+
+This defines annotations and the type of service; optionally, a name can also be defined.
+
+```yaml
+  service:
+    type: ClusterIP
+    annotations: {}
+```
+
+### Volumes
+
+```yaml
+volumes:
+  - name: log-volume
+    emptyDir: {}
+  - name: logpv
+    persistentVolumeClaim:
+      claimName: logpvc
+```
+
+It is required when some values need to be read from or written to an external disk.
+
+### Volume Mounts
+
+```yaml
+volumeMounts:
+  - mountPath: /var/log/nginx/
+    name: log-volume
+  - mountPath: /mnt/logs
+    name: logpvc
+    subPath: employee
+```
+
+It is used to mount the volumes defined above into the container.
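+
+To see how the two sections fit together, here is a minimal sketch (the names are illustrative, not chart defaults): a `volumeMounts` entry refers to a `volumes` entry by its `name`, and the volume is made available at the given `mountPath` inside the container.
+
+```yaml
+volumes:
+  - name: app-logs            # illustrative volume name
+    emptyDir: {}
+volumeMounts:
+  - name: app-logs            # must match the volume's name above
+    mountPath: /var/log/app   # path inside the container where the volume appears
+```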
+
+### Affinity and anti-affinity
+
+```yaml
+Spec:
+  Affinity:
+    Key:
+    Values:
+```
+
+Spec is used to define the desired state of the given container.
+
+Node affinity allows you to constrain which nodes your pod is eligible to be scheduled on, based on labels of the node.
+
+Inter-pod affinity allows you to constrain which nodes your pod is eligible to be scheduled on, based on labels of pods already running on those nodes.
+
+#### Key
+
+Key part of the label used for node selection; this should be the same as the label on the node. Please confirm with the devops team.
+
+#### Values
+
+Value part of the label used for node selection; this should be the same as the label on the node. Please confirm with the devops team.
+
+### Tolerations
+
+```yaml
+tolerations:
+ - key: "key"
+   operator: "Equal"
+   value: "value"
+   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+```
+
+Taints are the opposite of affinity: they allow a node to repel a set of pods.
+
+A given pod can be scheduled on a tainted node only if it has a matching toleration for that taint.
+
+Taints and tolerations work together to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it.
+
+### Arguments
+
+```yaml
+args:
+  enabled: false
+  value: []
+```
+
+This is used to pass arguments to the command.
+
+### Command
+
+```yaml
+command:
+  enabled: false
+  value: []
+```
+
+It contains the commands for the server.
+
+| Key | Description |
+| :--- | :--- |
+| `enabled` | To enable or disable the command. |
+| `value` | It contains the commands. |
+
+
+### Containers
+The containers section can be used to run side-car containers along with your main container within the same pod. Containers running within the same pod can share volumes and IP address and can address each other at localhost. We can use the base image inside a container by setting the reuseContainerImage flag to `true`.
+
+```yaml
+    containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+          - containerPort: 80
+        command: ["/usr/local/bin/nginx"]
+        args: ["-g", "daemon off;"]
+      - reuseContainerImage: true
+        securityContext:
+          runAsUser: 1000
+          runAsGroup: 3000
+          fsGroup: 2000
+        volumeMounts:
+          - mountPath: /etc/ls-oms
+            name: ls-oms-cm-vol
+        command:
+          - flyway
+          - -configFiles=/etc/ls-oms/flyway.conf
+          - migrate
+```
+
+### Prometheus
+
+```yaml
+  prometheus:
+    release: monitoring
+```
+
+Prometheus is a Kubernetes monitoring tool. `release` is the name of the Prometheus release that should scrape the metrics of this application, `monitoring` in the given case.
+
+### rawYaml
+
+```yaml
+rawYaml:
+  - apiVersion: v1
+    kind: Service
+    metadata:
+      name: my-service
+    spec:
+      selector:
+        app: MyApp
+      ports:
+        - protocol: TCP
+          port: 80
+          targetPort: 9376
+      type: ClusterIP
+```
+Accepts an array of Kubernetes objects. You can specify any Kubernetes YAML here and it will be applied when your app gets deployed.
+
+### Grace Period
+
+```yaml
+GracePeriod: 30
+```
+Kubernetes waits for the specified time, called the termination grace period, before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`.
+
+Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it.
This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. If a node runs out of resources, Kubernetes terminates pods to free those resources. It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. + + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler(HPA). + +Example for autosccaling with KEDA using Prometheus metrics is given below: +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: + restoreToOriginalReplicaCount: true + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: prometheus + metadata: + serverAddress: http://:9090 + metricName: http_request_total + query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",} + threshold: "50" + triggerAuthentication: + enabled: false + name: + spec: {} + authenticationRef: {} +``` +Example for autosccaling with KEDA based on kafka is given below : +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: {} + triggers: + - type: kafka + metadata: + bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092 + topic: Orders-Service-ESP.info + lagThreshold: "100" + consumerGroup: oders-remove-delivered-packages + allowIdleConsumers: "true" + triggerAuthentication: + enabled: true + name: keda-trigger-auth-kafka-credential + spec: + secretTargetRef: + - parameter: sasl + name: keda-kafka-secrets + key: sasl + - parameter: username + name: keda-kafka-secrets + key: username + authenticationRef: + name: keda-trigger-auth-kafka-credential +``` + +### Security Context +A security context defines privilege and access control settings for a Pod or Container. 
+
+To add a security context for the main container:
+```yaml
+containerSecurityContext:
+  allowPrivilegeEscalation: false
+```
+
+To add a security context at the pod level:
+```yaml
+podSecurityContext:
+  runAsUser: 1000
+  runAsGroup: 3000
+  fsGroup: 2000
+```
+
+### Topology Spread Constraints
+You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization.
+
+```yaml
+topologySpreadConstraints:
+  - maxSkew: 1
+    topologyKey: zone
+    whenUnsatisfiable: DoNotSchedule
+    autoLabelSelector: true
+    customLabelSelector: {}
+```
+
+### Deployment Metrics
+
+It gives the real-time metrics of the deployed applications.
+
+| Key | Description |
+| :--- | :--- |
+| `Deployment Frequency` | It shows how often this app is deployed to production. |
+| `Change Failure Rate` | It shows how often the respective pipeline fails. |
+| `Mean Lead Time` | It shows the average time taken to deliver a change to production. |
+| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. |
+
+## 2. Show application metrics
+
+If you want to see application metrics such as HTTP status code counts, application throughput, latency, and response time, enable Application metrics from below the deployment template Save button. After enabling it, you should be able to see all metrics on the App Details page. By default it remains disabled.
+![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png)
+
+Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create a [Workflow](workflow/) to do CI/CD.
+
+### Helm Chart Json Schema
+
+Helm Chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values.
+
+### Other Validations in Json Schema
+
+The values of CPU and Memory in limits must be greater than or equal to the corresponding values in requests. Similarly, in case of envoyproxy, the values of limits must be greater than or equal to the values of requests, as mentioned below.
+``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/app-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/app-values.yaml new file mode 100644 index 00000000000..f4c8cef663c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/app-values.yaml @@ -0,0 +1,428 @@ +# Mandatory configs +podDisruptionBudget: {} + +rolloutLabels: {} +rolloutAnnotations: {} + +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +restartPolicy: Always +service: + # enabled: true + type: ClusterIP + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "example.com" + tls: + enabled: false + secretName: secret-name + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: {} + labels: {} + type: Rollout + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + + + +## Pods Service Account +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/env-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/env-values.yaml new file mode 100644 index 00000000000..5cd07c0269e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + key: "" + Values: nodes + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/pipeline-values.yaml new file mode 100644 index 00000000000..40a5ec633dd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/pipeline-values.yaml @@ -0,0 +1,24 @@ +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: {} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/release-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/release-values.yaml new file mode 100644 index 00000000000..48eb3f482c1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/schema.json new file mode 100644 index 00000000000..da5cce59eab --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/schema.json @@ -0,0 +1,1363 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs": { + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + 
}, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath": { + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath": { + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": "object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": "used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": 
{ + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + "TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + "items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is 
called immediately after a container is created" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases": { + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip": { + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames": { + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": [ + "IfNotPresent", + "Always" + ] + } + } + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time 
Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + 
"items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "networkPolicy":{ + "type": "object", + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "title": "Network Policy", + "properties": { + "enabled":{ + "type":"boolean", + "description": "used to enable or disable NetworkPolicy" + }, + "annotations":{ + "type": "object", + "description": "Annotations for NetworkPolicy" + }, + "labels":{ + "type":"object", + "description": "Labels for NetworkPolicy" + }, + "podSelector":{ + "type": "object", + "description": "Selects the pods to which this NetworkPolicy object applies", + "properties": { + "matchExpressions":{ + "type":"array", + "description": "list of label selector" + }, + "matchLabels":{ + "type":"object", + "description": "map of {key,value} pairs" + } + } + }, + "policyTypes":{ + "type":"array", + "description": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress,Egress." + }, + "ingress":{ + "type":"array", + "description": "List of ingress rules to be applied to the selected pods" + }, + "egress":{ + "type":"array", + "description": "List of egress rules to be applied to the selected pods" + } + } + }, + "istio":{ + "type": "object", + "description": "Istio Service mesh", + "title": "Istio" + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. 
of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type": "object", + "description": "defines the container-specific configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created. You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + "pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "how long to wait before switching the active container", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type": "object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable": { + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable": { + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in the absence of the evicted pod." + } + } + }, + "podExtraSpecs": { + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type": "object", + "description": "key/value pairs that are attached to pods; they are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type": "object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a Kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored; describes the state of Prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects.
One can specify any Kubernetes YAML here and it will be applied when an app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of replicas of the pod", + "title": "Replica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of the service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable the service", + "title": "Enabled" + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines the service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of the service account" + }, + "name": { + "type": "string", + "description": "name of the service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which works together with Taints to ensure that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints"
+ }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/secrets-test-values.yaml new file mode 100644 index 00000000000..4a20404db87 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/NOTES.txt new file mode 100644 index 00000000000..2b144781688 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/_helpers.tpl new file mode 100644 index 00000000000..ada78dad51a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/_helpers.tpl @@ -0,0 +1,142 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . 
-}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ambassador.yaml new file mode 100644 index 00000000000..7c374a70e8b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ambassador.yaml @@ -0,0 +1,86 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ include ".Chart.Name .fullname" $ }}-mapping + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} + hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/configmap.yaml new file mode 100644 index 00000000000..72d5ca84798 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/deployment.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/deployment.yaml new file mode 100644 index 00000000000..317b3f992b7 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/deployment.yaml @@ -0,0 +1,556 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + 
{{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.rolloutLabels }} +{{ toYaml .Values.rolloutLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + +{{- if .Values.rolloutAnnotations }} + annotations: +{{ toYaml .Values.rolloutAnnotations | indent 4 }} +{{- end }} + +spec: + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} + restartPolicy: Always +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . }} +{{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . 
}} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "envoyproxy/envoy:v1.14.1"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . 
| indent 10 }} +{{- end}} +{{- end}} +{{- end}} + - name: {{ $.Chart.Name }} + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: TCP + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . 
| sha256sum }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + {{- if and .name .fieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ 
$.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} +{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} []{{- end }} + + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- with .Values.volumes }} +{{ toYaml . | trim | indent 8 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." 
"-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} []{{- end }} + + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "BLUE-GREEN" }} + blueGreen: # A new field that used to provide configurable options for a BlueGreenUpdate strategy + previewService: {{ template ".previewservicename" . }} # Reference to a service that can serve traffic to a new image before it receives the active traffic + activeService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + autoPromotionSeconds: {{ $.Values.deployment.strategy.blueGreen.autoPromotionSeconds }} + scaleDownDelaySeconds: {{ $.Values.deployment.strategy.blueGreen.scaleDownDelaySeconds }} + previewReplicaCount: {{ $.Values.deployment.strategy.blueGreen.previewReplicaCount }} + autoPromotionEnabled: {{ $.Values.deployment.strategy.blueGreen.autoPromotionEnabled }} + {{- else if eq .Values.deploymentType "ROLLING" }} + canary: + stableService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- else if eq .Values.deploymentType "RECREATE" }} + recreate: + activeService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + {{- else if eq .Values.deploymentType "CANARY" }} + canary: + stableService: {{ template ".servicename" . 
}} # Reference to a service that serves end-user traffic to the replica set + maxSurge: {{ $.Values.deployment.strategy.canary.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.canary.maxUnavailable }} + steps: +{{ toYaml .Values.deployment.strategy.canary.steps | indent 8 }} + {{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/externalsecrets.yaml new file mode 100644 index 00000000000..bdb4223cc0c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/externalsecrets.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ .name }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name}} + creationPolicy: Owner + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/generic.yaml new file mode 100644 index 00000000000..db95e842670 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . 
}} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/hpa.yaml new file mode 100644 index 00000000000..a0e15155766 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/hpa.yaml @@ -0,0 +1,59 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + {{- if .Values.autoscaling.labels }} + labels: +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and $.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ingress.yaml new file mode 100644 index 00000000000..1f231966b16 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/ingress.yaml @@ -0,0 +1,177 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template ".Chart.Name .fullname" . }}-ingress + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ template ".Chart.Name .fullname" . }}-ingress-internal + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-authorizationpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-authorizationpolicy.yaml new file mode 100644 index 00000000000..ac7b456ec5b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-authorizationpolicy.yaml @@ -0,0 +1,37 @@ +{{- with .Values.istio }} +{{- if and .enable .authorizationPolicy.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .authorizationPolicy.labels }} +{{ toYaml .authorizationPolicy.labels | indent 4 }} + {{- end }} +{{- if .authorizationPolicy.annotations }} + annotations: +{{ toYaml .authorizationPolicy.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} + action: {{ .authorizationPolicy.action }} +{{- if $.Values.istio.authorizationPolicy.provider }} + provider: +{{ toYaml $.Values.istio.authorizationPolicy.provider | indent 4 }} +{{- end }} +{{- if $.Values.istio.authorizationPolicy.rules }} + rules: +{{ toYaml $.Values.istio.authorizationPolicy.rules | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-destinationrule.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-destinationrule.yaml new file mode 100644 index 00000000000..47bef9a828e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-destinationrule.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .destinationRule.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-destinationrule + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .destinationRule.labels }} +{{ toYaml .destinationRule.labels | indent 4 }} + {{- end }} +{{- if .destinationRule.annotations }} + annotations: +{{ toYaml .destinationRule.annotations | indent 4 }} +{{- end }} +spec: + host: "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- if $.Values.istio.destinationRule.subsets 
}} + subsets: +{{ toYaml $.Values.istio.destinationRule.subsets | indent 4 }} +{{- end }} +{{- if $.Values.istio.destinationRule.trafficPolicy }} + trafficPolicy: +{{ toYaml $.Values.istio.destinationRule.trafficPolicy | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-gateway.yaml new file mode 100644 index 00000000000..d6579590100 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-gateway.yaml @@ -0,0 +1,50 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - {{ .Values.istio.gateway.host | quote -}} +{{ with .Values.istio.gateway }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - {{ .host | quote }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} + + + diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-peerauthentication.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-peerauthentication.yaml new file mode 100644 index 00000000000..481f8a96474 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-peerauthentication.yaml @@ -0,0 +1,36 @@ +{{- with .Values.istio }} +{{- if and .enable .peerAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .peerAuthentication.labels }} +{{ toYaml .peerAuthentication.labels | indent 4 }} + {{- end }} +{{- if .peerAuthentication.annotations }} + annotations: +{{ toYaml .peerAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .peerAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} + mtls: + mode: {{ .peerAuthentication.mtls.mode }} +{{- if $.Values.istio.peerAuthentication.portLevelMtls }} + portLevelMtls: +{{ toYaml $.Values.istio.peerAuthentication.portLevelMtls | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-requestauthentication.yaml 
b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-requestauthentication.yaml new file mode 100644 index 00000000000..3429cee1462 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-requestauthentication.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .requestAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: RequestAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .requestAuthentication.labels }} +{{ toYaml .requestAuthentication.labels | indent 4 }} + {{- end }} +{{- if .requestAuthentication.annotations }} + annotations: +{{ toYaml .requestAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .requestAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} +{{- if $.Values.istio.requestAuthentication.jwtRules }} + jwtRules: +{{ toYaml $.Values.istio.requestAuthentication.jwtRules | indent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-virtualservice.yaml new file mode 100644 index 00000000000..af61039b8db --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/istio-virtualservice.yaml @@ -0,0 +1,50 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} + gateways: + {{- if .gateway.enabled }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + - {{ .gateway.host | quote }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . 
| quote }} + {{- end }} +{{- else }} + hosts: + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/keda-autoscaling.yaml new file mode 100644 index 00000000000..7eb999bb486 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/keda-autoscaling.yaml @@ -0,0 +1,64 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-keda + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/metrics-service-monitor.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/metrics-service-monitor.yaml new file mode 100644 index 00000000000..4e9e544f508 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template 
".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_pod_label_rollouts_pod_template_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - devtron_app_hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/networkpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/networkpolicy.yaml new file mode 100644 index 00000000000..350232a23b6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/networkpolicy.yaml @@ -0,0 +1,50 @@ +{{- if .Values.networkPolicy.enabled -}} +{{- with .Values.networkPolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-networkpolicy + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.networkPolicy.labels }} +{{ toYaml $.Values.networkPolicy.labels | indent 4 }} + {{- end }} +{{- if $.Values.networkPolicy.annotations }} + annotations: +{{ toYaml $.Values.networkPolicy.annotations | indent 4 }} +{{- end }} +spec: + podSelector: +{{- if .podSelector.matchExpressions }} + matchExpressions: +{{ toYaml $.Values.networkPolicy.podSelector.matchExpressions | indent 6 }} +{{- end }} +{{- if .podSelector.matchLabels }} + matchLabels: +{{ toYaml $.Values.networkPolicy.podSelector.matchLabels | indent 6 }} +{{- else }} + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} +{{- end }} +{{- if .policyTypes }} + policyTypes: +{{ toYaml $.Values.networkPolicy.policyTypes | indent 4 }} +{{- end }} +{{- if .ingress }} + ingress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4 }} +{{- end }} +{{- if .egress }} + egress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4}} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000000..c9cbb4162d4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ 
.Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/pre-sync-job.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/pre-sync-job.yaml new file mode 100644 index 00000000000..cd733d48576 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/pre-sync-job.yaml @@ -0,0 +1,23 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/prometheusrules.yaml new file mode 100644 index 00000000000..90f398bff4c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/prometheusrules.yaml @@ -0,0 +1,22 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template ".Chart.Name .fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + - name: {{ template ".Chart.Name .fullname" $ }} + rules: {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/secret.yaml new file mode 100644 index 00000000000..26a17b968ca --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/secret.yaml @@ -0,0 +1,69 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .roleARN }} + roleArn: .roleARN + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: {{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/service.yaml new file mode 100644 index 00000000000..da6917be6d1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/service.yaml @@ -0,0 +1,83 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges )}} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . 
}} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} + nodePort: {{ .nodePort }} + {{- end }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + app: {{ template ".Chart.Name .name" . }} +{{- if eq .Values.deploymentType "BLUE-GREEN" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + targetPort: {{ .name }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + app: {{ template ".Chart.Name .name" . }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/serviceaccount.yaml new file mode 100644 index 00000000000..ac258610fa8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . }} + {{- if .Values.podLabels }} + labels: +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/servicemonitor.yaml new file mode 100644 index 00000000000..1f90c722cb1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" . }}-sm + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicePort }} + - port: {{ .name }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/sidecar-configmap.yaml new file mode 100644 index 00000000000..cf32679409a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/sidecar-configmap.yaml @@ -0,0 +1,169 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": "cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + "endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + 
"protocol": "TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} +{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }} + "timeout": "{{ $element.envoyTimeout }}", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/winter-soldier.yaml new file mode 100644 index 00000000000..2d3e7bae0fe --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/templates/winter-soldier.yaml @@ -0,0 +1,41 @@ +{{- if .Values.winterSoldier.enabled }} +apiVersion: {{ $.Values.winterSoldier.apiVersion }} +kind: Hibernator +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-hibernator + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.winterSoldier.labels }} +{{ toYaml .Values.winterSoldier.labels | indent 4 }} + {{- end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + name: {{ include ".Chart.Name .fullname" $ }} + type: {{ .Values.winterSoldier.type | quote }} + fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14}} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/test_values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/test_values.yaml new file mode 100644 index 00000000000..aa0f16d568a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/test_values.yaml @@ -0,0 +1,628 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +rolloutLabels: + name: abhinav + Company: Devtron + Job: DevOps + +rolloutAnnotations: + name: abhinav + Company: Devtron + Job: DevOps + +containerSpec: + lifecycle: + enabled: true + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +imagePullSecrets: + - test1 + - test2 +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyTimeout: 15 + targetPort: 8080 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + + - name: app1 + port: 8090 + targetPort: 1234 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +secret: + enabled: false + +service: + enabled: true + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromCongigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + + +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + 
successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +StartupProbe: + Path: "/" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + + +ingress: + enabled: true + className: nginx + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" +# Old Ingress Format +# host: "ingress-example.com" +# path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + additionalBackends: + - path: /internal + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +dbMigrationConfig: + enabled: false + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: + - name: config-secret-1 + type: environment + external: false + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + date: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs +# name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +podDisruptionBudget: {} + # minAvailable: 1 + # maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: false +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" + +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: {} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/values.yaml new file mode 100644 index 00000000000..97d1ddee937 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-19-0/values.yaml @@ -0,0 +1,613 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: +# Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +restartPolicy: Always + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + # TargetCPUUtilizationPercentage: 90 + # TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +secret: + enabled: false + +service: + enabled: true + type: ClusterIP +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromConfigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] +# - name: Custom-Header +# value: abc + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] +# - name: Custom-Header +# value: abc + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # 
hosts: + # - chart-example.local + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +hibernator: + enable: false + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] +# - name: config-secret-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # - name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. 
+ # # This will assign your application's docker image to init container. + # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + type: Rollout + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # autoLabelSelector: true + # customLabelSelector: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +ambassadorMapping: + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.helmignore b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.helmignore new file mode 100644 index 00000000000..50af0317254 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.image_descriptor_template.json new file mode 100644 index 00000000000..bd2472da075 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/Chart.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/Chart.yaml new file mode 100644 index 00000000000..69ce8a7da83 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: reference-chart_4-20-0 +version: 4.20.0 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/README.md b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/README.md new file mode 100644 index 00000000000..dc967d598dd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/README.md @@ -0,0 +1,911 @@ + +# Rollout Deployment Chart - v4.20.0 + +## 1. Yaml File - + +### Container Ports + +This defines ports on which application services will be exposed to other services + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true + protocol: TCP +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | +|`protocol`| Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
| + ### EnvVariables +```yaml +EnvVariables: [] +``` +To set environment variables for the containers that run in the Pod. + ### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is used to set an environment variable from a Secret, specifying the environment variable name, the Secret name, and the key whose value is used for that environment variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is used to set an environment variable from a ConfigMap, specifying the environment variable name, the ConfigMap name, and the key whose value is used for that environment variable. + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. The probe endpoint should return an error code only in case of a non-recoverable error. + +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It defines the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers, and you can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. | +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or a percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. + +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or a percentage of the replicas count. +The default value of "MaxSurge: " is 25%. + +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. The probe endpoint should return an error code for errors that can be recovered from once traffic is stopped. 
+ +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + +### Pod Disruption Budget + +You can create `PodDisruptionBudget` for each application. A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions. For example, an application would like to ensure the number of replicas running is never brought below the certain number. + +```yaml +podDisruptionBudget: + minAvailable: 1 +``` + +or + +```yaml +podDisruptionBudget: + maxUnavailable: 50% +``` + +You can specify either `maxUnavailable` or `minAvailable` in a PodDisruptionBudget and it can be expressed as integers or as a percentage + +| Key | Description | +| :--- | :--- | +| `minAvailable` | Evictions are allowed as long as they leave behind 1 or more healthy pods of the total number of desired replicas. | +| `maxUnavailable` | Evictions are allowed as long as at most 1 unhealthy replica among the total number of desired replicas. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. | +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. 
| +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. + +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + containerResource: + enabled: true + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | +| `containerResource` | Used to scale resource as per container resource. | + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. By default, devtron uses {app-name}-{environment-name} as release fullname. + +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . 
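+
+For reference, a docker-registry secret such as `regcred` can be created from a Kubernetes `Secret` manifest of type `kubernetes.io/dockerconfigjson`. The sketch below is only illustrative; the secret name and the base64 payload are placeholders you would replace with your own registry credentials.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: regcred
+type: kubernetes.io/dockerconfigjson
+data:
+  # placeholder: base64-encoded docker config.json holding your registry credentials
+  .dockerconfigjson: eyJhdXRocyI6ey4uLn19
+```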
+ +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Ingress Internal + +This allows private access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + + +### additionalBackends + +This defines additional backend path in the ingress . + +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + args: + - sleep 300 + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. 
+ +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + + +### Winter-Soldier +Winter Soldier can be used to +- cleans up (delete) Kubernetes resources +- reduce workload pods to 0 + +**_NOTE:_** After deploying this we can create the Hibernator object and provide the custom configuration by which workloads going to delete, sleep and many more. for more information check [the main repo](https://github.com/devtron-labs/winter-soldier) + +Given below is template values you can give in winter-soldier: +```yaml +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + action: sleep + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + targetReplicas: [] + fieldSelector: [] +``` +Here, +| Key | values | Description | +| :--- | :--- | :--- | +| `enabled` | `fasle`,`true` | decide the enabling factor | +| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | specific api version | +| `action` | `sleep`,`delete`, `scale` | This specify the action need to perform. | +| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | It use to specify the timeZone used. (It uses standard format. please refer [this](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) | +| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | It use to define time period/range on which the user need to perform the specified action. you can have multiple timeRanges.
For example, these settings could take `action` on Sat and Sun from 00:00 to 23:59:59 (see the example below). |
+| `targetReplicas` | `[n]` : n - number of replicas to scale. | This is a mandatory field when the `action` is `scale`.
Defalut value is `[]`. | +| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | These value will take a list of methods to select the resources on which we perform specified `action` . | + + +here is an example, +```yaml +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now()) +``` +Above settings will take action on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and 20:00 to 23:59:59. If `action:sleep` then runs hibernate at timeFrom and unhibernate at `timeTo`. If `action: delete` then it will delete workloads at `timeFrom` and `timeTo`. Here the `action:scale` thus it scale the number of resource replicas to `targetReplicas: [1,1,1]`. Here each element of `targetReplicas` array is mapped with the corresponding elments of array `timeRangesWithZone/timeRanges`. Thus make sure the length of both array is equal, otherwise the cnages cannot be observed. + +The above example will select the application objects which have been created 10 hours ago across all namespaces excluding application's namespace. Winter soldier exposes following functions to handle time, cpu and memory. + +- ParseTime - This function can be used to parse time. For eg to parse creationTimestamp use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z') +- AddTime - This can be used to add time. For eg AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') ll add 10h to the time. Use d for day, h for hour, m for minutes and s for seconds. Use negative number to get earlier time. +- Now - This can be used to get current time. +- CpuToNumber - This can be used to compare CPU. For eg any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) will check if any resource.requests is less than 60Mi. + + + +### Resources + +These define minimum and maximum RAM and CPU available to the application. + +```yaml +resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "0.10" + memory: "100Mi" +``` + +Resources are required to set CPU and memory usage. + +#### Limits + +Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted. + +#### Requests + +Requests are what the container is guaranteed to get. + +### Service + +This defines annotations and the type of service, optionally can define name also. + +```yaml + service: + type: ClusterIP + annotations: {} + sessionAffinity: + enabled: true + sessionAffinityConfig: {} +``` + +### Volumes + +```yaml +volumes: + - name: log-volume + emptyDir: {} + - name: logpv + persistentVolumeClaim: + claimName: logpvc +``` + +It is required when some values need to be read from or written to an external disk. + +### Volume Mounts + +```yaml +volumeMounts: + - mountPath: /var/log/nginx/ + name: log-volume + - mountPath: /mnt/logs + name: logpvc + subPath: employee +``` + +It is used to provide mounts to the volume. 
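+
+The two sections work together by name: a volume declared under `volumes` is attached to a container only when a `volumeMounts` entry references the same `name`. A minimal sketch, reusing the `log-volume` example from above:
+
+```yaml
+volumes:
+  - name: log-volume
+    emptyDir: {}
+volumeMounts:
+  - name: log-volume          # must match the volume name declared above
+    mountPath: /var/log/nginx/
+```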
+ +### Affinity and anti-affinity + +```yaml +Spec: + Affinity: + Key: + Values: +``` + +Spec is used to define the desire state of the given container. + +Node Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node. + +Inter-pod affinity allow you to constrain which nodes your pod is eligible to be scheduled based on labels on pods. + +#### Key + +Key part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +#### Values + +Value part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +### Tolerations + +```yaml +tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +``` + +Taints are the opposite, they allow a node to repel a set of pods. + +A given pod can access the given node and avoid the given taint only if the given pod satisfies a given taint. + +Taints and tolerations are a mechanism which work together that allows you to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it. + +### Arguments + +```yaml +args: + enabled: false + value: [] +``` + +This is used to give arguments to command. + +### Command + +```yaml +command: + enabled: false + value: [] +``` + +It contains the commands for the server. + +| Key | Description | +| :--- | :--- | +| `enabled` | To enable or disable the command. | +| `value` | It contains the commands. | + + +### Containers +Containers section can be used to run side-car containers along with your main container within same pod. Containers running within same pod can share volumes and IP Address and can address each other @localhost. We can use base image inside container by setting the reuseContainerImage flag to `true`. + +```yaml + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate +``` + +### Prometheus + +```yaml + prometheus: + release: monitoring +``` + +It is a kubernetes monitoring tool and the name of the file to be monitored as monitoring in the given case.It describes the state of the prometheus. + +### rawYaml + +```yaml +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + name: my-service + spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + type: ClusterIP +``` +Accepts an array of Kubernetes objects. You can specify any kubernetes yaml here and it will be applied when your app gets deployed. + +### Grace Period + +```yaml +GracePeriod: 30 +``` +Kubernetes waits for the specified time called the termination grace period before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`. + +A Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it. 
This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. If a node runs out of resources, Kubernetes terminates pods to free those resources. It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. + + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler(HPA). + +Example for autosccaling with KEDA using Prometheus metrics is given below: +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: + restoreToOriginalReplicaCount: true + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: prometheus + metadata: + serverAddress: http://:9090 + metricName: http_request_total + query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",} + threshold: "50" + triggerAuthentication: + enabled: false + name: + spec: {} + authenticationRef: {} +``` +Example for autosccaling with KEDA based on kafka is given below : +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: {} + triggers: + - type: kafka + metadata: + bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092 + topic: Orders-Service-ESP.info + lagThreshold: "100" + consumerGroup: oders-remove-delivered-packages + allowIdleConsumers: "true" + triggerAuthentication: + enabled: true + name: keda-trigger-auth-kafka-credential + spec: + secretTargetRef: + - parameter: sasl + name: keda-kafka-secrets + key: sasl + - parameter: username + name: keda-kafka-secrets + key: username + authenticationRef: + name: keda-trigger-auth-kafka-credential +``` + +### Security Context +A security context defines privilege and access control settings for a Pod or Container. 
+ +To add a security context for main container: +```yaml +containerSecurityContext: + allowPrivilegeEscalation: false +``` + +To add a security context on pod level: +```yaml +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 +``` + +### Topology Spread Constraints +You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. + +```yaml +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: {} + minDomains: 1 + nodeAffinityPolicy: Ignore +``` + +### Persistent Volume Claim +You can use persistent volume in your stateless application + +```yaml +persistentVolumeClaim: + name: my-pvc + storageClassName: default + accessMode: + - ReadWriteOnce + mountPath: /tmp + +``` + +### Vertical Pod Autoscaling +This is connected to VPA and controls scaling up and down in response to request load. +```yaml +verticalPodScaling: + enabled: true + resourcePolicy: {} + updatePolicy: {} + ``` + +### Scheduler Name + +You can provide you own custom scheduler to schedule your application + +```yaml +schedulerName: "" +``` + + +### Deployment Metrics + +It gives the realtime metrics of the deployed applications + +| Key | Description | +| :--- | :--- | +| `Deployment Frequency` | It shows how often this app is deployed to production | +| `Change Failure Rate` | It shows how often the respective pipeline fails. | +| `Mean Lead Time` | It shows the average time taken to deliver a change to production. | +| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. | + +## 2. Show application metrics + +If you want to see application metrics like different HTTP status codes metrics, application throughput, latency, response time. Enable the Application metrics from below the deployment template Save button. After enabling it, you should be able to see all metrics on App detail page. By default it remains disabled. +![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png) + +Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create [Workflow](workflow/) to do CI/CD. + +### Helm Chart Json Schema + +Helm Chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values. + +### Other Validations in Json Schema + +The values of CPU and Memory in limits must be greater than or equal to in requests respectively. Similarly, In case of envoyproxy, the values of limits are greater than or equal to requests as mentioned below. 
+``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/app-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/app-values.yaml new file mode 100644 index 00000000000..6ef81ac0779 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/app-values.yaml @@ -0,0 +1,443 @@ +# Mandatory configs +podDisruptionBudget: {} + +rolloutLabels: {} +rolloutAnnotations: {} + +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +restartPolicy: Always +service: + # enabled: true + type: ClusterIP + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP + resizePolicy: [] +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + +affinity: + enabled: false + values: {} + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "example.com" + tls: + enabled: false + secretName: secret-name + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: {} + labels: {} + type: Rollout + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: + - 
AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + + + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" +peristentVolumeClaim: {} + + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/env-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/env-values.yaml new file mode 100644 index 00000000000..5cd07c0269e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + key: "" + Values: nodes + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/pipeline-values.yaml new file mode 100644 index 00000000000..40a5ec633dd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/pipeline-values.yaml @@ -0,0 +1,24 @@ +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: {} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/release-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/release-values.yaml new file mode 100644 index 00000000000..48eb3f482c1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/schema.json new file mode 100644 index 00000000000..da5cce59eab --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/schema.json @@ -0,0 +1,1363 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs": { + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + 
}, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath": { + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath": { + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": "object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": "used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": 
{ + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + "TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + "items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is 
called immediately after a container is created" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases": { + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip": { + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames": { + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": [ + "IfNotPresent", + "Always" + ] + } + } + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time 
Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + 
"items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "networkPolicy":{ + "type": "object", + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "title": "Network Policy", + "properties": { + "enabled":{ + "type":"boolean", + "description": "used to enable or disable NetworkPolicy" + }, + "annotations":{ + "type": "object", + "description": "Annotations for NetworkPolicy" + }, + "labels":{ + "type":"object", + "description": "Labels for NetworkPolicy" + }, + "podSelector":{ + "type": "object", + "description": "Selects the pods to which this NetworkPolicy object applies", + "properties": { + "matchExpressions":{ + "type":"array", + "description": "list of label selector" + }, + "matchLabels":{ + "type":"object", + "description": "map of {key,value} pairs" + } + } + }, + "policyTypes":{ + "type":"array", + "description": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress,Egress." + }, + "ingress":{ + "type":"array", + "description": "List of ingress rules to be applied to the selected pods" + }, + "egress":{ + "type":"array", + "description": "List of egress rules to be applied to the selected pods" + } + } + }, + "istio":{ + "type": "object", + "description": "Istio Service mesh", + "title": "Istio" + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. 
of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type": "object", + "description": "define the container specic configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created.You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + "pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "tell how much to wait for given period of time before switch active the container", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type": "object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable": { + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable": { + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod." + } + } + }, + "podExtraSpecs": { + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type": "object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type": "object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. 
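For orientation, the kedaAutoscaling block below sketches how the properties listed above fit together in values.yaml; every number, name, and the cpu trigger are illustrative placeholders rather than chart defaults.

kedaAutoscaling:
  enabled: true
  minReplicaCount: 1
  maxReplicaCount: 2
  envSourceContainerName: ""        # container whose environment the scaler may read
  advanced: {}
  authenticationRef: {}
  triggerAuthentication:
    enabled: false
    name: ""
    spec: {}
  triggers:                         # example trigger only; any KEDA scaler spec can go here
    - type: cpu
      metadata:
        type: Utilization
        value: "80"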
One can specify any kubernetes yaml here & it will be applied when a app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of Replicas of pod", + "title": "REplica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable service", + "title": "Enabled" + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" 
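As a quick cross-reference, here is a hypothetical values fragment that satisfies the replicaCount, resources, service, serviceAccount, tolerations and topologySpreadConstraints fields documented above; the concrete numbers and names are invented for illustration and are not this chart's defaults.

replicaCount: 2
resources:
  limits:
    cpu: "0.5"
    memory: 500Mi
  requests:
    cpu: "0.25"
    memory: 250Mi
service:
  type: ClusterIP
  enabled: true
  annotations: {}
serviceAccount:
  create: false
  name: ""
  annotations: {}
tolerations: []
topologySpreadConstraints: []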
+ }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/secrets-test-values.yaml new file mode 100644 index 00000000000..4a20404db87 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/NOTES.txt new file mode 100644 index 00000000000..2b144781688 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/_helpers.tpl new file mode 100644 index 00000000000..170e5fb2739 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/_helpers.tpl @@ -0,0 +1,150 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
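For readability, the secrets-test-values.yaml fixture above corresponds to the ConfigSecrets shape sketched below; this is only a restatement of the test data with the base64 payloads annotated, not an extra file in the chart.

ConfigSecrets:
  enabled: true
  secrets:
    - name: normal-secret
      type: volume                        # mounted at mountPath by the deployment template
      external: false
      externalType: ""
      mountPath: /test
      data:
        standard_key: c3RhbmRhcmQtdmFsdWU=   # base64 for "standard-value"
    - name: external-secret-3
      type: environment                   # injected through envFrom as a secretRef
      external: true
      externalType: AWSSecretsManager
      mountPath: ""
      data:
        secret_key: U0VDUkVUIERBVEE=          # base64 for "SECRET DATA"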
+*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . 
-}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ambassador.yaml new file mode 100644 index 00000000000..9d4a431c26d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ambassador.yaml @@ -0,0 +1,94 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + {{- if .name }} + name: {{ .name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }}-mapping + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} + hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/configmap.yaml new file mode 100644 index 00000000000..4e7879665e4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/deployment.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/deployment.yaml 
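To make the value shape consumed by configmap.yaml (and, further down, by deployment.yaml's envFrom and volume handling) easier to follow, here is a hypothetical ConfigMaps block; the map names, keys and mount path are invented for illustration.

ConfigMaps:
  enabled: true
  maps:
    - name: app-env                  # rendered as app-env-{{ .Values.app }} because external is false
      type: environment              # exposed to the container via envFrom
      external: false
      data:
        LOG_LEVEL: info
    - name: app-files
      type: volume                   # mounted as a volume at mountPath
      external: false
      mountPath: /etc/app-config
      subPath: false
      filePermission: ""
      data:
        app.properties: |
          feature.flag=true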
new file mode 100644 index 00000000000..4a115fc563c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/deployment.yaml @@ -0,0 +1,612 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasPVCExists := false -}} + {{- if .Values.persistentVolumeClaim.name }} + {{- $hasPVCExists = true }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.rolloutLabels }} +{{ toYaml .Values.rolloutLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.rolloutAnnotations }} + annotations: +{{ toYaml .Values.rolloutAnnotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} + restartPolicy: Always +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- else if $.Values.affinity.enabled }} + affinity: +{{ toYaml .Values.affinity.values | indent 8 }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . 
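The affinity and serviceAccountName lines above read the value shapes sketched below; the key names come from the template, while the example key/value pair is invented.

# Option 1: simple node affinity, used when both Spec.Affinity.Key and Spec.Affinity.Values are set
Spec:
  Affinity:
    Key: purpose
    Values: production              # single value; the template defaults it to "nodes" when empty
# Option 2: a full affinity object passed through verbatim when affinity.enabled is true
affinity:
  enabled: false
  values: {}
serviceAccountName: ""              # a non-empty value overrides the serviceAccountName helper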
}} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . 
| indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: {{ $.Chart.Name }} + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . 
| sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + {{- if and .name .fieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ $.Values.ReadinessProbe.scheme }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- 
end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} +{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . }} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." 
"-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/envoy-config-volume +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resziePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- if .Values.persistentVolumeClaim.name }} + - name: {{.Values.persistentVolumeClaim.name}}-vol + persistentVolumeClaim: + claimName: {{.Values.persistentVolumeClaim.name }} +{{- end}} +{{- with .Values.volumes }} +{{ toYaml . | trim | indent 8 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." 
"-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} []{{- end }} + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "BLUE-GREEN" }} + blueGreen: # A new field that used to provide configurable options for a BlueGreenUpdate strategy + previewService: {{ template ".previewservicename" . }} # Reference to a service that can serve traffic to a new image before it receives the active traffic + activeService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + autoPromotionSeconds: {{ $.Values.deployment.strategy.blueGreen.autoPromotionSeconds }} + scaleDownDelaySeconds: {{ $.Values.deployment.strategy.blueGreen.scaleDownDelaySeconds }} + previewReplicaCount: {{ $.Values.deployment.strategy.blueGreen.previewReplicaCount }} + autoPromotionEnabled: {{ $.Values.deployment.strategy.blueGreen.autoPromotionEnabled }} + {{- else if eq .Values.deploymentType "ROLLING" }} + canary: + stableService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- else if eq .Values.deploymentType "RECREATE" }} + recreate: + activeService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + {{- else if eq .Values.deploymentType "CANARY" }} + canary: + stableService: {{ template ".servicename" . 
}} # Reference to a service that serves end-user traffic to the replica set + maxSurge: {{ $.Values.deployment.strategy.canary.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.canary.maxUnavailable }} + steps: +{{ toYaml .Values.deployment.strategy.canary.steps | indent 8 }} + {{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/externalsecrets.yaml new file mode 100644 index 00000000000..efd291af5d2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/externalsecrets.yaml @@ -0,0 +1,76 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: {{ .name}} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} + creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/generic.yaml new file mode 100644 index 00000000000..db95e842670 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . 
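For the externalsecrets.yaml template above, a hypothetical ESO-backed entry under ConfigSecrets might look like the sketch below; the store region, remote key and property are placeholders, and a real SecretStore would also need provider auth configuration.

ConfigSecrets:
  enabled: true
  secrets:
    - name: eso-secret
      type: volume
      external: true
      externalType: ESO_AWSSecretsManager
      mountPath: /etc/eso
      esoSecretData:
        secretStore:                 # rendered as a SecretStore carrying this provider block
          aws:
            service: SecretsManager
            region: us-east-1        # auth configuration omitted for brevity
        refreshInterval: 1h
        esoData:
          - secretKey: db_password   # key created inside the resulting Kubernetes Secret
            key: prod/db             # remote secret name in the external store
            property: password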
}} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/hpa.yaml new file mode 100644 index 00000000000..c7ba46e15b5 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/hpa.yaml @@ -0,0 +1,86 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.autoscaling.labels }} +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and $.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ingress.yaml new file mode 100644 index 00000000000..021d061b734 --- 
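A compact autoscaling block wired to the HPA template above; the thresholds are placeholders, and containerResource is the optional per-container metric path the template checks before falling back to plain Resource metrics.

autoscaling:
  enabled: true
  MinReplicas: 1
  MaxReplicas: 4
  TargetCPUUtilizationPercentage: 90
  TargetMemoryUtilizationPercentage: 80
  annotations: {}
  labels: {}
  behavior: {}                 # rendered only on Kubernetes 1.18 and above
  extraMetrics: []             # rendered only on Kubernetes 1.16 and above
  containerResource:
    enabled: false             # switch to per-container CPU/memory utilization metrics
    TargetCPUUtilizationPercentage: 90
    TargetMemoryUtilizationPercentage: 80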
/dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/ingress.yaml @@ -0,0 +1,188 @@ +{{ $svcName := include ".servicename" . }} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingress.name }} + name: {{ $.Values.ingress.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingressInternal.name }} + name: {{ $.Values.ingressInternal.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress-internal + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-authorizationpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-authorizationpolicy.yaml new file mode 100644 index 00000000000..ac7b456ec5b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-authorizationpolicy.yaml @@ -0,0 +1,37 @@ +{{- with .Values.istio }} +{{- if and .enable .authorizationPolicy.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .authorizationPolicy.labels }} +{{ toYaml .authorizationPolicy.labels | indent 4 }} + {{- end }} +{{- if .authorizationPolicy.annotations }} + annotations: +{{ toYaml .authorizationPolicy.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} + action: {{ .authorizationPolicy.action }} +{{- if $.Values.istio.authorizationPolicy.provider }} + provider: +{{ toYaml $.Values.istio.authorizationPolicy.provider | indent 4 }} +{{- end }} +{{- if $.Values.istio.authorizationPolicy.rules }} + rules: +{{ toYaml $.Values.istio.authorizationPolicy.rules | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-destinationrule.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-destinationrule.yaml new file mode 100644 index 00000000000..47bef9a828e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-destinationrule.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .destinationRule.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-destinationrule + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .destinationRule.labels }} +{{ toYaml .destinationRule.labels | indent 4 }} + {{- end }} +{{- if .destinationRule.annotations }} + annotations: +{{ toYaml .destinationRule.annotations | indent 4 }} +{{- end }} +spec: + host: "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- if $.Values.istio.destinationRule.subsets 
}} + subsets: +{{ toYaml $.Values.istio.destinationRule.subsets | indent 4 }} +{{- end }} +{{- if $.Values.istio.destinationRule.trafficPolicy }} + trafficPolicy: +{{ toYaml $.Values.istio.destinationRule.trafficPolicy | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-gateway.yaml new file mode 100644 index 00000000000..d6579590100 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-gateway.yaml @@ -0,0 +1,50 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - {{ .Values.istio.gateway.host | quote -}} +{{ with .Values.istio.gateway }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - {{ .host | quote }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} + + + diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-peerauthentication.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-peerauthentication.yaml new file mode 100644 index 00000000000..481f8a96474 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-peerauthentication.yaml @@ -0,0 +1,36 @@ +{{- with .Values.istio }} +{{- if and .enable .peerAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .peerAuthentication.labels }} +{{ toYaml .peerAuthentication.labels | indent 4 }} + {{- end }} +{{- if .peerAuthentication.annotations }} + annotations: +{{ toYaml .peerAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .peerAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} + mtls: + mode: {{ .peerAuthentication.mtls.mode }} +{{- if $.Values.istio.peerAuthentication.portLevelMtls }} + portLevelMtls: +{{ toYaml $.Values.istio.peerAuthentication.portLevelMtls | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-requestauthentication.yaml 
b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-requestauthentication.yaml new file mode 100644 index 00000000000..3429cee1462 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-requestauthentication.yaml @@ -0,0 +1,34 @@ +{{- with .Values.istio }} +{{- if and .enable .requestAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: RequestAuthentication +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .requestAuthentication.labels }} +{{ toYaml .requestAuthentication.labels | indent 4 }} + {{- end }} +{{- if .requestAuthentication.annotations }} + annotations: +{{ toYaml .requestAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .requestAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} +{{- if $.Values.istio.requestAuthentication.jwtRules }} + jwtRules: +{{ toYaml $.Values.istio.requestAuthentication.jwtRules | indent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-virtualservice.yaml new file mode 100644 index 00000000000..af61039b8db --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/istio-virtualservice.yaml @@ -0,0 +1,50 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} + gateways: + {{- if .gateway.enabled }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + - {{ .gateway.host | quote }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . 
| quote }} + {{- end }} +{{- else }} + hosts: + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/keda-autoscaling.yaml new file mode 100644 index 00000000000..faf89be1251 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/keda-autoscaling.yaml @@ -0,0 +1,76 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + {{- if $.Values.kedaAutoscaling.name }} + name: {{ $.Values.kedaAutoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-keda + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/metrics-service-monitor.yaml 
b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/metrics-service-monitor.yaml new file mode 100644 index 00000000000..4e9e544f508 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_pod_label_rollouts_pod_template_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - devtron_app_hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/networkpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/networkpolicy.yaml new file mode 100644 index 00000000000..350232a23b6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/networkpolicy.yaml @@ -0,0 +1,50 @@ +{{- if .Values.networkPolicy.enabled -}} +{{- with .Values.networkPolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-networkpolicy + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.networkPolicy.labels }} +{{ toYaml $.Values.networkPolicy.labels | indent 4 }} + {{- end }} +{{- if $.Values.networkPolicy.annotations }} + annotations: +{{ toYaml $.Values.networkPolicy.annotations | indent 4 }} +{{- end }} +spec: + podSelector: +{{- if .podSelector.matchExpressions }} + matchExpressions: +{{ toYaml $.Values.networkPolicy.podSelector.matchExpressions | indent 6 }} +{{- end }} +{{- if .podSelector.matchLabels }} + matchLabels: +{{ toYaml $.Values.networkPolicy.podSelector.matchLabels | indent 6 }} +{{- else }} + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} +{{- end }} +{{- if .policyTypes }} + policyTypes: +{{ toYaml $.Values.networkPolicy.policyTypes | indent 4 }} +{{- end }} +{{- if .ingress }} + ingress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4 }} +{{- end }} +{{- if .egress }} + egress: +{{ toYaml $.Values.networkPolicy.egress | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/persistent-volume-claim.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/persistent-volume-claim.yaml new file mode 100644 index 00000000000..bf4e6dfb712 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistentVolumeClaim.name }} +kind:
PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{.Values.persistentVolumeClaim.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- with .Values.persistentVolumeClaim }} +spec: + accessModes: +{{- range .accessMode }} + - {{ . }} +{{- end }} + resources: + requests: + storage: {{ .storage | default "5Gi" }} + storageClassName: {{ .storageClassName | default "default" }} + volumeMode: {{ .volumeMode | default "Filesystem" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000000..2736332531c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,35 @@ +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- if .Values.podDisruptionBudget.name }} + name: {{ .Values.podDisruptionBudget.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 6 }} + {{- else }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/pre-sync-job.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/pre-sync-job.yaml new file mode 100644 index 00000000000..54c9f636eed --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/pre-sync-job.yaml @@ -0,0 +1,29 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/prometheusrules.yaml new file mode 100644 index 00000000000..c285de13883 --- 
/dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/prometheusrules.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + {{- if .Values.prometheusRule.name }} + name: {{ .Values.prometheusRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }} + {{- end }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + {{- if $.Values.prometheusRule.name }} + - name: {{ $.Values.prometheusRule.name }} + {{- else }} + - name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + rules: {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/secret.yaml new file mode 100644 index 00000000000..5ac3ae14101 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/secret.yaml @@ -0,0 +1,84 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .roleARN }} + roleArn: .roleARN + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: 
{{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/service.yaml new file mode 100644 index 00000000000..ac9f9bb2c7d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/service.yaml @@ -0,0 +1,101 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges )}} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . }} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} + nodePort: {{ .nodePort }} + {{- end }} + protocol: {{ .protocol }} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} + + + +{{- if eq .Values.deploymentType "BLUE-GREEN" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + targetPort: {{ .name }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . 
}} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/serviceaccount.yaml new file mode 100644 index 00000000000..f337548e942 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/servicemonitor.yaml new file mode 100644 index 00000000000..1e9e092ca55 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/servicemonitor.yaml @@ -0,0 +1,114 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + {{- if .Values.servicemonitor.name }} + name: {{ .Values.servicemonitor.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-sm + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote }} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} + {{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/sidecar-configmap.yaml new file mode 100644 index 00000000000..cf32679409a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/sidecar-configmap.yaml @@ -0,0 +1,169 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": 
"cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + "endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} +{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }} + "timeout": "{{ $element.envoyTimeout }}", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/vertical-pod-autoscaler.yaml new file mode 100644 index 00000000000..9591354bdc2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/vertical-pod-autoscaler.yaml @@ -0,0 +1,27 @@ +{{ $VerticalPodAutoScalingEnabled := include "VerticalPodAutoScalingEnabled" . 
}} +{{- if eq "true" $VerticalPodAutoScalingEnabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" . }}-vpa + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: +{{- if .Values.verticalPodScaling.resourcePolicy }} + resourcePolicy: +{{ toYaml .Values.verticalPodScaling.resourcePolicy}} +{{- end }} +{{- if .Values.verticalPodScaling.updatePolicy }} + updatePolicy: +{{ toYaml .Values.verticalPodScaling.updatePolicy}} +{{- end }} + targetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/winter-soldier.yaml new file mode 100644 index 00000000000..5ac2fd8443e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/templates/winter-soldier.yaml @@ -0,0 +1,45 @@ +{{- if .Values.winterSoldier.enabled }} +apiVersion: {{ $.Values.winterSoldier.apiVersion }} +kind: Hibernator +metadata: + {{- if .Values.winterSoldier.name }} + name: {{ .Values.winterSoldier.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hibernator + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.winterSoldier.labels }} +{{ toYaml .Values.winterSoldier.labels | indent 4 }} + {{- end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + name: {{ include ".Chart.Name .fullname" $ }} + type: {{ .Values.winterSoldier.type | quote }} + fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14}} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/test_values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/test_values.yaml new file mode 100644 index 00000000000..7077cd43276 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/test_values.yaml @@ -0,0 +1,648 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +rolloutLabels: + name: abhinav + Company: Devtron + Job: DevOps + +rolloutAnnotations: + name: abhinav + Company: Devtron + Job: DevOps + +containerSpec: + lifecycle: + enabled: true + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +imagePullSecrets: + - test1 + - test2 +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyTimeout: 15 + targetPort: 8080 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + + - name: app1 + port: 8090 + targetPort: 1234 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +secret: + enabled: false + +service: + enabled: true + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromCongigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + + +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + 
successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +StartupProbe: + Path: "/" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + + +ingress: + enabled: true + className: nginx + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" +# Old Ingress Format +# host: "ingress-example.com" +# path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + additionalBackends: + - path: /internal + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +dbMigrationConfig: + enabled: false + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: + - name: config-secret-1 + type: environment + external: false + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + date: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs +# name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +podDisruptionBudget: {} + # minAvailable: 1 + # maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: false +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" + +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: {} + +persistentVolumeClaim: + name: kamal-pvc + resources: + requests: + storage: 5Gi + storageClassName: my-storage-class + +affinity: + enabled: false + values: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S1 + topologyKey: topology.kubernetes.io/zone diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/values.yaml new file mode 100644 index 00000000000..bd87d4fc2ec --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-20-0/values.yaml @@ -0,0 +1,635 @@ +# Default values for myapp. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP + # servicemonitor: + # enabled: true + # path: /abc + # scheme: 'http' + # interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: #required/preferred + Key: +# Key: kops.k8s.io/instancegroup + Values: + + +affinity: + enabled: false + values: {} + + +image: + pullPolicy: IfNotPresent + +restartPolicy: Always + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enable: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +secret: + enabled: false + +service: + enabled: true + type: ClusterIP +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + sessionAffinity: + enabled: false + sessionAffinityConfig: {} + + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromConfigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + 
paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +hibernator: + enable: false + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] +# - name: config-secret-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. 
+ # # This will assign your application's docker image to init container. + # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . +# - name: volume-mount-hack +# image: busybox +# command: ["sh", "-c", "chown -R 1000:1000 logs"] +# volumeMounts: +# - mountPath: /usr/local/airflow/logs +# name: logs-data +# resizePolicy: +# - resourceName: cpu +# restartPolicy: NotRequired +# - resourceName: memory +# restartPolicy: RestartContainer + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + type: Rollout + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +topologySpreadConstraints: [] + +schedulerName: "" + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +ambassadorMapping: + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 + + +persistentVolumeClaim: {} + + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/app-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/app-values.yaml index 1b1912aa689..ffbe895ffde 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/app-values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/app-values.yaml @@ -234,6 +234,9 @@ Spec: Key: # Key: kops.k8s.io/instancegroup Values: +affinity: + enabled: false + values: {} ambassadorMapping: enabled: false @@ -259,6 +262,10 @@ autoscaling: annotations: {} labels: {} behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 # scaleDown: # stabilizationWindowSeconds: 300 # policies: @@ -311,7 +318,7 @@ servicemonitor: additionalLabels: {} envoyproxy: - image: quay.io/devtron/envoy:v1.14.1 + image: quay.io/devtron/envoy:v1.16.0 configMapName: "" lifecycle: {} resources: @@ -386,3 +393,6 @@ hostAliases: [] # hostnames: # - "foo.remote" # - "bar.remote" + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/_helpers.tpl index efbdad6de47..75ceac27e9f 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/_helpers.tpl +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/_helpers.tpl @@ -60,6 +60,14 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{- end -}} +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + {{/* Create chart name and version as used by the chart label. 
*/}} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/externalsecrets.yaml index 129278add1d..efd291af5d2 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/externalsecrets.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/externalsecrets.yaml @@ -51,7 +51,15 @@ spec: {{- end }} target: name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} data: {{- range .esoSecretData.esoData }} - secretKey: {{ .secretKey }} @@ -64,4 +72,5 @@ spec: {{- end}} {{- end}} {{- end}} +{{- end}} {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/hpa.yaml index a1fecd0cc37..bfe8efe8aef 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/hpa.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/hpa.yaml @@ -38,6 +38,28 @@ spec: minReplicas: {{ $.Values.autoscaling.MinReplicas }} maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: {{ $.Chart.Name }} + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} - type: Resource resource: diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/keda-autoscaling.yaml index b0e804f591a..db5b1cf81b4 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/keda-autoscaling.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/keda-autoscaling.yaml @@ -13,6 +13,17 @@ metadata: release: {{ $.Release.Name }} appId: {{ $.Values.app | quote }} envId: {{ $.Values.env | quote }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} spec: scaleTargetRef: apiVersion: apps/v1 diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/service.yaml index 8890359e31c..5900bf88904 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/service.yaml +++ 
b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/service.yaml @@ -43,7 +43,7 @@ spec: {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} nodePort: {{ .nodePort }} {{- end }} - protocol: TCP + protocol: {{ .protocol }} name: {{ .name }} {{- end }} {{- if $.Values.appMetrics }} @@ -56,6 +56,13 @@ spec: {{- else }} app: {{ template ".Chart.Name .name" . }} {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} {{- end }} --- {{- if or .Values.service.enabled .Values.serviceheadless.enabled }} @@ -114,6 +121,13 @@ spec: {{- else }} app: {{ template ".Chart.Name .name" . }} {{- end }} +{{- if .Values.serviceheadless.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.serviceheadless.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.serviceheadless.sessionAffinityConfig | indent 4 }} +{{- end }} type: ClusterIP {{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges )}} loadBalancerSourceRanges: @@ -121,4 +135,4 @@ spec: - {{ . }} {{- end }} {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml index 276a50211e7..57d745dbe8b 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml @@ -39,7 +39,7 @@ spec: interval: {{ .servicemonitor.interval}} {{- end }} {{- if .servicemonitor.scrapeTimeout }} - scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote}} {{- end }} {{- if .servicemonitor.basicAuth }} basicAuth: diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/statefulset.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/statefulset.yaml index f693b05ff49..55cb8ed17ac 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/statefulset.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/statefulset.yaml @@ -119,12 +119,18 @@ spec: operator: In values: - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- else if $.Values.affinity.enabled }} + affinity: +{{ toYaml .Values.affinity.values | indent 8 }} {{- end }} {{- if $.Values.serviceAccountName }} serviceAccountName: {{ $.Values.serviceAccountName }} {{- else }} serviceAccountName: {{ template "serviceAccountName" . 
}} {{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} {{- if .Values.nodeSelector }} nodeSelector: {{ toYaml $.Values.nodeSelector | indent 10 }} @@ -148,6 +154,17 @@ spec: - maxSkew: {{ .maxSkew }} topologyKey: {{ .topologyKey }} whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} labelSelector: matchLabels: {{- if and .autoLabelSelector .customLabelSelector }} @@ -181,6 +198,10 @@ spec: command: {{ toYaml .command | indent 12 -}} {{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} {{- if .resources}} resources: {{ toYaml .resources | indent 12 -}} @@ -198,7 +219,7 @@ spec: containers: {{- if $.Values.appMetrics }} - name: envoy - image: {{ $.Values.envoyproxy.image | default "envoyproxy/envoy:v1.14.1"}} + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} {{- if $.Values.envoyproxy.lifecycle }} lifecycle: {{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} @@ -286,7 +307,7 @@ spec: {{- range $.Values.ContainerPort }} - name: {{ .name}} containerPort: {{ .port }} - protocol: TCP + protocol: {{ .protocol }} {{- end}} {{- if and $.Values.command.enabled $.Values.command.workingDir }} workingDir: {{ $.Values.command.workingDir }} @@ -301,9 +322,9 @@ spec: {{- end }} env: - name: CONFIG_HASH - value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }} + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} - name: SECRET_HASH - value: {{ include (print $.Chart.Name "/templates/secret.yaml") . | sha256sum }} + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} - name: DEVTRON_APP_NAME value: {{ template ".Chart.Name .name" $ }} - name: POD_NAME @@ -376,6 +397,7 @@ spec: httpGet: path: {{ $.Values.LivenessProbe.Path }} port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} {{- if $.Values.LivenessProbe.httpHeaders }} httpHeaders: {{- range $.Values.LivenessProbe.httpHeaders}} @@ -502,7 +524,22 @@ spec: mountPath: {{ $cmMountPath }} {{- else }} - {{- range $k, $v := .data }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . }} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey - name: {{ $cmName | replace "." 
"-"}}-vol mountPath: {{ $cmMountPath}}/{{ $k}} subPath: {{ $k}} @@ -511,8 +548,89 @@ spec: {{- end }} {{- end }} {{- end }} - {{- if and (eq (len .Values.volumes) 0) (eq (len .Values.statefulSetConfig.volumeClaimTemplates) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} - {{- if and (eq (len .Values.volumeMounts) 0) (eq (len .Values.statefulSetConfig.volumeClaimTemplates) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .env }} + env: +{{ toYaml .env | indent 12 }} +{{- end }} + {{- if .envFrom }} + envFrom: +{{ toYaml .env | indent 12 }} +{{- end }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resziePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . 
| indent 10 }} +{{- end}} +{{- end}} +{{- end}} + + volumes: {{- if $.Values.appMetrics }} - name: envoy-config-volume diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/vertical-pod-autoscaler.yaml new file mode 100644 index 00000000000..27de013e0e2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/vertical-pod-autoscaler.yaml @@ -0,0 +1,27 @@ +{{ $VerticalPodAutoScalingEnabled := include "VerticalPodAutoScalingEnabled" . }} +{{- if eq "true" $VerticalPodAutoScalingEnabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" . }}-vpa + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: +{{- if .Values.verticalPodScaling.resourcePolicy }} + resourcePolicy: +{{ toYaml .Values.verticalPodScaling.resourcePolicy}} +{{- end }} +{{- if .Values.verticalPodScaling.updatePolicy }} + updatePolicy: +{{ toYaml .Values.verticalPodScaling.updatePolicy}} +{{- end }} + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include ".Chart.Name .fullname" $ }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/values.yaml index 5c534b4541f..bc8aff2ffcd 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/values.yaml @@ -114,18 +114,23 @@ kedaAutoscaling: secret: enabled: false serviceheadless: - enabled: true + enabled: false + sessionAffinity: + enabled: false service: type: ClusterIP enabled: false #name: "test-service" annotations: {} + sessionAffinity: + enabled: false # test1: test2 # test3: test4 statefulSetConfig: mountPath: "/tmp" serviceheadless: enabled: false + volumeClaimTemplates: [] # - spec: @@ -600,3 +605,9 @@ tolerations: [] imagePullSecrets: [] # - test1 # - test2 + +affinity: + enabled: false + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/sql/029000_019_release_rbac.down.sql b/scripts/sql/28901900_release_rbac.down.sql similarity index 100% rename from scripts/sql/029000_019_release_rbac.down.sql rename to scripts/sql/28901900_release_rbac.down.sql diff --git a/scripts/sql/029000_019_release_rbac.up.sql b/scripts/sql/28901900_release_rbac.up.sql similarity index 100% rename from scripts/sql/029000_019_release_rbac.up.sql rename to scripts/sql/28901900_release_rbac.up.sql diff --git a/scripts/sql/029100_019_notification_stettings.down.sql b/scripts/sql/29001900_notification_settings.down.sql similarity index 100% rename from scripts/sql/029100_019_notification_stettings.down.sql rename to scripts/sql/29001900_notification_settings.down.sql diff --git a/scripts/sql/029100_019_notification_settings.up.sql b/scripts/sql/29001900_notification_settings.up.sql similarity index 100% rename from scripts/sql/029100_019_notification_settings.up.sql rename to scripts/sql/29001900_notification_settings.up.sql diff --git a/scripts/sql/029200_019_helm_app_deployment_message.down.sql b/scripts/sql/29101900_helm_app_deployment_message.down.sql similarity index 100% rename from 
scripts/sql/029200_019_helm_app_deployment_message.down.sql rename to scripts/sql/29101900_helm_app_deployment_message.down.sql diff --git a/scripts/sql/029200_019_helm_app_deployment_message.up.sql b/scripts/sql/29101900_helm_app_deployment_message.up.sql similarity index 100% rename from scripts/sql/029200_019_helm_app_deployment_message.up.sql rename to scripts/sql/29101900_helm_app_deployment_message.up.sql diff --git a/scripts/sql/028901_019_devtron_ci_trigger_v1_1.down.sql b/scripts/sql/29201901_devtron_ci_trigger_v1_1.down.sql similarity index 100% rename from scripts/sql/028901_019_devtron_ci_trigger_v1_1.down.sql rename to scripts/sql/29201901_devtron_ci_trigger_v1_1.down.sql diff --git a/scripts/sql/028901_019_devtron_ci_trigger_v1_1.up.sql b/scripts/sql/29201901_devtron_ci_trigger_v1_1.up.sql similarity index 100% rename from scripts/sql/028901_019_devtron_ci_trigger_v1_1.up.sql rename to scripts/sql/29201901_devtron_ci_trigger_v1_1.up.sql diff --git a/scripts/sql/028902_019_docker_lint.down.sql b/scripts/sql/29201902_docker_lint.down.sql similarity index 100% rename from scripts/sql/028902_019_docker_lint.down.sql rename to scripts/sql/29201902_docker_lint.down.sql diff --git a/scripts/sql/028902_019_docker_lint.up.sql b/scripts/sql/29201902_docker_lint.up.sql similarity index 100% rename from scripts/sql/028902_019_docker_lint.up.sql rename to scripts/sql/29201902_docker_lint.up.sql diff --git a/scripts/sql/029300_020_artifact_uploaded_flag.down.sql b/scripts/sql/29302000_artifact_uploaded_flag.down.sql similarity index 100% rename from scripts/sql/029300_020_artifact_uploaded_flag.down.sql rename to scripts/sql/29302000_artifact_uploaded_flag.down.sql diff --git a/scripts/sql/029300_020_artifact_uploaded_flag.up.sql b/scripts/sql/29302000_artifact_uploaded_flag.up.sql similarity index 100% rename from scripts/sql/029300_020_artifact_uploaded_flag.up.sql rename to scripts/sql/29302000_artifact_uploaded_flag.up.sql diff --git a/scripts/sql/029400_020_deployment_event.down.sql b/scripts/sql/29402000_deployment_event.down.sql similarity index 100% rename from scripts/sql/029400_020_deployment_event.down.sql rename to scripts/sql/29402000_deployment_event.down.sql diff --git a/scripts/sql/029400_020_deployment_event.up.sql b/scripts/sql/29402000_deployment_event.up.sql similarity index 100% rename from scripts/sql/029400_020_deployment_event.up.sql rename to scripts/sql/29402000_deployment_event.up.sql diff --git a/scripts/sql/29502100_refrence-chart-ref_4-19-0.down.sql b/scripts/sql/29502100_refrence-chart-ref_4-19-0.down.sql new file mode 100644 index 00000000000..858040b89e2 --- /dev/null +++ b/scripts/sql/29502100_refrence-chart-ref_4-19-0.down.sql @@ -0,0 +1,5 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping WHERE chart_ref_id=(select id from chart_ref where version='4.19.0' and name is null); + +DELETE FROM "public"."chart_ref" WHERE ("location" = 'reference-chart_4-19-0' AND "version" = '4.19.0'); + +UPDATE "public"."chart_ref" SET "is_default" = 't' WHERE "location" = 'reference-chart_4-19-0' AND "version" = '4.19.0'; \ No newline at end of file diff --git a/scripts/sql/29502100_refrence-chart-ref_4-19-0.up.sql b/scripts/sql/29502100_refrence-chart-ref_4-19-0.up.sql new file mode 100644 index 00000000000..370ecd61eed --- /dev/null +++ b/scripts/sql/29502100_refrence-chart-ref_4-19-0.up.sql @@ -0,0 +1,9 @@ +INSERT INTO "public"."chart_ref" ("location", "version","deployment_strategy_path", "is_default", "active", "created_on", "created_by", "updated_on", 
"updated_by") VALUES + ('reference-chart_4-19-0', '4.19.0','pipeline-values.yaml', 'f', 't', 'now()', 1, 'now()', 1); + + +INSERT INTO global_strategy_metadata_chart_ref_mapping ("global_strategy_metadata_id", "chart_ref_id", "active", "created_on", "created_by", "updated_on", "updated_by","default") +VALUES (1,(select id from chart_ref where version='4.19.0' and name is null), true, now(), 1, now(), 1,true), +(2,(select id from chart_ref where version='4.19.0' and name is null), true, now(), 1, now(), 1,false), +(3,(select id from chart_ref where version='4.19.0' and name is null), true, now(), 1, now(), 1,false), +(4,(select id from chart_ref where version='4.19.0' and name is null), true, now(), 1, now(), 1,false); \ No newline at end of file diff --git a/scripts/sql/29602100_refrence-chart-ref_4-20-0.down.sql b/scripts/sql/29602100_refrence-chart-ref_4-20-0.down.sql new file mode 100644 index 00000000000..95b64648047 --- /dev/null +++ b/scripts/sql/29602100_refrence-chart-ref_4-20-0.down.sql @@ -0,0 +1,5 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping WHERE chart_ref_id=(select id from chart_ref where version='4.20.0' and name is null); + +DELETE FROM "public"."chart_ref" WHERE ("location" = 'reference-chart_4-20-0' AND "version" = '4.20.0'); + +UPDATE "public"."chart_ref" SET "is_default" = 't' WHERE "location" = 'reference-chart_4-20-0' AND "version" = '4.20.0'; \ No newline at end of file diff --git a/scripts/sql/29602100_refrence-chart-ref_4-20-0.up.sql b/scripts/sql/29602100_refrence-chart-ref_4-20-0.up.sql new file mode 100644 index 00000000000..4b3f818cab7 --- /dev/null +++ b/scripts/sql/29602100_refrence-chart-ref_4-20-0.up.sql @@ -0,0 +1,9 @@ +INSERT INTO "public"."chart_ref" ("location", "version","deployment_strategy_path", "is_default", "active", "created_on", "created_by", "updated_on", "updated_by") VALUES + ('reference-chart_4-20-0', '4.20.0','pipeline-values.yaml', 'f', 't', 'now()', 1, 'now()', 1); + + +INSERT INTO global_strategy_metadata_chart_ref_mapping ("global_strategy_metadata_id", "chart_ref_id", "active", "created_on", "created_by", "updated_on", "updated_by","default") +VALUES (1,(select id from chart_ref where version='4.20.0' and name is null), true, now(), 1, now(), 1,true), +(2,(select id from chart_ref where version='4.20.0' and name is null), true, now(), 1, now(), 1,false), +(3,(select id from chart_ref where version='4.20.0' and name is null), true, now(), 1, now(), 1,false), +(4,(select id from chart_ref where version='4.20.0' and name is null), true, now(), 1, now(), 1,false); \ No newline at end of file diff --git a/scripts/sql/29702100_deployment-chart-ref_4-20-0.down.sql b/scripts/sql/29702100_deployment-chart-ref_4-20-0.down.sql new file mode 100644 index 00000000000..0d23e81af2c --- /dev/null +++ b/scripts/sql/29702100_deployment-chart-ref_4-20-0.down.sql @@ -0,0 +1,3 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping WHERE chart_ref_id=(select id from chart_ref where version='4.20.0' and name='Deployment'); + +DELETE FROM "public"."chart_ref" WHERE ("location" = 'deployment-chart_4-18-0' AND "version" = '4.20.0'); \ No newline at end of file diff --git a/scripts/sql/29702100_deployment-chart-ref_4-20-0.up.sql b/scripts/sql/29702100_deployment-chart-ref_4-20-0.up.sql new file mode 100644 index 00000000000..3a4b09171d1 --- /dev/null +++ b/scripts/sql/29702100_deployment-chart-ref_4-20-0.up.sql @@ -0,0 +1,7 @@ +UPDATE chart_ref SET is_default=false; +INSERT INTO "public"."chart_ref" ("name","location", "version", 
"deployment_strategy_path","is_default", "active", "created_on", "created_by", "updated_on", "updated_by") VALUES + ('Deployment','deployment-chart_4-20-0', '4.20.0','pipeline-values.yaml','t', 't', 'now()', 1, 'now()', 1); + +INSERT INTO global_strategy_metadata_chart_ref_mapping ("global_strategy_metadata_id", "chart_ref_id", "active", "created_on", "created_by", "updated_on", "updated_by","default") +VALUES (1,(select id from chart_ref where version='4.20.0' and name='Deployment'), true, now(), 1, now(), 1,true), +(4,(select id from chart_ref where version='4.20.0' and name='Deployment'), true, now(), 1, now(), 1,false); \ No newline at end of file diff --git a/scripts/sql/29802100_cluster_terminal_images.down.sql b/scripts/sql/29802100_cluster_terminal_images.down.sql new file mode 100644 index 00000000000..2699a0e08d5 --- /dev/null +++ b/scripts/sql/29802100_cluster_terminal_images.down.sql @@ -0,0 +1,6 @@ +UPDATE "public"."attributes" +SET value = '[{"groupId":"latest","groupRegex":"v1\\.2[4-8]\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:latest","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:latest","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:latest","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]} ,{"groupId":"v1.22","groupRegex":"v1\\.(21|22|23)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.22","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"},{"image":"quay.io/devtron/alpine-k8s-utils:1.22","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.22","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."}]},{"groupId":"v1.19","groupRegex":"v1\\.(18|19|20)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.19","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"},{"image":"quay.io/devtron/alpine-k8s-utils:1.19","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.19","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes 
CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]},{"groupId":"v1.16","groupRegex":"v1\\.(15|16|17)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.16","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:1.16","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.16","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]}]', + updated_on = NOW() +WHERE key = 'DEFAULT_TERMINAL_IMAGE_LIST'; + +ALTER TABLE attributes ALTER COLUMN value TYPE VARCHAR(5000); \ No newline at end of file diff --git a/scripts/sql/29802100_cluster_terminal_images.up.sql b/scripts/sql/29802100_cluster_terminal_images.up.sql new file mode 100644 index 00000000000..688067af392 --- /dev/null +++ b/scripts/sql/29802100_cluster_terminal_images.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE attributes ALTER COLUMN value TYPE VARCHAR(10000); + +UPDATE "public"."attributes" +SET value = '[{"groupId":"latest","groupRegex":"v1\\.(30|31|32)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:latest","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:latest","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:latest","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]} ,{"groupId":"v1.28","groupRegex":"v1\\.(27|28|29)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.28","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:1.28","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.28","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]} ,{"groupId":"v1.25","groupRegex":"v1\\.(24|25|26)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.25","name":"Ubuntu: Kubernetes 
utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:1.25","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.25","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]} ,{"groupId":"v1.22","groupRegex":"v1\\.(21|22|23)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.22","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"},{"image":"quay.io/devtron/alpine-k8s-utils:1.22","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.22","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."}]},{"groupId":"v1.19","groupRegex":"v1\\.(18|19|20)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.19","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"},{"image":"quay.io/devtron/alpine-k8s-utils:1.19","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.19","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]},{"groupId":"v1.16","groupRegex":"v1\\.(15|16|17)\\..+","imageList":[{"image":"quay.io/devtron/ubuntu-k8s-utils:1.16","name":"Ubuntu: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on ubuntu OS"}, {"image":"quay.io/devtron/alpine-k8s-utils:1.16","name":"Alpine: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on alpine OS"},{"image":"quay.io/devtron/centos-k8s-utils:1.16","name":"CentOS: Kubernetes utilites","description":"Contains kubectl, helm, curl, git, busybox, wget, jq, nslookup, telnet on Cent OS"},{"image":"quay.io/devtron/alpine-netshoot:latest","name":"Alpine: Netshoot","description":"Contains Docker + Kubernetes network troubleshooting utilities."},{"image":"quay.io/devtron/k9s-k8s-utils:latest","name":"K9s: Kubernetes CLI","description": " Kubernetes CLI To Manage Your Clusters In Style!"}]}]', + updated_on = NOW() +WHERE key = 'DEFAULT_TERMINAL_IMAGE_LIST'; \ No newline at end of file diff --git 
a/vendor/modules.txt b/vendor/modules.txt index 451b8c7c4b5..ba2dc057ac1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -344,7 +344,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241010131105-e2c23f9c80da +# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241024135802-b4888f54a136 ## explicit; go 1.21 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/blob-storage @@ -2212,7 +2212,7 @@ xorm.io/xorm/log xorm.io/xorm/names xorm.io/xorm/schemas xorm.io/xorm/tags -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241010131105-e2c23f9c80da +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20241024135802-b4888f54a136 # github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 # github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.5 # k8s.io/api => k8s.io/api v0.29.7 diff --git a/wire_gen.go b/wire_gen.go index f3f6bfe3434..52afbd14764 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -439,7 +439,7 @@ func InitializeApp() (*App, error) { ciWorkflowRepositoryImpl := pipelineConfig.NewCiWorkflowRepositoryImpl(db, sugaredLogger) ciPipelineMaterialRepositoryImpl := pipelineConfig.NewCiPipelineMaterialRepositoryImpl(db, sugaredLogger) ciArtifactRepositoryImpl := repository2.NewCiArtifactRepositoryImpl(db, sugaredLogger) - eventSimpleFactoryImpl := client2.NewEventSimpleFactoryImpl(sugaredLogger, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, ciWorkflowRepositoryImpl, ciPipelineMaterialRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, userRepositoryImpl, ciArtifactRepositoryImpl) + eventSimpleFactoryImpl := client2.NewEventSimpleFactoryImpl(sugaredLogger, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, ciWorkflowRepositoryImpl, ciPipelineMaterialRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, userRepositoryImpl, environmentRepositoryImpl, ciArtifactRepositoryImpl) applicationServiceClientImpl := application.NewApplicationClientImpl(sugaredLogger, argoCDConnectionManagerImpl) configMapRepositoryImpl := chartConfig.NewConfigMapRepositoryImpl(sugaredLogger, db) chartRepositoryImpl := chartRepoRepository.NewChartRepository(db, transactionUtilImpl) @@ -662,7 +662,7 @@ func InitializeApp() (*App, error) { manifestPushConfigRepositoryImpl := repository10.NewManifestPushConfigRepository(sugaredLogger, db) scanToolExecutionHistoryMappingRepositoryImpl := security.NewScanToolExecutionHistoryMappingRepositoryImpl(db, sugaredLogger) imageScanServiceImpl := security2.NewImageScanServiceImpl(sugaredLogger, imageScanHistoryRepositoryImpl, imageScanResultRepositoryImpl, imageScanObjectMetaRepositoryImpl, cveStoreRepositoryImpl, imageScanDeployInfoRepositoryImpl, userServiceImpl, teamRepositoryImpl, appRepositoryImpl, environmentServiceImpl, ciArtifactRepositoryImpl, policyServiceImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, scanToolMetadataRepositoryImpl, scanToolExecutionHistoryMappingRepositoryImpl, cvePolicyRepositoryImpl) - triggerServiceImpl, err := devtronApps.NewTriggerServiceImpl(sugaredLogger, 
cdWorkflowCommonServiceImpl, gitOpsManifestPushServiceImpl, gitOpsConfigReadServiceImpl, argoK8sClientImpl, acdConfig, argoClientWrapperServiceImpl, pipelineStatusTimelineServiceImpl, chartTemplateServiceImpl, workflowEventPublishServiceImpl, manifestCreationServiceImpl, deployedConfigurationHistoryServiceImpl, argoUserServiceImpl, pipelineStageServiceImpl, globalPluginServiceImpl, customTagServiceImpl, pluginInputVariableParserImpl, prePostCdScriptHistoryServiceImpl, scopedVariableCMCSManagerImpl, workflowServiceImpl, imageDigestPolicyServiceImpl, userServiceImpl, clientImpl, helmAppServiceImpl, enforcerUtilImpl, userDeploymentRequestServiceImpl, helmAppClientImpl, eventSimpleFactoryImpl, eventRESTClientImpl, environmentVariables, appRepositoryImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, pipelineRepositoryImpl, pipelineOverrideRepositoryImpl, manifestPushConfigRepositoryImpl, chartRepositoryImpl, environmentRepositoryImpl, cdWorkflowRepositoryImpl, ciWorkflowRepositoryImpl, ciArtifactRepositoryImpl, ciTemplateServiceImpl, materialRepositoryImpl, appLabelRepositoryImpl, ciPipelineRepositoryImpl, appWorkflowRepositoryImpl, dockerArtifactStoreRepositoryImpl, imageScanServiceImpl, k8sServiceImpl, transactionUtilImpl, deploymentConfigServiceImpl, ciCdPipelineOrchestratorImpl, attributesServiceImpl) + triggerServiceImpl, err := devtronApps.NewTriggerServiceImpl(sugaredLogger, cdWorkflowCommonServiceImpl, gitOpsManifestPushServiceImpl, gitOpsConfigReadServiceImpl, argoK8sClientImpl, acdConfig, argoClientWrapperServiceImpl, pipelineStatusTimelineServiceImpl, chartTemplateServiceImpl, workflowEventPublishServiceImpl, manifestCreationServiceImpl, deployedConfigurationHistoryServiceImpl, argoUserServiceImpl, pipelineStageServiceImpl, globalPluginServiceImpl, customTagServiceImpl, pluginInputVariableParserImpl, prePostCdScriptHistoryServiceImpl, scopedVariableCMCSManagerImpl, workflowServiceImpl, imageDigestPolicyServiceImpl, userServiceImpl, clientImpl, helmAppServiceImpl, enforcerUtilImpl, userDeploymentRequestServiceImpl, helmAppClientImpl, eventSimpleFactoryImpl, eventRESTClientImpl, environmentVariables, appRepositoryImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, pipelineRepositoryImpl, pipelineOverrideRepositoryImpl, manifestPushConfigRepositoryImpl, chartRepositoryImpl, environmentRepositoryImpl, cdWorkflowRepositoryImpl, ciWorkflowRepositoryImpl, ciArtifactRepositoryImpl, ciTemplateServiceImpl, materialRepositoryImpl, appLabelRepositoryImpl, ciPipelineRepositoryImpl, appWorkflowRepositoryImpl, dockerArtifactStoreRepositoryImpl, imageScanServiceImpl, k8sServiceImpl, transactionUtilImpl, deploymentConfigServiceImpl, ciCdPipelineOrchestratorImpl, gitOperationServiceImpl, attributesServiceImpl) if err != nil { return nil, err } @@ -722,7 +722,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - argoApplicationServiceExtendedImpl := argoApplication.NewArgoApplicationServiceExtendedServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationReadServiceImpl) + argoApplicationServiceExtendedImpl := argoApplication.NewArgoApplicationServiceExtendedServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationReadServiceImpl, 
applicationServiceClientImpl) installedAppResourceServiceImpl := resource.NewInstalledAppResourceServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, applicationServiceClientImpl, acdAuthConfig, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, appStatusServiceImpl, k8sCommonServiceImpl, k8sApplicationServiceImpl, k8sServiceImpl, deploymentConfigServiceImpl, ociRegistryConfigRepositoryImpl, argoApplicationServiceExtendedImpl) chartGroupEntriesRepositoryImpl := repository17.NewChartGroupEntriesRepositoryImpl(db, sugaredLogger) chartGroupReposotoryImpl := repository17.NewChartGroupReposotoryImpl(db, sugaredLogger) @@ -951,7 +951,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - deploymentConfigurationServiceImpl, err := configDiff.NewDeploymentConfigurationServiceImpl(sugaredLogger, configMapServiceImpl, appRepositoryImpl, environmentRepositoryImpl, chartServiceImpl, generateManifestDeploymentTemplateServiceImpl) + deploymentConfigurationServiceImpl, err := configDiff.NewDeploymentConfigurationServiceImpl(sugaredLogger, configMapServiceImpl, appRepositoryImpl, environmentRepositoryImpl, chartServiceImpl, generateManifestDeploymentTemplateServiceImpl, deploymentTemplateHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, configMapHistoryRepositoryImpl, scopedVariableCMCSManagerImpl, configMapRepositoryImpl, pipelineDeploymentConfigServiceImpl, chartRefServiceImpl, pipelineRepositoryImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl) if err != nil { return nil, err }
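
Reviewer note on the new `autoscaling.containerResource` support added to the reference charts above: a minimal sketch of how it is meant to be used, based on the `hpa.yaml` block added in this diff for `statefulset-chart_5-1-0`. The key names are taken from `statefulset-chart_5-1-0/app-values.yaml` in this change; the utilization figures and the `my-app` container name (which the template derives from `$.Chart.Name`) are illustrative assumptions, not values set by this PR.

autoscaling:
  enabled: true
  MinReplicas: 1
  MaxReplicas: 2
  containerResource:
    enabled: true                        # gate checked by the new hpa.yaml block
    TargetCPUUtilizationPercentage: 90
    TargetMemoryUtilizationPercentage: 80

# With these values the new template block appends per-container metrics to the
# HPA's metrics list, alongside the chart's existing pod-level Resource metrics,
# roughly as follows (fragment only; container name comes from $.Chart.Name):
#
#   - type: ContainerResource
#     containerResource:
#       name: cpu
#       container: my-app
#       target:
#         type: Utilization
#         averageUtilization: 90
#   - type: ContainerResource
#     containerResource:
#       name: memory
#       container: my-app
#       target:
#         type: Utilization
#         averageUtilization: 80

The values files touched by this change ship with `containerResource` disabled by default, so existing installations keep their current HPA behaviour unless this block is explicitly enabled.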