Subject: [PATCH 01/37] Add black GitHub Action to format Python files
b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py index 031a3fa268..e04ba23881 100644 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,10 +24,17 @@ def main(): server.serve_forever() -def get_settings_from_env(controller_port=None, - visualization_server_image=None, frontend_image=None, - visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, - minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): +def get_settings_from_env( + controller_port=None, + visualization_server_image=None, + frontend_image=None, + visualization_server_tag=None, + frontend_tag=None, + disable_istio_sidecar=None, + minio_access_key=None, + minio_secret_key=None, + kfp_default_pipeline_root=None, +): """ Returns a dict of settings from environment variables relevant to the controller @@ -45,66 +52,82 @@ def get_settings_from_env(controller_port=None, minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = \ - controller_port or \ - os.environ.get("CONTROLLER_PORT", "8080") + settings["controller_port"] = controller_port or os.environ.get( + "CONTROLLER_PORT", "8080" + ) - settings["visualization_server_image"] = \ - visualization_server_image or \ - os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") + settings["visualization_server_image"] = ( + visualization_server_image + or os.environ.get( + "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" + ) + ) - settings["frontend_image"] = \ - frontend_image or \ - os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") + settings["frontend_image"] = frontend_image or os.environ.get( + "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" + ) # Look for specific tags for each image first, falling back to # previously used 
KFP_VERSION environment variable for backwards # compatibility - settings["visualization_server_tag"] = \ - visualization_server_tag or \ - os.environ.get("VISUALIZATION_SERVER_TAG") or \ - os.environ["KFP_VERSION"] + settings["visualization_server_tag"] = ( + visualization_server_tag + or os.environ.get("VISUALIZATION_SERVER_TAG") + or os.environ["KFP_VERSION"] + ) - settings["frontend_tag"] = \ - frontend_tag or \ - os.environ.get("FRONTEND_TAG") or \ - os.environ["KFP_VERSION"] + settings["frontend_tag"] = ( + frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] + ) - settings["disable_istio_sidecar"] = \ - disable_istio_sidecar if disable_istio_sidecar is not None \ - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + settings["disable_istio_sidecar"] = ( + disable_istio_sidecar + if disable_istio_sidecar is not None + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + ) - settings["minio_access_key"] = \ - minio_access_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') + settings["minio_access_key"] = minio_access_key or base64.b64encode( + bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") + ).decode("utf-8") - settings["minio_secret_key"] = \ - minio_secret_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') + settings["minio_secret_key"] = minio_secret_key or base64.b64encode( + bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") + ).decode("utf-8") # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = \ - kfp_default_pipeline_root or \ - os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") + settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( + "KFP_DEFAULT_PIPELINE_ROOT" + ) return settings -def server_factory(visualization_server_image, - visualization_server_tag, frontend_image, frontend_tag, - disable_istio_sidecar, minio_access_key, - minio_secret_key, 
kfp_default_pipeline_root=None, - url="", controller_port=8080): +def server_factory( + visualization_server_image, + visualization_server_tag, + frontend_image, + frontend_tag, + disable_istio_sidecar, + minio_access_key, + minio_secret_key, + kfp_default_pipeline_root=None, + url="", + controller_port=8080, +): """ Returns an HTTPServer populated with Handler with customized settings """ + class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = parent.get("metadata", {}).get( - "labels", {}).get("pipelines.kubeflow.org/enabled") + pipeline_enabled = ( + parent.get("metadata", {}) + .get("labels", {}) + .get("pipelines.kubeflow.org/enabled") + ) if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -113,29 +136,30 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - }] - + desired_resources += [ + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + } + ] # Compute status based on observed state. 
desired_status = { - "kubeflow-pipelines-ready": - len(children["Secret.v1"]) == 1 and - len(children["ConfigMap.v1"]) == desired_configmap_count and - len(children["Deployment.apps/v1"]) == 2 and - len(children["Service.v1"]) == 2 and - len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and - len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and - "True" or "False" + "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 + and len(children["ConfigMap.v1"]) == desired_configmap_count + and len(children["Deployment.apps/v1"]) == 2 + and len(children["Service.v1"]) == 2 + and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 + and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 + and "True" + or "False" } # Generate the desired child object(s). @@ -148,8 +172,7 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": - "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -158,50 +181,38 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, + "labels": {"app": "ml-pipeline-visualizationserver"}, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - }, + "matchLabels": {"app": "ml-pipeline-visualizationserver"}, }, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-visualizationserver"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "image": f"{visualization_server_image}:{visualization_server_tag}", - 
"imagePullPolicy": - "IfNotPresent", - "name": - "ml-pipeline-visualizationserver", - "ports": [{ - "containerPort": 8888 - }], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi" - }, - "limits": { - "cpu": "500m", - "memory": "1Gi" + "containers": [ + { + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": "IfNotPresent", + "name": "ml-pipeline-visualizationserver", + "ports": [{"containerPort": 8888}], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi", + }, + "limits": {"cpu": "500m", "memory": "1Gi"}, }, } - }], - "serviceAccountName": - "default-editor", + ], + "serviceAccountName": "default-editor", }, }, }, @@ -215,12 +226,8 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": { - "tls": { - "mode": "ISTIO_MUTUAL" - } - } - } + "trafficPolicy": {"tls": {"mode": "ISTIO_MUTUAL"}}, + }, }, { "apiVersion": "security.istio.io/v1beta1", @@ -231,18 +238,22 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - } + "matchLabels": {"app": "ml-pipeline-visualizationserver"} }, - "rules": [{ - "from": [{ - "source": { - "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] - } - }] - }] - } + "rules": [ + { + "from": [ + { + "source": { + "principals": [ + "cluster.local/ns/kubeflow/sa/ml-pipeline" + ] + } + } + ] + } + ], + }, }, { "apiVersion": "v1", @@ -252,12 +263,14 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [{ - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - }], + "ports": [ + { + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + } + ], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -268,73 +281,62 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - 
}, + "labels": {"app": "ml-pipeline-ui-artifact"}, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": { - "matchLabels": { - "app": "ml-pipeline-ui-artifact" - } - }, + "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-ui-artifact"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "name": - "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": - "IfNotPresent", - "ports": [{ - "containerPort": 3000 - }], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact" - } - } - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact" - } - } - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi" - }, - "limits": { - "cpu": "100m", - "memory": "500Mi" + "containers": [ + { + "name": "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": "IfNotPresent", + "ports": [{"containerPort": 3000}], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi", + }, + "limits": { + "cpu": "100m", + "memory": "500Mi", + }, }, } - }], - "serviceAccountName": - "default-editor" - } - } - } + ], + "serviceAccountName": "default-editor", + }, + }, + }, }, { "apiVersion": "v1", @@ -342,52 
+344,55 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": { - "app": "ml-pipeline-ui-artifact" - } + "labels": {"app": "ml-pipeline-ui-artifact"}, }, "spec": { - "ports": [{ - "name": - "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000 - }], - "selector": { - "app": "ml-pipeline-ui-artifact" - } - } + "ports": [ + { + "name": "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000, + } + ], + "selector": {"app": "ml-pipeline-ui-artifact"}, + }, }, ] - print('Received request:\n', json.dumps(parent, sort_keys=True)) - print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True)) + print("Received request:\n", json.dumps(parent, sort_keys=True)) + print( + "Desired resources except secrets:\n", + json.dumps(desired_resources, sort_keys=True), + ) # Moved after the print argument because this is sensitive data. - desired_resources.append({ - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - }) + desired_resources.append( + { + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + } + ) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. 
observed = json.loads( - self.rfile.read(int(self.headers.get("content-length")))) + self.rfile.read(int(self.headers.get("content-length"))) + ) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), 'utf-8')) + self.wfile.write(bytes(json.dumps(desired), "utf-8")) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py index 50362d60fd..bc9a6549b5 100644 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,10 +11,8 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -24,16 +22,14 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - } + }, } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -43,7 +39,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - } + }, } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -70,34 +66,38 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - } - ) - -ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, 
- **{ - "KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - } - ) - -ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - } - ) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - } - ) +ENV_KFP_VERSION_ONLY = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + }, +) + +ENV_IMAGES_NO_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + }, +) + +ENV_IMAGES_WITH_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + }, +) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( + ENV_IMAGES_WITH_TAGS, + **{ + "DISABLE_ISTIO_SIDECAR": "false", + }, +) def generate_image_name(imagename, tag): @@ -154,40 +154,57 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), - 
generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION + ), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, - expected_visualization_server_image, expected_frontend_server_image): +def test_sync_server_with_pipeline_enabled( + sync_server, + data, + expected_status, + expected_visualization_server_image, + 
expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -207,13 +224,17 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -221,19 +242,28 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server_from_arguments"] + indirect=["sync_server_from_arguments"], ) def test_sync_server_with_direct_passing_of_settings( - sync_server_from_arguments, data, 
expected_status, expected_visualization_server_image, - expected_frontend_server_image): + sync_server_from_arguments, + data, + expected_status, + expected_visualization_server_image, + expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -250,13 +280,17 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -264,10 +298,11 @@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, - expected_children): +def test_sync_server_without_pipeline_enabled( + sync_server, data, expected_status, expected_children +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -282,5 +317,5 @@ def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status - assert results['children'] == expected_children + assert results["status"] == expected_status + assert 
results["children"] == expected_children diff --git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py index 7caecb8ee3..3ff93437ee 100644 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,10 +24,17 @@ def main(): server.serve_forever() -def get_settings_from_env(controller_port=None, - visualization_server_image=None, frontend_image=None, - visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, - minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): +def get_settings_from_env( + controller_port=None, + visualization_server_image=None, + frontend_image=None, + visualization_server_tag=None, + frontend_tag=None, + disable_istio_sidecar=None, + minio_access_key=None, + minio_secret_key=None, + kfp_default_pipeline_root=None, +): """ Returns a dict of settings from environment variables relevant to the controller @@ -45,66 +52,82 @@ def get_settings_from_env(controller_port=None, minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = \ - controller_port or \ - os.environ.get("CONTROLLER_PORT", "8080") + settings["controller_port"] = controller_port or os.environ.get( + "CONTROLLER_PORT", "8080" + ) - settings["visualization_server_image"] = \ - visualization_server_image or \ - os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") + settings["visualization_server_image"] = ( + visualization_server_image + or os.environ.get( + "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" + ) + ) - settings["frontend_image"] = \ - frontend_image or \ - os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") + settings["frontend_image"] = frontend_image or 
os.environ.get( + "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" + ) # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment variable for backwards # compatibility - settings["visualization_server_tag"] = \ - visualization_server_tag or \ - os.environ.get("VISUALIZATION_SERVER_TAG") or \ - os.environ["KFP_VERSION"] + settings["visualization_server_tag"] = ( + visualization_server_tag + or os.environ.get("VISUALIZATION_SERVER_TAG") + or os.environ["KFP_VERSION"] + ) - settings["frontend_tag"] = \ - frontend_tag or \ - os.environ.get("FRONTEND_TAG") or \ - os.environ["KFP_VERSION"] + settings["frontend_tag"] = ( + frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] + ) - settings["disable_istio_sidecar"] = \ - disable_istio_sidecar if disable_istio_sidecar is not None \ - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + settings["disable_istio_sidecar"] = ( + disable_istio_sidecar + if disable_istio_sidecar is not None + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + ) - settings["minio_access_key"] = \ - minio_access_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') + settings["minio_access_key"] = minio_access_key or base64.b64encode( + bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") + ).decode("utf-8") - settings["minio_secret_key"] = \ - minio_secret_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') + settings["minio_secret_key"] = minio_secret_key or base64.b64encode( + bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") + ).decode("utf-8") # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = \ - kfp_default_pipeline_root or \ - os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") + settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( + "KFP_DEFAULT_PIPELINE_ROOT" + ) return settings -def 
server_factory(visualization_server_image, - visualization_server_tag, frontend_image, frontend_tag, - disable_istio_sidecar, minio_access_key, - minio_secret_key, kfp_default_pipeline_root=None, - url="", controller_port=8080): +def server_factory( + visualization_server_image, + visualization_server_tag, + frontend_image, + frontend_tag, + disable_istio_sidecar, + minio_access_key, + minio_secret_key, + kfp_default_pipeline_root=None, + url="", + controller_port=8080, +): """ Returns an HTTPServer populated with Handler with customized settings """ + class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = parent.get("metadata", {}).get( - "labels", {}).get("pipelines.kubeflow.org/enabled") + pipeline_enabled = ( + parent.get("metadata", {}) + .get("labels", {}) + .get("pipelines.kubeflow.org/enabled") + ) if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -113,29 +136,30 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - }] - + desired_resources += [ + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + } + ] # Compute status based on observed state. 
desired_status = { - "kubeflow-pipelines-ready": - len(children["Secret.v1"]) == 1 and - len(children["ConfigMap.v1"]) == desired_configmap_count and - len(children["Deployment.apps/v1"]) == 2 and - len(children["Service.v1"]) == 2 and - len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and - len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and - "True" or "False" + "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 + and len(children["ConfigMap.v1"]) == desired_configmap_count + and len(children["Deployment.apps/v1"]) == 2 + and len(children["Service.v1"]) == 2 + and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 + and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 + and "True" + or "False" } # Generate the desired child object(s). @@ -148,8 +172,7 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": - "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -158,50 +181,38 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, + "labels": {"app": "ml-pipeline-visualizationserver"}, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - }, + "matchLabels": {"app": "ml-pipeline-visualizationserver"}, }, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-visualizationserver"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "image": f"{visualization_server_image}:{visualization_server_tag}", - 
"imagePullPolicy": - "IfNotPresent", - "name": - "ml-pipeline-visualizationserver", - "ports": [{ - "containerPort": 8888 - }], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi" - }, - "limits": { - "cpu": "500m", - "memory": "1Gi" + "containers": [ + { + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": "IfNotPresent", + "name": "ml-pipeline-visualizationserver", + "ports": [{"containerPort": 8888}], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi", + }, + "limits": {"cpu": "500m", "memory": "1Gi"}, }, } - }], - "serviceAccountName": - "default-editor", + ], + "serviceAccountName": "default-editor", }, }, }, @@ -215,12 +226,8 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": { - "tls": { - "mode": "ISTIO_MUTUAL" - } - } - } + "trafficPolicy": {"tls": {"mode": "ISTIO_MUTUAL"}}, + }, }, { "apiVersion": "security.istio.io/v1beta1", @@ -231,18 +238,22 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - } + "matchLabels": {"app": "ml-pipeline-visualizationserver"} }, - "rules": [{ - "from": [{ - "source": { - "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] - } - }] - }] - } + "rules": [ + { + "from": [ + { + "source": { + "principals": [ + "cluster.local/ns/kubeflow/sa/ml-pipeline" + ] + } + } + ] + } + ], + }, }, { "apiVersion": "v1", @@ -252,12 +263,14 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [{ - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - }], + "ports": [ + { + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + } + ], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -268,73 +281,62 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - 
}, + "labels": {"app": "ml-pipeline-ui-artifact"}, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": { - "matchLabels": { - "app": "ml-pipeline-ui-artifact" - } - }, + "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-ui-artifact"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "name": - "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": - "IfNotPresent", - "ports": [{ - "containerPort": 3000 - }], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact" - } - } - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact" - } - } - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi" - }, - "limits": { - "cpu": "100m", - "memory": "500Mi" + "containers": [ + { + "name": "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": "IfNotPresent", + "ports": [{"containerPort": 3000}], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi", + }, + "limits": { + "cpu": "100m", + "memory": "500Mi", + }, }, } - }], - "serviceAccountName": - "default-editor" - } - } - } + ], + "serviceAccountName": "default-editor", + }, + }, + }, }, { "apiVersion": "v1", @@ -342,52 
+344,55 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": { - "app": "ml-pipeline-ui-artifact" - } + "labels": {"app": "ml-pipeline-ui-artifact"}, }, "spec": { - "ports": [{ - "name": - "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000 - }], - "selector": { - "app": "ml-pipeline-ui-artifact" - } - } + "ports": [ + { + "name": "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000, + } + ], + "selector": {"app": "ml-pipeline-ui-artifact"}, + }, }, ] - print('Received request:\n', json.dumps(parent, indent=2, sort_keys=True)) - print('Desired resources except secrets:\n', json.dumps(desired_resources, indent=2, sort_keys=True)) + print("Received request:\n", json.dumps(parent, indent=2, sort_keys=True)) + print( + "Desired resources except secrets:\n", + json.dumps(desired_resources, indent=2, sort_keys=True), + ) # Moved after the print argument because this is sensitive data. - desired_resources.append({ - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - }) + desired_resources.append( + { + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + } + ) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. 
observed = json.loads( - self.rfile.read(int(self.headers.get("content-length")))) + self.rfile.read(int(self.headers.get("content-length"))) + ) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), 'utf-8')) + self.wfile.write(bytes(json.dumps(desired), "utf-8")) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py index 6158e3f8c0..20de7ada54 100644 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,10 +11,8 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -24,16 +22,14 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - } + }, } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -43,7 +39,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - } + }, } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -70,34 +66,38 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - } - ) - -ENV_IMAGES_NO_TAGS = 
dict(ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - } - ) - -ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - } - ) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - } - ) +ENV_KFP_VERSION_ONLY = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + }, +) + +ENV_IMAGES_NO_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + }, +) + +ENV_IMAGES_WITH_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + }, +) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( + ENV_IMAGES_WITH_TAGS, + **{ + "DISABLE_ISTIO_SIDECAR": "false", + }, +) def generate_image_name(imagename, tag): @@ -154,40 +154,57 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], 
KFP_VERSION), - generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION + ), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, - expected_visualization_server_image, expected_frontend_server_image): +def test_sync_server_with_pipeline_enabled( + sync_server, + data, + expected_status, + 
expected_visualization_server_image, + expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -207,13 +224,17 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -221,19 +242,28 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server_from_arguments"] + indirect=["sync_server_from_arguments"], ) def test_sync_server_with_direct_passing_of_settings( - 
sync_server_from_arguments, data, expected_status, expected_visualization_server_image, - expected_frontend_server_image): + sync_server_from_arguments, + data, + expected_status, + expected_visualization_server_image, + expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -250,13 +280,17 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -264,10 +298,11 @@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, - expected_children): +def test_sync_server_without_pipeline_enabled( + sync_server, data, expected_status, expected_children +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -282,5 +317,5 @@ def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status - assert results['children'] == expected_children + assert 
results["status"] == expected_status + assert results["children"] == expected_children diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py index 031a3fa268..e04ba23881 100644 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,10 +24,17 @@ def main(): server.serve_forever() -def get_settings_from_env(controller_port=None, - visualization_server_image=None, frontend_image=None, - visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, - minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): +def get_settings_from_env( + controller_port=None, + visualization_server_image=None, + frontend_image=None, + visualization_server_tag=None, + frontend_tag=None, + disable_istio_sidecar=None, + minio_access_key=None, + minio_secret_key=None, + kfp_default_pipeline_root=None, +): """ Returns a dict of settings from environment variables relevant to the controller @@ -45,66 +52,82 @@ def get_settings_from_env(controller_port=None, minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = \ - controller_port or \ - os.environ.get("CONTROLLER_PORT", "8080") + settings["controller_port"] = controller_port or os.environ.get( + "CONTROLLER_PORT", "8080" + ) - settings["visualization_server_image"] = \ - visualization_server_image or \ - os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") + settings["visualization_server_image"] = ( + visualization_server_image + or os.environ.get( + "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" + ) + ) - settings["frontend_image"] = \ - frontend_image or \ - os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") + 
settings["frontend_image"] = frontend_image or os.environ.get( + "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" + ) # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment variable for backwards # compatibility - settings["visualization_server_tag"] = \ - visualization_server_tag or \ - os.environ.get("VISUALIZATION_SERVER_TAG") or \ - os.environ["KFP_VERSION"] + settings["visualization_server_tag"] = ( + visualization_server_tag + or os.environ.get("VISUALIZATION_SERVER_TAG") + or os.environ["KFP_VERSION"] + ) - settings["frontend_tag"] = \ - frontend_tag or \ - os.environ.get("FRONTEND_TAG") or \ - os.environ["KFP_VERSION"] + settings["frontend_tag"] = ( + frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] + ) - settings["disable_istio_sidecar"] = \ - disable_istio_sidecar if disable_istio_sidecar is not None \ - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + settings["disable_istio_sidecar"] = ( + disable_istio_sidecar + if disable_istio_sidecar is not None + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" + ) - settings["minio_access_key"] = \ - minio_access_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') + settings["minio_access_key"] = minio_access_key or base64.b64encode( + bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") + ).decode("utf-8") - settings["minio_secret_key"] = \ - minio_secret_key or \ - base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') + settings["minio_secret_key"] = minio_secret_key or base64.b64encode( + bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") + ).decode("utf-8") # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = \ - kfp_default_pipeline_root or \ - os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") + settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( + "KFP_DEFAULT_PIPELINE_ROOT" + ) return 
settings -def server_factory(visualization_server_image, - visualization_server_tag, frontend_image, frontend_tag, - disable_istio_sidecar, minio_access_key, - minio_secret_key, kfp_default_pipeline_root=None, - url="", controller_port=8080): +def server_factory( + visualization_server_image, + visualization_server_tag, + frontend_image, + frontend_tag, + disable_istio_sidecar, + minio_access_key, + minio_secret_key, + kfp_default_pipeline_root=None, + url="", + controller_port=8080, +): """ Returns an HTTPServer populated with Handler with customized settings """ + class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = parent.get("metadata", {}).get( - "labels", {}).get("pipelines.kubeflow.org/enabled") + pipeline_enabled = ( + parent.get("metadata", {}) + .get("labels", {}) + .get("pipelines.kubeflow.org/enabled") + ) if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -113,29 +136,30 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - }] - + desired_resources += [ + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + } + ] # Compute status based on observed state. 
desired_status = { - "kubeflow-pipelines-ready": - len(children["Secret.v1"]) == 1 and - len(children["ConfigMap.v1"]) == desired_configmap_count and - len(children["Deployment.apps/v1"]) == 2 and - len(children["Service.v1"]) == 2 and - len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and - len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and - "True" or "False" + "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 + and len(children["ConfigMap.v1"]) == desired_configmap_count + and len(children["Deployment.apps/v1"]) == 2 + and len(children["Service.v1"]) == 2 + and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 + and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 + and "True" + or "False" } # Generate the desired child object(s). @@ -148,8 +172,7 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": - "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -158,50 +181,38 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, + "labels": {"app": "ml-pipeline-visualizationserver"}, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - }, + "matchLabels": {"app": "ml-pipeline-visualizationserver"}, }, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-visualizationserver" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-visualizationserver"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "image": f"{visualization_server_image}:{visualization_server_tag}", - 
"imagePullPolicy": - "IfNotPresent", - "name": - "ml-pipeline-visualizationserver", - "ports": [{ - "containerPort": 8888 - }], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi" - }, - "limits": { - "cpu": "500m", - "memory": "1Gi" + "containers": [ + { + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": "IfNotPresent", + "name": "ml-pipeline-visualizationserver", + "ports": [{"containerPort": 8888}], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi", + }, + "limits": {"cpu": "500m", "memory": "1Gi"}, }, } - }], - "serviceAccountName": - "default-editor", + ], + "serviceAccountName": "default-editor", }, }, }, @@ -215,12 +226,8 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": { - "tls": { - "mode": "ISTIO_MUTUAL" - } - } - } + "trafficPolicy": {"tls": {"mode": "ISTIO_MUTUAL"}}, + }, }, { "apiVersion": "security.istio.io/v1beta1", @@ -231,18 +238,22 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": { - "app": "ml-pipeline-visualizationserver" - } + "matchLabels": {"app": "ml-pipeline-visualizationserver"} }, - "rules": [{ - "from": [{ - "source": { - "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] - } - }] - }] - } + "rules": [ + { + "from": [ + { + "source": { + "principals": [ + "cluster.local/ns/kubeflow/sa/ml-pipeline" + ] + } + } + ] + } + ], + }, }, { "apiVersion": "v1", @@ -252,12 +263,14 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [{ - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - }], + "ports": [ + { + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + } + ], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -268,73 +281,62 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - 
}, + "labels": {"app": "ml-pipeline-ui-artifact"}, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": { - "matchLabels": { - "app": "ml-pipeline-ui-artifact" - } - }, + "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, "template": { "metadata": { - "labels": { - "app": "ml-pipeline-ui-artifact" - }, - "annotations": disable_istio_sidecar and { - "sidecar.istio.io/inject": "false" - } or {}, + "labels": {"app": "ml-pipeline-ui-artifact"}, + "annotations": disable_istio_sidecar + and {"sidecar.istio.io/inject": "false"} + or {}, }, "spec": { - "containers": [{ - "name": - "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": - "IfNotPresent", - "ports": [{ - "containerPort": 3000 - }], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact" - } - } - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact" - } - } - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi" - }, - "limits": { - "cpu": "100m", - "memory": "500Mi" + "containers": [ + { + "name": "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": "IfNotPresent", + "ports": [{"containerPort": 3000}], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact", + } + }, + }, + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi", + }, + "limits": { + "cpu": "100m", + "memory": "500Mi", + }, }, } - }], - "serviceAccountName": - "default-editor" - } - } - } + ], + "serviceAccountName": "default-editor", + }, + }, + }, }, { "apiVersion": "v1", @@ -342,52 
+344,55 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": { - "app": "ml-pipeline-ui-artifact" - } + "labels": {"app": "ml-pipeline-ui-artifact"}, }, "spec": { - "ports": [{ - "name": - "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000 - }], - "selector": { - "app": "ml-pipeline-ui-artifact" - } - } + "ports": [ + { + "name": "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000, + } + ], + "selector": {"app": "ml-pipeline-ui-artifact"}, + }, }, ] - print('Received request:\n', json.dumps(parent, sort_keys=True)) - print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True)) + print("Received request:\n", json.dumps(parent, sort_keys=True)) + print( + "Desired resources except secrets:\n", + json.dumps(desired_resources, sort_keys=True), + ) # Moved after the print argument because this is sensitive data. - desired_resources.append({ - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - }) + desired_resources.append( + { + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + } + ) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. 
observed = json.loads( - self.rfile.read(int(self.headers.get("content-length")))) + self.rfile.read(int(self.headers.get("content-length"))) + ) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), 'utf-8')) + self.wfile.write(bytes(json.dumps(desired), "utf-8")) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py index 50362d60fd..bc9a6549b5 100644 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,10 +11,8 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -24,16 +22,14 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - } + }, } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": { - "pipelines.kubeflow.org/enabled": "true" - }, - "name": "myName" + "labels": {"pipelines.kubeflow.org/enabled": "true"}, + "name": "myName", } }, "children": { @@ -43,7 +39,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - } + }, } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -70,34 +66,38 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - } - ) - -ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, - **{ - 
"KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - } - ) - -ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - } - ) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - } - ) +ENV_KFP_VERSION_ONLY = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + }, +) + +ENV_IMAGES_NO_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + }, +) + +ENV_IMAGES_WITH_TAGS = dict( + ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + }, +) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( + ENV_IMAGES_WITH_TAGS, + **{ + "DISABLE_ISTIO_SIDECAR": "false", + }, +) def generate_image_name(imagename, tag): @@ -154,40 +154,57 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), - 
generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION + ), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, - expected_visualization_server_image, expected_frontend_server_image): +def test_sync_server_with_pipeline_enabled( + sync_server, + data, + expected_status, + expected_visualization_server_image, + 
expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -207,13 +224,17 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -221,19 +242,28 @@ def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), - generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name( + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], + ), + generate_image_name( + ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], + ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], + ), ), ], - indirect=["sync_server_from_arguments"] + indirect=["sync_server_from_arguments"], ) def test_sync_server_with_direct_passing_of_settings( - sync_server_from_arguments, data, 
expected_status, expected_visualization_server_image, - expected_frontend_server_image): + sync_server_from_arguments, + data, + expected_status, + expected_visualization_server_image, + expected_frontend_server_image, +): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -250,13 +280,17 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status + assert results["status"] == expected_status # Poke a few children to test things that can vary by environment variable - assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_visualization_server_image - assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ - "image"] == expected_frontend_server_image + assert ( + results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_visualization_server_image + ) + assert ( + results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] + == expected_frontend_server_image + ) @pytest.mark.parametrize( @@ -264,10 +298,11 @@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"] + indirect=["sync_server"], ) -def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, - expected_children): +def test_sync_server_without_pipeline_enabled( + sync_server, data, expected_status, expected_children +): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -282,5 +317,5 @@ def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status results = json.loads(x.text) # Test overall status of whether children are ok - assert results['status'] == expected_status - assert results['children'] == expected_children + assert results["status"] == expected_status + assert 
results["children"] == expected_children diff --git a/contrib/kserve/tests/test_sklearn.py b/contrib/kserve/tests/test_sklearn.py index 2c17257019..0e2d21241c 100644 --- a/contrib/kserve/tests/test_sklearn.py +++ b/contrib/kserve/tests/test_sklearn.py @@ -50,7 +50,9 @@ def test_sklearn_kserve(): spec=V1beta1InferenceServiceSpec(predictor=predictor), ) - kserve_client = KServeClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config")) + kserve_client = KServeClient( + config_file=os.environ.get("KUBECONFIG", "~/.kube/config") + ) kserve_client.create(isvc) kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE) res = predict(service_name, "./data/iris_input.json") diff --git a/hack/trivy_scan.py b/hack/trivy_scan.py index d76536604f..a8067be420 100644 --- a/hack/trivy_scan.py +++ b/hack/trivy_scan.py @@ -7,8 +7,8 @@ # - Summary of security counts with images a JSON file inside ../image_lists/summary_of_severity_counts_for_WG folder # 4. Generate a summary of the security scan reports # - The summary will be saved in JSON format inside ../image_lists/summary_of_severity_counts_for_WG folder -# 5. Before run this file you have to -# 1. Install kustomize +# 5. Before run this file you have to +# 1. Install kustomize # - sudo apt install snapd # - sudo snap install kustomize # 2. 
Install trivy @@ -37,14 +37,16 @@ "manifests": "../common/cert-manager/cert-manager/base ../common/cert-manager/kubeflow-issuer/base ../common/istio-1-22/istio-crds/base ../common/istio-1-22/istio-namespace/base ../common/istio-1-22/istio-install/overlays/oauth2-proxy ../common/oidc-client/oauth2-proxy/overlays/m2m-self-signed ../common/dex/overlays/oauth2-proxy ../common/knative/knative-serving/overlays/gateways ../common/knative/knative-eventing/base ../common/istio-1-22/cluster-local-gateway/base ../common/kubeflow-namespace/base ../common/kubeflow-roles/base ../common/istio-1-22/kubeflow-istio-resources/base", "workbenches": "../apps/pvcviewer-controller/upstream/base ../apps/admission-webhook/upstream/overlays ../apps/centraldashboard/upstream/overlays/oauth2-proxy ../apps/jupyter/jupyter-web-app/upstream/overlays ../apps/volumes-web-app/upstream/overlays ../apps/tensorboard/tensorboards-web-app/upstream/overlays ../apps/profiles/upstream/overlays ../apps/jupyter/notebook-controller/upstream/overlays ../apps/tensorboard/tensorboard-controller/upstream/overlays", "serving": "../contrib/kserve - ../contrib/kserve/models-web-app/overlays/kubeflow", - "model-registry": "../apps/model-registry/upstream" + "model-registry": "../apps/model-registry/upstream", } DIRECTORY = "../image_lists" os.makedirs(DIRECTORY, exist_ok=True) SCAN_REPORTS_DIR = os.path.join(DIRECTORY, "security_scan_reports") ALL_SEVERITY_COUNTS = os.path.join(DIRECTORY, "severity_counts_with_images_for_WG") -SUMMARY_OF_SEVERITY_COUNTS = os.path.join(DIRECTORY, "summary_of_severity_counts_for_WG") +SUMMARY_OF_SEVERITY_COUNTS = os.path.join( + DIRECTORY, "summary_of_severity_counts_for_WG" +) os.makedirs(SCAN_REPORTS_DIR, exist_ok=True) os.makedirs(ALL_SEVERITY_COUNTS, exist_ok=True) @@ -62,6 +64,7 @@ def save_images(wg, images, version): f.write('\n'.join(images)) log(f"File {output_file} successfully created") + def validate_semantic_version(version): # Validates a semantic version string (e.g., 
"0.1.2" or "latest"). regex = r"^[0-9]+\.[0-9]+\.[0-9]+$" @@ -70,18 +73,23 @@ def validate_semantic_version(version): else: raise ValueError(f"Invalid semantic version: '{version}'") + def extract_images(version): version = validate_semantic_version(version) log(f"Running the script using Kubeflow version: {version}") - all_images = set() # Collect all unique images across workgroups + all_images = set() # Collect all unique images across workgroups for wg, dirs in wg_dirs.items(): wg_images = set() # Collect unique images for this workgroup for dir_path in dirs.split(): for root, _, files in os.walk(dir_path): for file in files: - if file in ["kustomization.yaml", "kustomization.yml", "Kustomization"]: + if file in [ + "kustomization.yaml", + "kustomization.yml", + "Kustomization", + ]: full_path = os.path.join(root, file) try: # Execute `kustomize build` to render the kustomization file @@ -89,7 +97,7 @@ def extract_images(version): except subprocess.CalledProcessError as e: log(f"ERROR:\t Failed \"kustomize build\" command for directory: {root}. See error above") continue - + # Use regex to find lines with 'image: :' or 'image: ' # and '- image: :' but avoid environment variables kustomize_images = re.findall(r'^\s*-?\s*image:\s*([^$\s:]+(?:\:[^\s]+)?)$', result.stdout, re.MULTILINE) @@ -104,9 +112,18 @@ def extract_images(version): uniq_images = sorted(all_images) save_images("all", uniq_images, version) -parser = argparse.ArgumentParser(description="Extract images from Kubeflow kustomizations.") + +parser = argparse.ArgumentParser( + description="Extract images from Kubeflow kustomizations." +) # Define a positional argument 'version' with optional occurrence and default value 'latest'. 
You can run this file as python3 .py or python .py -parser.add_argument("version", nargs="?", type=str, default="latest", help="Kubeflow version to use (defaults to latest).") +parser.add_argument( + "version", + nargs="?", + type=str, + default="latest", + help="Kubeflow version to use (defaults to latest).", +) args = parser.parse_args() extract_images(args.version) @@ -115,13 +132,17 @@ def extract_images(version): log("Started scanning images") # Get list of text files excluding "kf_latest_all_images.txt" -files = [f for f in glob.glob(os.path.join(DIRECTORY, "*.txt")) if not f.endswith("kf_latest_all_images.txt")] +files = [ + f + for f in glob.glob(os.path.join(DIRECTORY, "*.txt")) + if not f.endswith("kf_latest_all_images.txt") +] # Loop through each text file in the specified directory for file in files: log(f"Scanning images in {file}") - file_base_name = os.path.basename(file).replace('.txt', '') + file_base_name = os.path.basename(file).replace(".txt", "") # Directory to save reports for this specific file file_reports_dir = os.path.join(SCAN_REPORTS_DIR, file_base_name) @@ -131,15 +152,15 @@ def extract_images(version): severity_count = os.path.join(file_reports_dir, "severity_counts") os.makedirs(severity_count, exist_ok=True) - with open(file, 'r') as f: + with open(file, "r") as f: lines = f.readlines() for line in lines: line = line.strip() - image_name = line.split(':')[0] - image_tag = line.split(':')[1] if ':' in line else '' + image_name = line.split(":")[0] + image_tag = line.split(":")[1] if ":" in line else "" - image_name_scan = image_name.split('/')[-1] + image_name_scan = image_name.split("/")[-1] if image_tag: image_name_scan = f"{image_name_scan}_{image_tag}" @@ -151,7 +172,7 @@ def extract_images(version): try: result = subprocess.run(["trivy", "image", "--format", "json", "--output", scan_output_file, line], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - with open(scan_output_file, 'r') as json_file: + with 
open(scan_output_file, "r") as json_file: scan_data = json.load(json_file) if not scan_data.get('Results'): @@ -174,12 +195,8 @@ def extract_images(version): continue elif severity in severity_counts: severity_counts[severity] += 1 - - report = { - "image": line, - "severity_counts": severity_counts - } + report = {"image": line, "severity_counts": severity_counts} image_table = PrettyTable() image_table.field_names = ["Critical", "High", "Medium", "Low"] @@ -209,16 +226,18 @@ def extract_images(version): else: combined_data = [] for json_file in json_files: - with open(json_file, 'r') as jf: + with open(json_file, "r") as jf: combined_data.append(json.load(jf)) - with open(output_file, 'w') as of: + with open(output_file, "w") as of: json.dump({"data": combined_data}, of, indent=4) log(f"JSON files successfully combined into '{output_file}'") # File to save summary of the severity counts for WGs as JSON format. -summary_file = os.path.join(SUMMARY_OF_SEVERITY_COUNTS, "severity_summary_in_json_format.json") +summary_file = os.path.join( + SUMMARY_OF_SEVERITY_COUNTS, "severity_summary_in_json_format.json" +) # Initialize counters total_images = 0 @@ -233,27 +252,27 @@ def extract_images(version): # Loop through each JSON file in the ALL_SEVERITY_COUNTS for file_path in glob.glob(os.path.join(ALL_SEVERITY_COUNTS, "*.json")): # Split filename based on underscores - filename_parts = os.path.basename(file_path).split('_') + filename_parts = os.path.basename(file_path).split("_") # Check if there are at least 3 parts (prefix, name, _images) if len(filename_parts) >= 4: - # Extract name (second part) - filename = filename_parts[2] - filename = filename.capitalize() + # Extract name (second part) + filename = filename_parts[2] + filename = filename.capitalize() else: log(f"Skipping invalid filename format: {file_path}") continue - with open(file_path, 'r') as f: - data = json.load(f)['data'] + with open(file_path, "r") as f: + data = json.load(f)["data"] # Initialize 
counts for this file image_count = len(data) - low = sum(entry['severity_counts']['LOW'] for entry in data) - medium = sum(entry['severity_counts']['MEDIUM'] for entry in data) - high = sum(entry['severity_counts']['HIGH'] for entry in data) - critical = sum(entry['severity_counts']['CRITICAL'] for entry in data) + low = sum(entry["severity_counts"]["LOW"] for entry in data) + medium = sum(entry["severity_counts"]["MEDIUM"] for entry in data) + high = sum(entry["severity_counts"]["HIGH"] for entry in data) + critical = sum(entry["severity_counts"]["CRITICAL"] for entry in data) # Update the total counts total_images += image_count @@ -268,19 +287,19 @@ def extract_images(version): "LOW": low, "MEDIUM": medium, "HIGH": high, - "CRITICAL": critical + "CRITICAL": critical, } # Update merged_data with filename as key merged_data[filename] = file_data # Add total counts to merged_data - merged_data['total'] = { + merged_data["total"] = { "images": total_images, "LOW": total_low, "MEDIUM": total_medium, "HIGH": total_high, - "CRITICAL": total_critical + "CRITICAL": total_critical, } @@ -289,46 +308,64 @@ def extract_images(version): # Write the final output to a file -with open(summary_file, 'w') as summary_f: +with open(summary_file, "w") as summary_f: json.dump(merged_data, summary_f, indent=4) log(f"Summary written to: {summary_file} as JSON format") # Load JSON content from the file -with open(summary_file, 'r') as file: +with open(summary_file, "r") as file: data = json.load(file) # Define a mapping for working group names groupnames = { "Automl": "AutoML", "Pipelines": "Pipelines", - "Workbenches":"Workbenches(Notebooks)", + "Workbenches": "Workbenches(Notebooks)", "Serving": "Kserve", - "Manifests":"Manifests", + "Manifests": "Manifests", "Training": "Training", - "Model-registry":"Model Registry", + "Model-registry": "Model Registry", "total": "All Images", } # Create PrettyTable table = PrettyTable() -table.field_names = ["Working Group", "Images", "Critical 
CVE", "High CVE", "Medium CVE", "Low CVE"] +table.field_names = [ + "Working Group", + "Images", + "Critical CVE", + "High CVE", + "Medium CVE", + "Low CVE", +] # Populate the table with data for group_name in groupnames: if group_name in data: # Check if group_name exists in data value = data[group_name] - table.add_row([groupnames[group_name], value["images"], value["CRITICAL"], value["HIGH"], value["MEDIUM"], value["LOW"]]) + table.add_row( + [ + groupnames[group_name], + value["images"], + value["CRITICAL"], + value["HIGH"], + value["MEDIUM"], + value["LOW"], + ] + ) # log the table log(table) # Write the table output to a file in the specified folder -output_file = SUMMARY_OF_SEVERITY_COUNTS + '/summary_of_severity_counts_for_WGs_in_table.txt' -with open(output_file, 'w') as f: +output_file = ( + SUMMARY_OF_SEVERITY_COUNTS + "/summary_of_severity_counts_for_WGs_in_table.txt" +) +with open(output_file, "w") as f: f.write(str(table)) log("Output saved to:", output_file) log("Severity counts with images respect to WGs are saved in the",ALL_SEVERITY_COUNTS) -log("Scanned Json reports on images are saved in" ,SCAN_REPORTS_DIR) \ No newline at end of file +log("Scanned Json reports on images are saved in" ,SCAN_REPORTS_DIR) diff --git a/tests/gh-actions/kf-objects/test_pipeline.py b/tests/gh-actions/kf-objects/test_pipeline.py index 9bd8228e5a..6755d30ff4 100755 --- a/tests/gh-actions/kf-objects/test_pipeline.py +++ b/tests/gh-actions/kf-objects/test_pipeline.py @@ -7,22 +7,23 @@ def echo_op(): print("Test pipeline") -@dsl.pipeline( - name='test-pipeline', - description='A test pipeline.' -) + +@dsl.pipeline(name="test-pipeline", description="A test pipeline.") def hello_world_pipeline(): echo_task = echo_op() + if __name__ == "__main__": # Run the Kubeflow Pipeline in the user's namespace. 
- kfp_client = kfp.Client(host="http://localhost:3000", - namespace="kubeflow-user-example-com") + kfp_client = kfp.Client( + host="http://localhost:3000", namespace="kubeflow-user-example-com" + ) kfp_client.runs.api_client.default_headers.update( - {"kubeflow-userid": "kubeflow-user-example-com"}) + {"kubeflow-userid": "kubeflow-user-example-com"} + ) # create the KFP run run_id = kfp_client.create_run_from_pipeline_func( hello_world_pipeline, namespace="kubeflow-user-example-com", arguments={}, - ).run_id \ No newline at end of file + ).run_id From 335393ed0578731a51839bddb75a8da5cd9d0569 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 18:36:51 +0530 Subject: [PATCH 03/37] Add github action to format yaml files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/python_formatter.yaml | 1 + .github/workflows/yaml_formatter.yaml | 15 +++++++++++++++ .yamllint.yaml | 12 ++++++++++++ 3 files changed, 28 insertions(+) create mode 100644 .github/workflows/yaml_formatter.yaml create mode 100644 .yamllint.yaml diff --git a/.github/workflows/python_formatter.yaml b/.github/workflows/python_formatter.yaml index 83446b571b..74d8de063a 100644 --- a/.github/workflows/python_formatter.yaml +++ b/.github/workflows/python_formatter.yaml @@ -7,5 +7,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + - uses: psf/black@stable \ No newline at end of file diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml new file mode 100644 index 0000000000..6b02d0004a --- /dev/null +++ b/.github/workflows/yaml_formatter.yaml @@ -0,0 +1,15 @@ +name: Proper Formatting on Yaml files + +on: [push, pull_request] + +jobs: + format_python_files: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install yamllint + run: pip install yamllint + + - name: Lint YAML files + run: 
yamllint . \ No newline at end of file diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 0000000000..46153d48f6 --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,12 @@ +# It extends the default conf by adjusting some options. +extends: default + +rules: + document-start: + present: false + document-end: + present: false + indentation: + indent-sequences: false + line-length: + max: 400 \ No newline at end of file From 0388a293af9a14112c241a50cee89733720ecd5e Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 19:11:53 +0530 Subject: [PATCH 04/37] Add step YAML Formatting Guidelines to yaml_formatter.yaml file Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 28 ++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index 6b02d0004a..4fef54bb07 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -12,4 +12,30 @@ jobs: run: pip install yamllint - name: Lint YAML files - run: yamllint . \ No newline at end of file + run: yamllint . + + - name: YAML Formatting Guidelines + run: | + echo "### YAML Formatting Guidelines ### + If there is a formatting error in your YAML file, you will see errors like the one below: + 'Error: 6:4 [indentation] wrong indentation: expected 2 but found 3' + + To fix these errors, refer to the YAML formatting rules at: + https://yamllint.readthedocs.io/en/stable/rules.html# + + Search for the keyword inside the brackets [] in the error message. In this example, it's 'indentation'. + + Note: Some rules have been customized in the '.yamllint.yaml' file. 
Below is the content of that file:
+
+          extends: default
+
+          rules:
+            document-start:
+              present: false
+            document-end:
+              present: false
+            indentation:
+              indent-sequences: false
+            line-length:
+              max: 400
+          "
\ No newline at end of file

From afbe9304fb92b88daea2a0bc0e0d8c2de6e7af17 Mon Sep 17 00:00:00 2001
From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com>
Date: Sat, 29 Jun 2024 19:29:06 +0530
Subject: [PATCH 05/37] Made changes to run next steps although the previous step fails

Signed-off-by: hansinikarunarathne
 <107214435+hansinikarunarathne@users.noreply.github.com>
---
 .github/workflows/yaml_formatter.yaml | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml
index 4fef54bb07..82714c366c 100644
--- a/.github/workflows/yaml_formatter.yaml
+++ b/.github/workflows/yaml_formatter.yaml
@@ -1,9 +1,9 @@
-name: Proper Formatting on Yaml files
+name: Proper Formatting on YAML files
 
 on: [push, pull_request]
 
 jobs:
-  format_python_files:
+  format_YAML_files:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
@@ -13,8 +13,10 @@ jobs:
 
       - name: Lint YAML files
         run: yamllint .
+ continue-on-error: true - name: YAML Formatting Guidelines + if: always() run: | echo "### YAML Formatting Guidelines ### If there is a formatting error in your YAML file, you will see errors like the one below: @@ -38,4 +40,4 @@ jobs: indent-sequences: false line-length: max: 400 - " \ No newline at end of file + " From 2c4db16e85ea9d2cbfcb878397b459fcbfb56a46 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 20:48:39 +0530 Subject: [PATCH 06/37] switch steps Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index 82714c366c..bd62c57275 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -10,13 +10,8 @@ jobs: - name: Install yamllint run: pip install yamllint - - - name: Lint YAML files - run: yamllint . - continue-on-error: true - + - name: YAML Formatting Guidelines - if: always() run: | echo "### YAML Formatting Guidelines ### If there is a formatting error in your YAML file, you will see errors like the one below: @@ -41,3 +36,8 @@ jobs: line-length: max: 400 " + + - name: Lint YAML files + run: yamllint . 
+ + From fc0e758b65f6000f9afd8cd745df2053c4885b76 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sun, 30 Jun 2024 01:19:56 +0530 Subject: [PATCH 07/37] added shellCheck for bash formatting Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/workflows/bash_formatter.yaml diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml new file mode 100644 index 0000000000..d607d3e358 --- /dev/null +++ b/.github/workflows/bash_formatter.yaml @@ -0,0 +1,16 @@ +name: Proper Formatting on bash files + +on: [push, pull_request] + +jobs: + format_bash_files: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install ShellCheck + run: sudo apt install -y shellcheck + + - name: Run ShellCheck + run: find . -type f -name "*.sh" -exec shellcheck {} + + From 43d95b80ae7966f5468bb82f56ed5e9ab38c4ccc Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Mon, 1 Jul 2024 18:16:04 +0530 Subject: [PATCH 08/37] Changed code only to lint yaml files inside the common and example folder Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index bd62c57275..a9db7fc59a 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -37,7 +37,28 @@ jobs: max: 400 " - - name: Lint YAML files - run: yamllint . + # - name: Lint YAML files + # run: yamllint . 
+ + - name: Fetch main branch + run: git fetch origin main + + - name: Set up changed files + run: | + git diff --name-only origin/main...HEAD | grep -E '^common/.*\.ya?ml$|^example/.*\.ya?ml$' > changed_files.txt || true + + - name: Display changed files + run: cat changed_files.txt + + - name: Run yamllint on changed files + run: | + if [ -s changed_files.txt ]; then + while IFS= read -r file; do + echo "Running yamllint on $file" + yamllint "$file" + done < changed_files.txt + else + echo "No YAML files changed." + fi From eb483c80a354bda26393662f482e98ffdbb28b0d Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Mon, 1 Jul 2024 18:20:58 +0530 Subject: [PATCH 09/37] Changed main to master Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index a9db7fc59a..6d165e6cdc 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -41,7 +41,7 @@ jobs: # run: yamllint . 
- name: Fetch main branch
-        run: git fetch origin main
+        run: git fetch origin master

From 5f6e20a130704d42302c4549442f65098c9a9388 Mon Sep 17 00:00:00 2001
From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com>
Date: Mon, 1 Jul 2024 22:33:29 +0530
Subject: [PATCH 10/37] Run yamllint on files which are changed in PR only

Signed-off-by: hansinikarunarathne
 <107214435+hansinikarunarathne@users.noreply.github.com>
---
 .github/workflows/yaml_formatter.yaml | 21 +++++++--------------
 run_yamllint.sh                       | 10 ++++++++++
 2 files changed, 17 insertions(+), 14 deletions(-)
 create mode 100644 run_yamllint.sh

diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml
index 6d165e6cdc..242796f847 100644
--- a/.github/workflows/yaml_formatter.yaml
+++ b/.github/workflows/yaml_formatter.yaml
@@ -36,29 +36,22 @@ jobs:
         line-length:
           max: 400
         "
-
-      # - name: Lint YAML files
-      # run: yamllint .
-
-      - name: Fetch main branch
+      - name: Fetch master branch
         run: git fetch origin master
 
       - name: Set up changed files
         run: |
-          git diff --name-only origin/main...HEAD | grep -E '^common/.*\.ya?ml$|^example/.*\.ya?ml$' > changed_files.txt || true
+          git diff --name-only origin/main...HEAD | grep -E '^common/.*\.ya?ml$|^example/.*\.ya?ml$' > changed_files_in_PR.txt || true
 
       - name: Display changed files
-        run: cat changed_files.txt
+        run: cat changed_files_in_PR.txt
 
       - name: Run yamllint on changed files
         run: |
-          if [ -s changed_files.txt ]; then
-            while IFS= read -r file; do
-              echo "Running yamllint on $file"
-              yamllint "$file"
-            done < changed_files.txt
-          else
-            echo "No YAML files changed."
- fi + chmod +x ./run_yamllint.sh + ./run_yamllint.sh + shell: bash + diff --git a/run_yamllint.sh b/run_yamllint.sh new file mode 100644 index 0000000000..3225710f59 --- /dev/null +++ b/run_yamllint.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -s changed_files_in_PR.txt ]; then + while IFS= read -r file; do + echo "Running yamllint on $file" + yamllint "$file" + done < changed_files_in_PR.txt +else + echo "No YAML files changed in this PR." +fi From 18081cb902194b3f1e7926174ba35404495d1e22 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:57:47 +0530 Subject: [PATCH 11/37] Changed 'Proper formatting on python files' github workflow only to run for python files in common and example folder Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/python_formatter.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/python_formatter.yaml b/.github/workflows/python_formatter.yaml index 74d8de063a..e9dd0a311f 100644 --- a/.github/workflows/python_formatter.yaml +++ b/.github/workflows/python_formatter.yaml @@ -9,4 +9,9 @@ jobs: - uses: actions/checkout@v3 - uses: psf/black@stable + with: + src: | + ./common + ./example + \ No newline at end of file From b132ba13a558ffc4490e9063a2be5391171a5793 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 00:22:48 +0530 Subject: [PATCH 12/37] Add shellcheckrc file Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .shellcheckrc | 2 + tests/gh-actions/runasnonroot.sh | 204 +++++++++++++++---------------- 2 files changed, 104 insertions(+), 102 deletions(-) create mode 100644 .shellcheckrc diff --git a/.shellcheckrc b/.shellcheckrc new file mode 100644 index 0000000000..98a56bdd29 --- /dev/null +++ b/.shellcheckrc @@ -0,0 +1,2 @@ +# ~/.shellcheckrc 
+disable=SC1017 \ No newline at end of file diff --git a/tests/gh-actions/runasnonroot.sh b/tests/gh-actions/runasnonroot.sh index 921623c9c8..532424cd37 100644 --- a/tests/gh-actions/runasnonroot.sh +++ b/tests/gh-actions/runasnonroot.sh @@ -1,102 +1,102 @@ -#!/bin/bash - -namespace="kubeflow" -error_flag=0 - -# Function to check if 'id' command is available in a container -has_id_command() { - local pod_name="$1" - local container_name="$2" - - # Execute 'id' command and capture the output - if kubectl exec -it -n "$namespace" "$pod_name" -c "$container_name" -- id -u >/dev/null 2>&1; then - return 0 # 'id' command is available - else - return 1 # 'id' command is not available - fi -} - -# Function to check 'securityContext' and 'runAsNonRoot' at the pod or container level -has_securityContext_and_runAsNonRoot() { - local pod_name="$1" - local container_name="$2" - - # Use jq to check if 'securityContext' is defined at the pod level - local securityContextPod=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.securityContext') - - if [ "$securityContextPod" = "null" ]; then - : # 'securityContext' is missing at the pod level, continue checking at the container level - else - # Check 'runAsNonRoot' at the pod level - local runAsNonRootPod=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.securityContext.runAsNonRoot // "Missing"') - - if [ "$runAsNonRootPod" = "Missing" ]; then - : # 'runAsNonRoot' is missing at the pod level, continue checking at the container level - else - return 0 # 'runAsNonRoot' is present at the pod level (success) - fi - fi - - # Use jq to check 'securityContext' at the container level - local securityContextContainer=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[] | select(.name == "'"$container_name"'").securityContext') - - if [ "$securityContextContainer" = "null" ]; then - if [ "$securityContextPod" = "null" ]; then - echo "Error: 'securityContext' is missing at 
the pod and container level in container $container_name of pod $pod_name" - return 1 - else - echo "Error: There is no runasnonroot on pod level and 'securityContext' is missing at container level in container $container_name of pod $pod_name" - return 1 - fi - fi - - # Check 'runAsNonRoot' at the container level - local runAsNonRootContainer=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[] | select(.name == "'"$container_name"'").securityContext.runAsNonRoot // "Missing"') - - if [ "$runAsNonRootContainer" = "Missing" ]; then - echo "Error: There is no runasnonroot on pod level and'runAsNonRoot' is missing in container $container_name of pod $pod_name" - return 1 # 'runAsNonRoot' is missing at the container level (fail) - fi - - return 0 # 'securityContext' and 'runAsNonRoot' are defined at the container level -} - -# Get a list of pod names in the specified namespace that are not in the "Completed" state -pod_names=$(kubectl get pods -n "$namespace" --field-selector=status.phase!=Succeeded,status.phase!=Failed -o json | jq -r '.items[].metadata.name') - -# Loop through the pod names and execute checks -for pod_name in $pod_names; do - echo "Entering pod $pod_name in namespace $namespace..." 
- - container_names=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[].name') - - for container_name in $container_names; do - if has_securityContext_and_runAsNonRoot "$pod_name" "$container_name"; then - error_flag=1 - fi - - if has_id_command "$pod_name" "$container_name"; then - user_id=$(kubectl exec -it -n "$namespace" "$pod_name" -c "$container_name" -- id -u) - - # Clean up whitespace in the user_id using tr - user_id_cleaned=$(echo -n "$user_id" | tr -d '[:space:]') - - if [ "$user_id_cleaned" = "0" ]; then - echo "Error: Pod $pod_name contains user ID 0 in container $container_name" - error_flag=1 - else - echo "Container: $container_name - User ID: $user_id_cleaned" - fi - else - echo "Warning: 'id' command not available in container $container_name" - fi - done -done - -# Exit with an error if any pod contains an error condition -if [ $error_flag -eq 1 ]; then - exit 1 -fi - -# Exit successfully -exit 0 +#!/bin/bash + +namespace="kubeflow" +error_flag=0 + +# Function to check if 'id' command is available in a container +has_id_command() { + local pod_name="$1" + local container_name="$2" + + # Execute 'id' command and capture the output + if kubectl exec -it -n "$namespace" "$pod_name" -c "$container_name" -- id -u >/dev/null 2>&1; then + return 0 # 'id' command is available + else + return 1 # 'id' command is not available + fi +} + +# Function to check 'securityContext' and 'runAsNonRoot' at the pod or container level +has_securityContext_and_runAsNonRoot() { + local pod_name="$1" + local container_name="$2" + + # Use jq to check if 'securityContext' is defined at the pod level + local securityContextPod=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.securityContext') + + if [ "$securityContextPod" = "null" ]; then + : # 'securityContext' is missing at the pod level, continue checking at the container level + else + # Check 'runAsNonRoot' at the pod level + local runAsNonRootPod=$(kubectl get pod -n 
"$namespace" "$pod_name" -o json | jq -r '.spec.securityContext.runAsNonRoot // "Missing"') + + if [ "$runAsNonRootPod" = "Missing" ]; then + : # 'runAsNonRoot' is missing at the pod level, continue checking at the container level + else + return 0 # 'runAsNonRoot' is present at the pod level (success) + fi + fi + + # Use jq to check 'securityContext' at the container level + local securityContextContainer=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[] | select(.name == "'"$container_name"'").securityContext') + + if [ "$securityContextContainer" = "null" ]; then + if [ "$securityContextPod" = "null" ]; then + echo "Error: 'securityContext' is missing at the pod and container level in container $container_name of pod $pod_name" + return 1 + else + echo "Error: There is no runasnonroot on pod level and 'securityContext' is missing at container level in container $container_name of pod $pod_name" + return 1 + fi + fi + + # Check 'runAsNonRoot' at the container level + local runAsNonRootContainer=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[] | select(.name == "'"$container_name"'").securityContext.runAsNonRoot // "Missing"') + + if [ "$runAsNonRootContainer" = "Missing" ]; then + echo "Error: There is no runasnonroot on pod level and'runAsNonRoot' is missing in container $container_name of pod $pod_name" + return 1 # 'runAsNonRoot' is missing at the container level (fail) + fi + + return 0 # 'securityContext' and 'runAsNonRoot' are defined at the container level +} + +# Get a list of pod names in the specified namespace that are not in the "Completed" state +pod_names=$(kubectl get pods -n "$namespace" --field-selector=status.phase!=Succeeded,status.phase!=Failed -o json | jq -r '.items[].metadata.name') + +# Loop through the pod names and execute checks +for pod_name in $pod_names; do + echo "Entering pod $pod_name in namespace $namespace..." 
+ + container_names=$(kubectl get pod -n "$namespace" "$pod_name" -o json | jq -r '.spec.containers[].name') + + for container_name in $container_names; do + if has_securityContext_and_runAsNonRoot "$pod_name" "$container_name"; then + error_flag=1 + fi + + if has_id_command "$pod_name" "$container_name"; then + user_id=$(kubectl exec -it -n "$namespace" "$pod_name" -c "$container_name" -- id -u) + + # Clean up whitespace in the user_id using tr + user_id_cleaned=$(echo -n "$user_id" | tr -d '[:space:]') + + if [ "$user_id_cleaned" = "0" ]; then + echo "Error: Pod $pod_name contains user ID 0 in container $container_name" + error_flag=1 + else + echo "Container: $container_name - User ID: $user_id_cleaned" + fi + else + echo "Warning: 'id' command not available in container $container_name" + fi + done +done + +# Exit with an error if any pod contains an error condition +if [ $error_flag -eq 1 ]; then + exit 1 +fi + +# Exit successfully +exit 0 From 672abc421a41643dc501d1864f4a8291bc2083db Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 00:53:17 +0530 Subject: [PATCH 13/37] Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .shellcheckrc | 2 +- .../multi-user/pipelines-profile-controller/run_tests.sh | 1 + .../multi-user/pipelines-profile-controller/run_tests.sh | 1 + hack/synchronize-istio-cni-manifests.sh | 2 +- hack/synchronize-istio-manifests.sh | 2 +- 5 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.shellcheckrc b/.shellcheckrc index 98a56bdd29..34deb1a03e 100644 --- a/.shellcheckrc +++ b/.shellcheckrc @@ -1,2 +1,2 @@ # ~/.shellcheckrc -disable=SC1017 \ No newline at end of file +disable=SC1017,SC2086,SC2070 \ No newline at end of file diff --git 
a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index 4933ec79a8..be33353dfe 100755 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,3 +1,4 @@ +#!/bin/sh # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index 4933ec79a8..480c215f19 100755 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff --git a/hack/synchronize-istio-cni-manifests.sh b/hack/synchronize-istio-cni-manifests.sh index b2fcd7c3af..1f8a22c3e5 100644 --- a/hack/synchronize-istio-cni-manifests.sh +++ b/hack/synchronize-istio-cni-manifests.sh @@ -1,5 +1,5 @@ -@ -1,88 +0,0 @@ #!/usr/bin/env bash +@ -1,88 +0,0 @@ # This script aims at helping create a PR to update the manifests of the # knative. 
diff --git a/hack/synchronize-istio-manifests.sh b/hack/synchronize-istio-manifests.sh index 6a4c8987de..f5a0d89c08 100644 --- a/hack/synchronize-istio-manifests.sh +++ b/hack/synchronize-istio-manifests.sh @@ -1,4 +1,4 @@ -# #!/usr/bin/env bash +# !/usr/bin/env bash # # This script aims at helping create a PR to update the manifests of Istio # # This script: From db196665a59e3944bb31451e18e7bdd90221f08d Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:13:52 +0530 Subject: [PATCH 14/37] made chnages in sh files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .../pipelines-profile-controller/run_tests.sh | 1 + .../v1/gcp-workload-identity-setup.sh | 4 +- .../upstream/gcp-workload-identity-setup.sh | 4 +- hack/synchronize-istio-manifests.sh | 2 +- hack/trivy_scan.py | 91 +++++++++++++------ 5 files changed, 69 insertions(+), 33 deletions(-) diff --git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index 4933ec79a8..be33353dfe 100755 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,3 +1,4 @@ +#!/bin/sh # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff --git a/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh b/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh index 79f86c95c8..2c47c230e6 100755 --- a/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh +++ b/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to $SYSTEM_GSA" -for ksa in ${SYSTEM_KSA[@]}; do +for ksa in 
"${SYSTEM_KSA[@]}"; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in ${USER_KSA[@]}; do +for ksa in "${USER_KSA[@]}"; do bind_gsa_and_ksa $USER_GSA $ksa done diff --git a/apps/pipeline/upstream/gcp-workload-identity-setup.sh b/apps/pipeline/upstream/gcp-workload-identity-setup.sh index 79f86c95c8..a8e5b4b68f 100755 --- a/apps/pipeline/upstream/gcp-workload-identity-setup.sh +++ b/apps/pipeline/upstream/gcp-workload-identity-setup.sh @@ -126,8 +126,8 @@ function create_gsa_if_not_present { gcloud iam service-accounts create $name fi } -create_gsa_if_not_present $SYSTEM_GSA -create_gsa_if_not_present $USER_GSA +create_gsa_if_not_present "$SYSTEM_GSA" +create_gsa_if_not_present "$USER_GSA" function create_ksa_if_not_present { local name=${1} diff --git a/hack/synchronize-istio-manifests.sh b/hack/synchronize-istio-manifests.sh index f5a0d89c08..6fa9811a0b 100644 --- a/hack/synchronize-istio-manifests.sh +++ b/hack/synchronize-istio-manifests.sh @@ -1,4 +1,4 @@ -# !/usr/bin/env bash +#!/usr/bin/env bash # # This script aims at helping create a PR to update the manifests of Istio # # This script: diff --git a/hack/trivy_scan.py b/hack/trivy_scan.py index a8067be420..197de52a71 100644 --- a/hack/trivy_scan.py +++ b/hack/trivy_scan.py @@ -52,16 +52,18 @@ os.makedirs(ALL_SEVERITY_COUNTS, exist_ok=True) os.makedirs(SUMMARY_OF_SEVERITY_COUNTS, exist_ok=True) + def log(*args, **kwargs): # Custom log function that print messages with flush=True by default. - kwargs.setdefault('flush', True) + kwargs.setdefault("flush", True) print(*args, **kwargs) + def save_images(wg, images, version): # Saves a list of container images to a text file named after the workgroup and version. 
output_file = f"../image_lists/kf_{version}_{wg}_images.txt" - with open(output_file, 'w') as f: - f.write('\n'.join(images)) + with open(output_file, "w") as f: + f.write("\n".join(images)) log(f"File {output_file} successfully created") @@ -93,14 +95,26 @@ def extract_images(version): full_path = os.path.join(root, file) try: # Execute `kustomize build` to render the kustomization file - result = subprocess.run(['kustomize', 'build', root], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + ["kustomize", "build", root], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) except subprocess.CalledProcessError as e: - log(f"ERROR:\t Failed \"kustomize build\" command for directory: {root}. See error above") + log( + f'ERROR:\t Failed "kustomize build" command for directory: {root}. See error above' + ) continue # Use regex to find lines with 'image: :' or 'image: ' # and '- image: :' but avoid environment variables - kustomize_images = re.findall(r'^\s*-?\s*image:\s*([^$\s:]+(?:\:[^\s]+)?)$', result.stdout, re.MULTILINE) + kustomize_images = re.findall( + r"^\s*-?\s*image:\s*([^$\s:]+(?:\:[^\s]+)?)$", + result.stdout, + re.MULTILINE, + ) wg_images.update(kustomize_images) # Ensure uniqueness within workgroup images @@ -128,7 +142,6 @@ def extract_images(version): extract_images(args.version) - log("Started scanning images") # Get list of text files excluding "kf_latest_all_images.txt" @@ -164,34 +177,52 @@ def extract_images(version): if image_tag: image_name_scan = f"{image_name_scan}_{image_tag}" - - scan_output_file = os.path.join(file_reports_dir, f"{image_name_scan}_scan.json") - log(f"Scanning ",line) + scan_output_file = os.path.join( + file_reports_dir, f"{image_name_scan}_scan.json" + ) + + log(f"Scanning ", line) try: - result = subprocess.run(["trivy", "image", "--format", "json", "--output", scan_output_file, line], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, 
text=True) + result = subprocess.run( + [ + "trivy", + "image", + "--format", + "json", + "--output", + scan_output_file, + line, + ], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) with open(scan_output_file, "r") as json_file: scan_data = json.load(json_file) - if not scan_data.get('Results'): + if not scan_data.get("Results"): log(f"No vulnerabilities found in {image_name}:{image_tag}") else: vulnerabilities_list = [ - result['Vulnerabilities'] - for result in scan_data['Results'] - if 'Vulnerabilities' in result and result['Vulnerabilities'] + result["Vulnerabilities"] + for result in scan_data["Results"] + if "Vulnerabilities" in result and result["Vulnerabilities"] ] if not vulnerabilities_list: - log(f"The vulnerabilities detection may be insufficient because security updates are not provided for {image_name}:{image_tag}\n") + log( + f"The vulnerabilities detection may be insufficient because security updates are not provided for {image_name}:{image_tag}\n" + ) else: severity_counts = {"LOW": 0, "MEDIUM": 0, "HIGH": 0, "CRITICAL": 0} for vulnerabilities in vulnerabilities_list: for vulnerability in vulnerabilities: - severity = vulnerability.get('Severity', 'UNKNOWN') - if severity == 'UNKNOWN': + severity = vulnerability.get("Severity", "UNKNOWN") + if severity == "UNKNOWN": continue elif severity in severity_counts: severity_counts[severity] += 1 @@ -200,16 +231,20 @@ def extract_images(version): image_table = PrettyTable() image_table.field_names = ["Critical", "High", "Medium", "Low"] - image_table.add_row([ + image_table.add_row( + [ severity_counts["CRITICAL"], severity_counts["HIGH"], severity_counts["MEDIUM"], - severity_counts["LOW"] - ]) + severity_counts["LOW"], + ] + ) log(f"{image_table}\n") - severity_report_file = os.path.join(severity_count, f"{image_name_scan}_severity_report.json") - with open(severity_report_file, 'w') as report_file: + severity_report_file = os.path.join( + severity_count, 
f"{image_name_scan}_severity_report.json" + ) + with open(severity_report_file, "w") as report_file: json.dump(report, report_file, indent=4) except subprocess.CalledProcessError as e: @@ -261,8 +296,8 @@ def extract_images(version): filename = filename.capitalize() else: - log(f"Skipping invalid filename format: {file_path}") - continue + log(f"Skipping invalid filename format: {file_path}") + continue with open(file_path, "r") as f: data = json.load(f)["data"] @@ -304,7 +339,7 @@ def extract_images(version): log("Summary in Json Format:") -log(json.dumps(merged_data, indent=4)) +log(json.dumps(merged_data, indent=4)) # Write the final output to a file @@ -367,5 +402,5 @@ def extract_images(version): f.write(str(table)) log("Output saved to:", output_file) -log("Severity counts with images respect to WGs are saved in the",ALL_SEVERITY_COUNTS) -log("Scanned Json reports on images are saved in" ,SCAN_REPORTS_DIR) +log("Severity counts with images respect to WGs are saved in the", ALL_SEVERITY_COUNTS) +log("Scanned Json reports on images are saved in", SCAN_REPORTS_DIR) From 38a57d5b8e6989dda59e69f6d7362d9251694489 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:21:31 +0530 Subject: [PATCH 15/37] made chnages in sh files to address formattings Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh | 4 ++-- apps/pipeline/upstream/gcp-workload-identity-setup.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh b/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh index 79f86c95c8..2c47c230e6 100755 --- a/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh +++ b/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to 
$SYSTEM_GSA" -for ksa in ${SYSTEM_KSA[@]}; do +for ksa in "${SYSTEM_KSA[@]}"; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in ${USER_KSA[@]}; do +for ksa in "${USER_KSA[@]}"; do bind_gsa_and_ksa $USER_GSA $ksa done diff --git a/apps/pipeline/upstream/gcp-workload-identity-setup.sh b/apps/pipeline/upstream/gcp-workload-identity-setup.sh index a8e5b4b68f..3570def571 100755 --- a/apps/pipeline/upstream/gcp-workload-identity-setup.sh +++ b/apps/pipeline/upstream/gcp-workload-identity-setup.sh @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to $SYSTEM_GSA" -for ksa in ${SYSTEM_KSA[@]}; do +for ksa in "${SYSTEM_KSA[@]}"; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in ${USER_KSA[@]}; do +for ksa in "${USER_KSA[@]}"; do bind_gsa_and_ksa $USER_GSA $ksa done From 58ee8cd0c23b7a3610513fd85be6ee6418ea2e3a Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:29:42 +0530 Subject: [PATCH 16/37] made chnages in sh files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- hack/synchronize-kserve-web-app-manifests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/synchronize-kserve-web-app-manifests.sh b/hack/synchronize-kserve-web-app-manifests.sh index f7b5b102b2..d630b4ea12 100644 --- a/hack/synchronize-kserve-web-app-manifests.sh +++ b/hack/synchronize-kserve-web-app-manifests.sh @@ -24,7 +24,7 @@ if [ -n "$(git status --porcelain)" ]; then echo "WARNING: You have uncommitted changes" fi -if [ `git branch --list $BRANCH` ] +if [ "$(git branch --list $BRANCH)" ] then echo "WARNING: Branch $BRANCH already exists." 
fi From 399e0a34e221896b1153fd3ad386cfe72578153c Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:36:55 +0530 Subject: [PATCH 17/37] disable SC2046 rule Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .shellcheckrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.shellcheckrc b/.shellcheckrc index 34deb1a03e..fd31002e38 100644 --- a/.shellcheckrc +++ b/.shellcheckrc @@ -1,2 +1,2 @@ # ~/.shellcheckrc -disable=SC1017,SC2086,SC2070 \ No newline at end of file +disable=SC1017,SC2086,SC2070,SC2046 \ No newline at end of file From 3c4c9851e5be1c37417cee0cbeead4ce86dde098 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:43:03 +0530 Subject: [PATCH 18/37] disable SC2006 and SC2155 rule Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .shellcheckrc | 2 +- apps/kfp-tekton/upstream/v1/wi-utils.sh | 1 + apps/kfp-tekton/upstream/wi-utils.sh | 1 + apps/pipeline/upstream/wi-utils.sh | 1 + hack/synchronize-kserve-web-app-manifests.sh | 6 +++--- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.shellcheckrc b/.shellcheckrc index fd31002e38..8fb4df5ccf 100644 --- a/.shellcheckrc +++ b/.shellcheckrc @@ -1,2 +1,2 @@ # ~/.shellcheckrc -disable=SC1017,SC2086,SC2070,SC2046 \ No newline at end of file +disable=SC1017,SC2086,SC2070,SC2046,SC2155,SC2006 \ No newline at end of file diff --git a/apps/kfp-tekton/upstream/v1/wi-utils.sh b/apps/kfp-tekton/upstream/v1/wi-utils.sh index 3da163c967..6d6bbdd21f 100644 --- a/apps/kfp-tekton/upstream/v1/wi-utils.sh +++ b/apps/kfp-tekton/upstream/v1/wi-utils.sh @@ -1,4 +1,5 @@ #!/bin/bash +# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # diff --git a/apps/kfp-tekton/upstream/wi-utils.sh b/apps/kfp-tekton/upstream/wi-utils.sh index 
3da163c967..6d6bbdd21f 100644 --- a/apps/kfp-tekton/upstream/wi-utils.sh +++ b/apps/kfp-tekton/upstream/wi-utils.sh @@ -1,4 +1,5 @@ #!/bin/bash +# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # diff --git a/apps/pipeline/upstream/wi-utils.sh b/apps/pipeline/upstream/wi-utils.sh index 3da163c967..6d6bbdd21f 100644 --- a/apps/pipeline/upstream/wi-utils.sh +++ b/apps/pipeline/upstream/wi-utils.sh @@ -1,4 +1,5 @@ #!/bin/bash +# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # diff --git a/hack/synchronize-kserve-web-app-manifests.sh b/hack/synchronize-kserve-web-app-manifests.sh index d630b4ea12..beabded784 100644 --- a/hack/synchronize-kserve-web-app-manifests.sh +++ b/hack/synchronize-kserve-web-app-manifests.sh @@ -39,11 +39,11 @@ echo "Checking out in $SRC_DIR to $COMMIT..." # Checkout the Model Registry repository mkdir -p $SRC_DIR -cd $SRC_DIR +cd $SRC_DIR || exit if [ ! -d "models-web-app/.git" ]; then git clone https://github.com/kserve/models-web-app.git fi -cd $SRC_DIR/models-web-app +cd $SRC_DIR/models-web-app || exit if ! git rev-parse --verify --quiet $COMMIT; then git checkout -b $COMMIT else @@ -71,7 +71,7 @@ DST_TXT="\[$COMMIT\](https://github.com/kserve/models-web-app/tree/$COMMIT/confi sed -i "s|$SRC_TXT|$DST_TXT|g" "${MANIFESTS_DIR}"/README.md echo "Committing the changes..." 
-cd $MANIFESTS_DIR +cd $MANIFESTS_DIR || exit git add contrib/kserve/models-web-app git add README.md git commit -s -m "Update kserve models web application manifests from ${COMMIT}" From 35b242fcf5e67f64b01b5438a4ab6f43b09ab82f Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 02:35:16 +0530 Subject: [PATCH 19/37] Change shellcheck only to run for PR chnaged files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 18 ++++++++++++++++-- hack/synchronize-istio-cni-manifests.sh | 1 - 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index d607d3e358..ebcd6c703a 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -11,6 +11,20 @@ jobs: - name: Install ShellCheck run: sudo apt install -y shellcheck - - name: Run ShellCheck - run: find . -type f -name "*.sh" -exec shellcheck {} + + - name: Fetch master branch + run: git fetch master + + - name: Set up changed files + run: | + git diff --name-only origin/main...HEAD | grep -E '^.*\.sh$' | grep -v '^apps/' > changed_files_in_PR.txt || true + + - name: Display changed files + run: cat changed_files_in_PR.txt + + - name: Run ShellCheck on changed files + run: | + cat changed_files_in_PR.txt | xargs -I {} shellcheck {} + shell: bash + # - name: Run ShellCheck + # run: find . 
-type f -name "*.sh" -exec shellcheck {} + diff --git a/hack/synchronize-istio-cni-manifests.sh b/hack/synchronize-istio-cni-manifests.sh index 1f8a22c3e5..10e0ade135 100644 --- a/hack/synchronize-istio-cni-manifests.sh +++ b/hack/synchronize-istio-cni-manifests.sh @@ -1,4 +1,3 @@ -#!/usr/bin/env bash @ -1,88 +0,0 @@ # This script aims at helping create a PR to update the manifests of the From bc5fd0bff1cce252c3587dfb5cdfc243433e66ad Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 02:37:45 +0530 Subject: [PATCH 20/37] Address a issue with bash_formatter.yaml Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index ebcd6c703a..ccb1c17f28 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -12,7 +12,7 @@ jobs: run: sudo apt install -y shellcheck - name: Fetch master branch - run: git fetch master + run: git fetch origin master - name: Set up changed files run: | From cb67c399945848dd8e93ae2af46fa73f16b39a3a Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 02:41:13 +0530 Subject: [PATCH 21/37] Changed origin/main to origin/master Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 2 +- .github/workflows/yaml_formatter.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index ccb1c17f28..1f61681569 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -16,7 +16,7 @@ jobs: - name: Set up changed files run: | - git diff 
--name-only origin/main...HEAD | grep -E '^.*\.sh$' | grep -v '^apps/' > changed_files_in_PR.txt || true + git diff --name-only origin/master...HEAD | grep -E '^.*\.sh$' | grep -v '^apps/' > changed_files_in_PR.txt || true - name: Display changed files run: cat changed_files_in_PR.txt diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index 242796f847..b36b29ef92 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -42,7 +42,7 @@ jobs: - name: Set up changed files run: | - git diff --name-only origin/main...HEAD | grep -E '^common/.*\.ya?ml$|^example/.*\.ya?ml$' > changed_files_in_PR.txt || true + git diff --name-only origin/master...HEAD | grep -E '^common/.*\.ya?ml$|^example/.*\.ya?ml$' > changed_files_in_PR.txt || true - name: Display changed files run: cat changed_files_in_PR.txt From 77e2c25fc63ba8cc08b090f6fe10f1c6a46d3951 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:07:44 +0530 Subject: [PATCH 22/37] Formatted python files from the balck formatter Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- hack/trivy_scan.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/trivy_scan.py b/hack/trivy_scan.py index 197de52a71..ff7706bb1b 100644 --- a/hack/trivy_scan.py +++ b/hack/trivy_scan.py @@ -402,5 +402,5 @@ def extract_images(version): f.write(str(table)) log("Output saved to:", output_file) -log("Severity counts with images respect to WGs are saved in the", ALL_SEVERITY_COUNTS) -log("Scanned Json reports on images are saved in", SCAN_REPORTS_DIR) +log("Severity counts with images respect to WGs are saved in the",ALL_SEVERITY_COUNTS) +log("Scanned Json reports on images are saved in",SCAN_REPORTS_DIR) \ No newline at end of file From 8f252bb61bdbfd4a158545e35318156751b91993 Mon Sep 17 00:00:00 2001 From: 
hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 18:36:51 +0530 Subject: [PATCH 23/37] Add github action to format yaml files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index b36b29ef92..380d8fa920 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -1,4 +1,8 @@ +<<<<<<< HEAD name: Proper Formatting on YAML files +======= +name: Proper Formatting on Yaml files +>>>>>>> 6eb43b53 (Add github action to format yaml files) on: [push, pull_request] From 045b504ee6194cd7cc17f21b3ee95af93aa9b328 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 19:11:53 +0530 Subject: [PATCH 24/37] Add step YAML Formatting Guidelines to yaml_formatter.yaml file Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index 380d8fa920..b36b29ef92 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -1,8 +1,4 @@ -<<<<<<< HEAD name: Proper Formatting on YAML files -======= -name: Proper Formatting on Yaml files ->>>>>>> 6eb43b53 (Add github action to format yaml files) on: [push, pull_request] From 19881d438e32c3cfbae90cc20be6dafb1aad5466 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 19:29:06 +0530 Subject: [PATCH 25/37] made chnages to run next steps although the previous step fail Signed-off-by: hansinikarunarathne 
<107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index b36b29ef92..5753c9b521 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -10,7 +10,7 @@ jobs: - name: Install yamllint run: pip install yamllint - + - name: YAML Formatting Guidelines run: | echo "### YAML Formatting Guidelines ### From 04e18583a5f31ba3808b974708c105e5ea8e889e Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sat, 29 Jun 2024 20:48:39 +0530 Subject: [PATCH 26/37] switch steps Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/yaml_formatter.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index 5753c9b521..b36b29ef92 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -10,7 +10,7 @@ jobs: - name: Install yamllint run: pip install yamllint - + - name: YAML Formatting Guidelines run: | echo "### YAML Formatting Guidelines ### From 8ee988d10a5668dbebde80ba9258a31880a6e2ea Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 00:22:48 +0530 Subject: [PATCH 27/37] Add shellcheckrc file Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .shellcheckrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.shellcheckrc b/.shellcheckrc index 8fb4df5ccf..9c339b78ba 100644 --- a/.shellcheckrc +++ b/.shellcheckrc @@ -1,2 +1,2 @@ # ~/.shellcheckrc -disable=SC1017,SC2086,SC2070,SC2046,SC2155,SC2006 \ No newline at end of file 
+disable=SC1017,SC2086,SC2070,SC2046,SC2155,SC2006 From 0f27d467555d1a48e44e01707c972ec6e0646be1 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 00:53:17 +0530 Subject: [PATCH 28/37] Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- hack/synchronize-istio-cni-manifests.sh | 4 ++++ hack/synchronize-istio-manifests.sh | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/hack/synchronize-istio-cni-manifests.sh b/hack/synchronize-istio-cni-manifests.sh index 10e0ade135..0cc8236297 100644 --- a/hack/synchronize-istio-cni-manifests.sh +++ b/hack/synchronize-istio-cni-manifests.sh @@ -1,3 +1,7 @@ +<<<<<<< HEAD +======= +#!/usr/bin/env bash +>>>>>>> 7c29d8a6 (Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck) @ -1,88 +0,0 @@ # This script aims at helping create a PR to update the manifests of the diff --git a/hack/synchronize-istio-manifests.sh b/hack/synchronize-istio-manifests.sh index 6fa9811a0b..85aa4277ff 100644 --- a/hack/synchronize-istio-manifests.sh +++ b/hack/synchronize-istio-manifests.sh @@ -1,4 +1,8 @@ +<<<<<<< HEAD #!/usr/bin/env bash +======= +# !/usr/bin/env bash +>>>>>>> 7c29d8a6 (Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck) # # This script aims at helping create a PR to update the manifests of Istio # # This script: From 5e5e4c4893c96cd4a3f4c426f6297f78878b2fdd Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 01:13:52 +0530 Subject: [PATCH 29/37] made chnages in sh files Signed-off-by: hansinikarunarathne 
<107214435+hansinikarunarathne@users.noreply.github.com> --- hack/synchronize-istio-manifests.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hack/synchronize-istio-manifests.sh b/hack/synchronize-istio-manifests.sh index 85aa4277ff..ad75d68022 100644 --- a/hack/synchronize-istio-manifests.sh +++ b/hack/synchronize-istio-manifests.sh @@ -1,8 +1,5 @@ -<<<<<<< HEAD #!/usr/bin/env bash -======= -# !/usr/bin/env bash ->>>>>>> 7c29d8a6 (Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck) + # # This script aims at helping create a PR to update the manifests of Istio # # This script: From 859ced52ab2e74d8afc5a182738589e58a2f8aca Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 02:35:16 +0530 Subject: [PATCH 30/37] Change shellcheck only to run for PR chnaged files Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- hack/synchronize-istio-cni-manifests.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hack/synchronize-istio-cni-manifests.sh b/hack/synchronize-istio-cni-manifests.sh index 0cc8236297..10e0ade135 100644 --- a/hack/synchronize-istio-cni-manifests.sh +++ b/hack/synchronize-istio-cni-manifests.sh @@ -1,7 +1,3 @@ -<<<<<<< HEAD -======= -#!/usr/bin/env bash ->>>>>>> 7c29d8a6 (Disable SC1017,SC2086,SC2070 rules when shellcheck and did changes in sh files to address the formattings according to shellcheck) @ -1,88 +0,0 @@ # This script aims at helping create a PR to update the manifests of the From 107db8e3b0996600cc5776df1e6b16c859889672 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 03:36:27 +0530 Subject: [PATCH 31/37] Ensure the full history is fetched Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- 
.github/workflows/yaml_formatter.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/yaml_formatter.yaml b/.github/workflows/yaml_formatter.yaml index b36b29ef92..dca73409d4 100644 --- a/.github/workflows/yaml_formatter.yaml +++ b/.github/workflows/yaml_formatter.yaml @@ -7,6 +7,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Install yamllint run: pip install yamllint From 64bd2d9853a6f09a085c21684eed811bfd0b81e7 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 03:39:36 +0530 Subject: [PATCH 32/37] Ensure the full history is fetched in bash_formatter Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index 1f61681569..b2e970ff03 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -7,6 +7,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Install ShellCheck run: sudo apt install -y shellcheck From 0226a0efca68d0dd635adfa9e82f7d7562cfbd51 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Tue, 2 Jul 2024 03:41:53 +0530 Subject: [PATCH 33/37] fix SC2148 (error) Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- hack/synchronize-istio-cni-manifests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/synchronize-istio-cni-manifests.sh b/hack/synchronize-istio-cni-manifests.sh index 10e0ade135..1f8a22c3e5 100644 --- a/hack/synchronize-istio-cni-manifests.sh +++ b/hack/synchronize-istio-cni-manifests.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash @ -1,88 +0,0 @@ # This script aims at helping create a PR to 
update the manifests of the From a4bc63ff4624f7442aa27f805da4c6067bdead6c Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sun, 7 Jul 2024 11:21:29 +0530 Subject: [PATCH 34/37] remove commented lines in bash_formatter.yaml Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index b2e970ff03..96bc1a42f9 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -27,6 +27,5 @@ jobs: run: | cat changed_files_in_PR.txt | xargs -I {} shellcheck {} shell: bash - # - name: Run ShellCheck - # run: find . -type f -name "*.sh" -exec shellcheck {} + + From 2730efcf13098972b3ace9cac42de5613e604a25 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sun, 7 Jul 2024 13:23:33 +0530 Subject: [PATCH 35/37] remove formatting chnages done to app folder content Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .../pipelines-profile-controller/run_tests.sh | 1 - .../pipelines-profile-controller/sync.py | 413 +++++++++--------- .../pipelines-profile-controller/test_sync.py | 209 ++++----- .../upstream/gcp-workload-identity-setup.sh | 4 +- .../pipelines-profile-controller/run_tests.sh | 1 - .../pipelines-profile-controller/sync.py | 413 +++++++++--------- .../pipelines-profile-controller/test_sync.py | 209 ++++----- .../v1/gcp-workload-identity-setup.sh | 4 +- apps/kfp-tekton/upstream/v1/wi-utils.sh | 1 - apps/kfp-tekton/upstream/wi-utils.sh | 1 - .../pipelines-profile-controller/run_tests.sh | 1 - .../pipelines-profile-controller/sync.py | 413 +++++++++--------- .../pipelines-profile-controller/test_sync.py | 209 ++++----- 
.../upstream/gcp-workload-identity-setup.sh | 8 +- apps/pipeline/upstream/wi-utils.sh | 1 - 15 files changed, 881 insertions(+), 1007 deletions(-) diff --git a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index be33353dfe..4933ec79a8 100755 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,4 +1,3 @@ -#!/bin/sh # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff --git a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py index e04ba23881..031a3fa268 100644 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,17 +24,10 @@ def main(): server.serve_forever() -def get_settings_from_env( - controller_port=None, - visualization_server_image=None, - frontend_image=None, - visualization_server_tag=None, - frontend_tag=None, - disable_istio_sidecar=None, - minio_access_key=None, - minio_secret_key=None, - kfp_default_pipeline_root=None, -): +def get_settings_from_env(controller_port=None, + visualization_server_image=None, frontend_image=None, + visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, + minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): """ Returns a dict of settings from environment variables relevant to the controller @@ -52,82 +45,66 @@ def get_settings_from_env( minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = controller_port or os.environ.get( - "CONTROLLER_PORT", "8080" - ) + 
settings["controller_port"] = \ + controller_port or \ + os.environ.get("CONTROLLER_PORT", "8080") - settings["visualization_server_image"] = ( - visualization_server_image - or os.environ.get( - "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" - ) - ) + settings["visualization_server_image"] = \ + visualization_server_image or \ + os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") - settings["frontend_image"] = frontend_image or os.environ.get( - "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" - ) + settings["frontend_image"] = \ + frontend_image or \ + os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment variable for backwards # compatibility - settings["visualization_server_tag"] = ( - visualization_server_tag - or os.environ.get("VISUALIZATION_SERVER_TAG") - or os.environ["KFP_VERSION"] - ) + settings["visualization_server_tag"] = \ + visualization_server_tag or \ + os.environ.get("VISUALIZATION_SERVER_TAG") or \ + os.environ["KFP_VERSION"] - settings["frontend_tag"] = ( - frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] - ) + settings["frontend_tag"] = \ + frontend_tag or \ + os.environ.get("FRONTEND_TAG") or \ + os.environ["KFP_VERSION"] - settings["disable_istio_sidecar"] = ( - disable_istio_sidecar - if disable_istio_sidecar is not None - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - ) + settings["disable_istio_sidecar"] = \ + disable_istio_sidecar if disable_istio_sidecar is not None \ + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - settings["minio_access_key"] = minio_access_key or base64.b64encode( - bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_access_key"] = \ + minio_access_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') - 
settings["minio_secret_key"] = minio_secret_key or base64.b64encode( - bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_secret_key"] = \ + minio_secret_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( - "KFP_DEFAULT_PIPELINE_ROOT" - ) + settings["kfp_default_pipeline_root"] = \ + kfp_default_pipeline_root or \ + os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") return settings -def server_factory( - visualization_server_image, - visualization_server_tag, - frontend_image, - frontend_tag, - disable_istio_sidecar, - minio_access_key, - minio_secret_key, - kfp_default_pipeline_root=None, - url="", - controller_port=8080, -): +def server_factory(visualization_server_image, + visualization_server_tag, frontend_image, frontend_tag, + disable_istio_sidecar, minio_access_key, + minio_secret_key, kfp_default_pipeline_root=None, + url="", controller_port=8080): """ Returns an HTTPServer populated with Handler with customized settings """ - class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = ( - parent.get("metadata", {}) - .get("labels", {}) - .get("pipelines.kubeflow.org/enabled") - ) + pipeline_enabled = parent.get("metadata", {}).get( + "labels", {}).get("pipelines.kubeflow.org/enabled") if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -136,30 +113,29 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [ - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - } - ] + desired_resources += [{ + "apiVersion": 
"v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + }] + # Compute status based on observed state. desired_status = { - "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 - and len(children["ConfigMap.v1"]) == desired_configmap_count - and len(children["Deployment.apps/v1"]) == 2 - and len(children["Service.v1"]) == 2 - and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 - and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 - and "True" - or "False" + "kubeflow-pipelines-ready": + len(children["Secret.v1"]) == 1 and + len(children["ConfigMap.v1"]) == desired_configmap_count and + len(children["Deployment.apps/v1"]) == 2 and + len(children["Service.v1"]) == 2 and + len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and + len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and + "True" or "False" } # Generate the desired child object(s). 
@@ -172,7 +148,8 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": + "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -181,38 +158,50 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"}, + "matchLabels": { + "app": "ml-pipeline-visualizationserver" + }, }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "image": f"{visualization_server_image}:{visualization_server_tag}", - "imagePullPolicy": "IfNotPresent", - "name": "ml-pipeline-visualizationserver", - "ports": [{"containerPort": 8888}], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi", - }, - "limits": {"cpu": "500m", "memory": "1Gi"}, + "containers": [{ + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": + "IfNotPresent", + "name": + "ml-pipeline-visualizationserver", + "ports": [{ + "containerPort": 8888 + }], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi" + }, + "limits": { + "cpu": "500m", + "memory": "1Gi" }, } - ], - "serviceAccountName": "default-editor", + }], + "serviceAccountName": + "default-editor", }, }, }, @@ -226,8 +215,12 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": {"tls": 
{"mode": "ISTIO_MUTUAL"}}, - }, + "trafficPolicy": { + "tls": { + "mode": "ISTIO_MUTUAL" + } + } + } }, { "apiVersion": "security.istio.io/v1beta1", @@ -238,22 +231,18 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"} - }, - "rules": [ - { - "from": [ - { - "source": { - "principals": [ - "cluster.local/ns/kubeflow/sa/ml-pipeline" - ] - } - } - ] + "matchLabels": { + "app": "ml-pipeline-visualizationserver" } - ], - }, + }, + "rules": [{ + "from": [{ + "source": { + "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] + } + }] + }] + } }, { "apiVersion": "v1", @@ -263,14 +252,12 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [ - { - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - } - ], + "ports": [{ + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + }], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -281,62 +268,73 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, + "labels": { + "app": "ml-pipeline-ui-artifact" + }, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, + "selector": { + "matchLabels": { + "app": "ml-pipeline-ui-artifact" + } + }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-ui-artifact" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "name": "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": "IfNotPresent", - "ports": [{"containerPort": 3000}], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - 
"valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi", - }, - "limits": { - "cpu": "100m", - "memory": "500Mi", - }, + "containers": [{ + "name": + "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": + "IfNotPresent", + "ports": [{ + "containerPort": 3000 + }], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact" + } + } + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact" + } + } + } + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi" + }, + "limits": { + "cpu": "100m", + "memory": "500Mi" }, } - ], - "serviceAccountName": "default-editor", - }, - }, - }, + }], + "serviceAccountName": + "default-editor" + } + } + } }, { "apiVersion": "v1", @@ -344,55 +342,52 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": {"app": "ml-pipeline-ui-artifact"}, + "labels": { + "app": "ml-pipeline-ui-artifact" + } }, "spec": { - "ports": [ - { - "name": "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000, - } - ], - "selector": {"app": "ml-pipeline-ui-artifact"}, - }, + "ports": [{ + "name": + "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000 + }], + "selector": { + "app": "ml-pipeline-ui-artifact" + } + } }, ] - print("Received request:\n", json.dumps(parent, sort_keys=True)) - print( - "Desired resources except secrets:\n", - json.dumps(desired_resources, 
sort_keys=True), - ) + print('Received request:\n', json.dumps(parent, sort_keys=True)) + print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True)) # Moved after the print argument because this is sensitive data. - desired_resources.append( - { - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - } - ) + desired_resources.append({ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + }) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. observed = json.loads( - self.rfile.read(int(self.headers.get("content-length"))) - ) + self.rfile.read(int(self.headers.get("content-length")))) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), "utf-8")) + self.wfile.write(bytes(json.dumps(desired), 'utf-8')) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py index bc9a6549b5..50362d60fd 100644 --- a/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/kfp-tekton/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,8 +11,10 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + 
"name": "myName" } }, "children": { @@ -22,14 +24,16 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - }, + } } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + "name": "myName" } }, "children": { @@ -39,7 +43,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - }, + } } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -66,38 +70,34 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict( - ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - }, -) - -ENV_IMAGES_NO_TAGS = dict( - ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - }, -) - -ENV_IMAGES_WITH_TAGS = dict( - ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - }, -) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( - ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - }, -) +ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + } + ) + +ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + } + ) + +ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + } + ) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, + **{ + 
"DISABLE_ISTIO_SIDECAR": "false", + } + ) def generate_image_name(imagename, tag): @@ -154,57 +154,40 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION - ), - generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - 
ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_with_pipeline_enabled( - sync_server, - data, - expected_status, - expected_visualization_server_image, - expected_frontend_server_image, -): +def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, + expected_visualization_server_image, expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -224,17 +207,13 @@ def test_sync_server_with_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -242,28 +221,19 @@ def test_sync_server_with_pipeline_enabled( "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, 
- {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server_from_arguments"], + indirect=["sync_server_from_arguments"] ) def test_sync_server_with_direct_passing_of_settings( - sync_server_from_arguments, - data, - expected_status, - expected_visualization_server_image, - expected_frontend_server_image, -): + sync_server_from_arguments, data, expected_status, expected_visualization_server_image, + expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -280,17 +250,13 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -298,11 +264,10 
@@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_without_pipeline_enabled( - sync_server, data, expected_status, expected_children -): +def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, + expected_children): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -317,5 +282,5 @@ def test_sync_server_without_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status - assert results["children"] == expected_children + assert results['status'] == expected_status + assert results['children'] == expected_children diff --git a/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh b/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh index 2c47c230e6..79f86c95c8 100755 --- a/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh +++ b/apps/kfp-tekton/upstream/gcp-workload-identity-setup.sh @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to $SYSTEM_GSA" -for ksa in "${SYSTEM_KSA[@]}"; do +for ksa in ${SYSTEM_KSA[@]}; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in "${USER_KSA[@]}"; do +for ksa in ${USER_KSA[@]}; do bind_gsa_and_ksa $USER_GSA $ksa done diff --git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index be33353dfe..4933ec79a8 100755 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,4 +1,3 @@ -#!/bin/sh # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff 
--git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py index 3ff93437ee..7caecb8ee3 100644 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,17 +24,10 @@ def main(): server.serve_forever() -def get_settings_from_env( - controller_port=None, - visualization_server_image=None, - frontend_image=None, - visualization_server_tag=None, - frontend_tag=None, - disable_istio_sidecar=None, - minio_access_key=None, - minio_secret_key=None, - kfp_default_pipeline_root=None, -): +def get_settings_from_env(controller_port=None, + visualization_server_image=None, frontend_image=None, + visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, + minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): """ Returns a dict of settings from environment variables relevant to the controller @@ -52,82 +45,66 @@ def get_settings_from_env( minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = controller_port or os.environ.get( - "CONTROLLER_PORT", "8080" - ) + settings["controller_port"] = \ + controller_port or \ + os.environ.get("CONTROLLER_PORT", "8080") - settings["visualization_server_image"] = ( - visualization_server_image - or os.environ.get( - "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" - ) - ) + settings["visualization_server_image"] = \ + visualization_server_image or \ + os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") - settings["frontend_image"] = frontend_image or os.environ.get( - "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" - ) + settings["frontend_image"] = \ + frontend_image or \ + os.environ.get("FRONTEND_IMAGE", 
"gcr.io/ml-pipeline/frontend") # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment variable for backwards # compatibility - settings["visualization_server_tag"] = ( - visualization_server_tag - or os.environ.get("VISUALIZATION_SERVER_TAG") - or os.environ["KFP_VERSION"] - ) + settings["visualization_server_tag"] = \ + visualization_server_tag or \ + os.environ.get("VISUALIZATION_SERVER_TAG") or \ + os.environ["KFP_VERSION"] - settings["frontend_tag"] = ( - frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] - ) + settings["frontend_tag"] = \ + frontend_tag or \ + os.environ.get("FRONTEND_TAG") or \ + os.environ["KFP_VERSION"] - settings["disable_istio_sidecar"] = ( - disable_istio_sidecar - if disable_istio_sidecar is not None - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - ) + settings["disable_istio_sidecar"] = \ + disable_istio_sidecar if disable_istio_sidecar is not None \ + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - settings["minio_access_key"] = minio_access_key or base64.b64encode( - bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_access_key"] = \ + minio_access_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') - settings["minio_secret_key"] = minio_secret_key or base64.b64encode( - bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_secret_key"] = \ + minio_secret_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( - "KFP_DEFAULT_PIPELINE_ROOT" - ) + settings["kfp_default_pipeline_root"] = \ + kfp_default_pipeline_root or \ + os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") return settings -def server_factory( - visualization_server_image, - visualization_server_tag, - 
frontend_image, - frontend_tag, - disable_istio_sidecar, - minio_access_key, - minio_secret_key, - kfp_default_pipeline_root=None, - url="", - controller_port=8080, -): +def server_factory(visualization_server_image, + visualization_server_tag, frontend_image, frontend_tag, + disable_istio_sidecar, minio_access_key, + minio_secret_key, kfp_default_pipeline_root=None, + url="", controller_port=8080): """ Returns an HTTPServer populated with Handler with customized settings """ - class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = ( - parent.get("metadata", {}) - .get("labels", {}) - .get("pipelines.kubeflow.org/enabled") - ) + pipeline_enabled = parent.get("metadata", {}).get( + "labels", {}).get("pipelines.kubeflow.org/enabled") if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -136,30 +113,29 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [ - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - } - ] + desired_resources += [{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + }] + # Compute status based on observed state. 
desired_status = { - "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 - and len(children["ConfigMap.v1"]) == desired_configmap_count - and len(children["Deployment.apps/v1"]) == 2 - and len(children["Service.v1"]) == 2 - and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 - and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 - and "True" - or "False" + "kubeflow-pipelines-ready": + len(children["Secret.v1"]) == 1 and + len(children["ConfigMap.v1"]) == desired_configmap_count and + len(children["Deployment.apps/v1"]) == 2 and + len(children["Service.v1"]) == 2 and + len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and + len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and + "True" or "False" } # Generate the desired child object(s). @@ -172,7 +148,8 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": + "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -181,38 +158,50 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"}, + "matchLabels": { + "app": "ml-pipeline-visualizationserver" + }, }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "image": f"{visualization_server_image}:{visualization_server_tag}", - 
"imagePullPolicy": "IfNotPresent", - "name": "ml-pipeline-visualizationserver", - "ports": [{"containerPort": 8888}], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi", - }, - "limits": {"cpu": "500m", "memory": "1Gi"}, + "containers": [{ + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": + "IfNotPresent", + "name": + "ml-pipeline-visualizationserver", + "ports": [{ + "containerPort": 8888 + }], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi" + }, + "limits": { + "cpu": "500m", + "memory": "1Gi" }, } - ], - "serviceAccountName": "default-editor", + }], + "serviceAccountName": + "default-editor", }, }, }, @@ -226,8 +215,12 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": {"tls": {"mode": "ISTIO_MUTUAL"}}, - }, + "trafficPolicy": { + "tls": { + "mode": "ISTIO_MUTUAL" + } + } + } }, { "apiVersion": "security.istio.io/v1beta1", @@ -238,22 +231,18 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"} - }, - "rules": [ - { - "from": [ - { - "source": { - "principals": [ - "cluster.local/ns/kubeflow/sa/ml-pipeline" - ] - } - } - ] + "matchLabels": { + "app": "ml-pipeline-visualizationserver" } - ], - }, + }, + "rules": [{ + "from": [{ + "source": { + "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] + } + }] + }] + } }, { "apiVersion": "v1", @@ -263,14 +252,12 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [ - { - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - } - ], + "ports": [{ + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + }], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -281,62 +268,73 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, + 
"labels": { + "app": "ml-pipeline-ui-artifact" + }, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, + "selector": { + "matchLabels": { + "app": "ml-pipeline-ui-artifact" + } + }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-ui-artifact" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "name": "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": "IfNotPresent", - "ports": [{"containerPort": 3000}], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi", - }, - "limits": { - "cpu": "100m", - "memory": "500Mi", - }, + "containers": [{ + "name": + "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": + "IfNotPresent", + "ports": [{ + "containerPort": 3000 + }], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact" + } + } + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact" + } + } + } + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi" + }, + "limits": { + "cpu": "100m", + "memory": "500Mi" }, } - ], - "serviceAccountName": "default-editor", - }, - }, - }, + }], + "serviceAccountName": + "default-editor" + } + } + } }, { "apiVersion": "v1", @@ -344,55 
+342,52 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": {"app": "ml-pipeline-ui-artifact"}, + "labels": { + "app": "ml-pipeline-ui-artifact" + } }, "spec": { - "ports": [ - { - "name": "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000, - } - ], - "selector": {"app": "ml-pipeline-ui-artifact"}, - }, + "ports": [{ + "name": + "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000 + }], + "selector": { + "app": "ml-pipeline-ui-artifact" + } + } }, ] - print("Received request:\n", json.dumps(parent, indent=2, sort_keys=True)) - print( - "Desired resources except secrets:\n", - json.dumps(desired_resources, indent=2, sort_keys=True), - ) + print('Received request:\n', json.dumps(parent, indent=2, sort_keys=True)) + print('Desired resources except secrets:\n', json.dumps(desired_resources, indent=2, sort_keys=True)) # Moved after the print argument because this is sensitive data. - desired_resources.append( - { - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - } - ) + desired_resources.append({ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + }) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. 
observed = json.loads( - self.rfile.read(int(self.headers.get("content-length"))) - ) + self.rfile.read(int(self.headers.get("content-length")))) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), "utf-8")) + self.wfile.write(bytes(json.dumps(desired), 'utf-8')) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py index 20de7ada54..6158e3f8c0 100644 --- a/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/kfp-tekton/upstream/v1/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,8 +11,10 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + "name": "myName" } }, "children": { @@ -22,14 +24,16 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - }, + } } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + "name": "myName" } }, "children": { @@ -39,7 +43,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - }, + } } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -66,38 +70,34 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict( - ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - }, -) - -ENV_IMAGES_NO_TAGS = dict( - 
ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - }, -) - -ENV_IMAGES_WITH_TAGS = dict( - ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - }, -) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( - ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - }, -) +ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + } + ) + +ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + } + ) + +ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + } + ) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, + **{ + "DISABLE_ISTIO_SIDECAR": "false", + } + ) def generate_image_name(imagename, tag): @@ -154,57 +154,40 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], 
KFP_VERSION - ), - generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_with_pipeline_enabled( - sync_server, - data, - expected_status, - expected_visualization_server_image, - expected_frontend_server_image, -): +def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, + 
expected_visualization_server_image, expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -224,17 +207,13 @@ def test_sync_server_with_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -242,28 +221,19 @@ def test_sync_server_with_pipeline_enabled( "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server_from_arguments"], + indirect=["sync_server_from_arguments"] ) def test_sync_server_with_direct_passing_of_settings( - sync_server_from_arguments, - data, - expected_status, - 
expected_visualization_server_image, - expected_frontend_server_image, -): + sync_server_from_arguments, data, expected_status, expected_visualization_server_image, + expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -280,17 +250,13 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -298,11 +264,10 @@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_without_pipeline_enabled( - sync_server, data, expected_status, expected_children -): +def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, + expected_children): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -317,5 +282,5 @@ def test_sync_server_without_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status - assert results["children"] == expected_children + assert results['status'] == expected_status + assert results['children'] == expected_children diff --git 
a/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh b/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh index 2c47c230e6..79f86c95c8 100755 --- a/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh +++ b/apps/kfp-tekton/upstream/v1/gcp-workload-identity-setup.sh @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to $SYSTEM_GSA" -for ksa in "${SYSTEM_KSA[@]}"; do +for ksa in ${SYSTEM_KSA[@]}; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in "${USER_KSA[@]}"; do +for ksa in ${USER_KSA[@]}; do bind_gsa_and_ksa $USER_GSA $ksa done diff --git a/apps/kfp-tekton/upstream/v1/wi-utils.sh b/apps/kfp-tekton/upstream/v1/wi-utils.sh index 6d6bbdd21f..3da163c967 100644 --- a/apps/kfp-tekton/upstream/v1/wi-utils.sh +++ b/apps/kfp-tekton/upstream/v1/wi-utils.sh @@ -1,5 +1,4 @@ #!/bin/bash -# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # diff --git a/apps/kfp-tekton/upstream/wi-utils.sh b/apps/kfp-tekton/upstream/wi-utils.sh index 6d6bbdd21f..3da163c967 100644 --- a/apps/kfp-tekton/upstream/wi-utils.sh +++ b/apps/kfp-tekton/upstream/wi-utils.sh @@ -1,5 +1,4 @@ #!/bin/bash -# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh index 480c215f19..4933ec79a8 100755 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/run_tests.sh @@ -1,4 +1,3 @@ -#!/usr/bin/env bash # Build venv with required packages VENV=".venv" PYTHON_VENV="${VENV}/bin/python" diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py 
b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py index e04ba23881..031a3fa268 100644 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/sync.py @@ -24,17 +24,10 @@ def main(): server.serve_forever() -def get_settings_from_env( - controller_port=None, - visualization_server_image=None, - frontend_image=None, - visualization_server_tag=None, - frontend_tag=None, - disable_istio_sidecar=None, - minio_access_key=None, - minio_secret_key=None, - kfp_default_pipeline_root=None, -): +def get_settings_from_env(controller_port=None, + visualization_server_image=None, frontend_image=None, + visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, + minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): """ Returns a dict of settings from environment variables relevant to the controller @@ -52,82 +45,66 @@ def get_settings_from_env( minio_secret_key: Required (no default) """ settings = dict() - settings["controller_port"] = controller_port or os.environ.get( - "CONTROLLER_PORT", "8080" - ) + settings["controller_port"] = \ + controller_port or \ + os.environ.get("CONTROLLER_PORT", "8080") - settings["visualization_server_image"] = ( - visualization_server_image - or os.environ.get( - "VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server" - ) - ) + settings["visualization_server_image"] = \ + visualization_server_image or \ + os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") - settings["frontend_image"] = frontend_image or os.environ.get( - "FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend" - ) + settings["frontend_image"] = \ + frontend_image or \ + os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment 
variable for backwards # compatibility - settings["visualization_server_tag"] = ( - visualization_server_tag - or os.environ.get("VISUALIZATION_SERVER_TAG") - or os.environ["KFP_VERSION"] - ) + settings["visualization_server_tag"] = \ + visualization_server_tag or \ + os.environ.get("VISUALIZATION_SERVER_TAG") or \ + os.environ["KFP_VERSION"] - settings["frontend_tag"] = ( - frontend_tag or os.environ.get("FRONTEND_TAG") or os.environ["KFP_VERSION"] - ) + settings["frontend_tag"] = \ + frontend_tag or \ + os.environ.get("FRONTEND_TAG") or \ + os.environ["KFP_VERSION"] - settings["disable_istio_sidecar"] = ( - disable_istio_sidecar - if disable_istio_sidecar is not None - else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - ) + settings["disable_istio_sidecar"] = \ + disable_istio_sidecar if disable_istio_sidecar is not None \ + else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" - settings["minio_access_key"] = minio_access_key or base64.b64encode( - bytes(os.environ.get("MINIO_ACCESS_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_access_key"] = \ + minio_access_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') - settings["minio_secret_key"] = minio_secret_key or base64.b64encode( - bytes(os.environ.get("MINIO_SECRET_KEY"), "utf-8") - ).decode("utf-8") + settings["minio_secret_key"] = \ + minio_secret_key or \ + base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') # KFP_DEFAULT_PIPELINE_ROOT is optional - settings["kfp_default_pipeline_root"] = kfp_default_pipeline_root or os.environ.get( - "KFP_DEFAULT_PIPELINE_ROOT" - ) + settings["kfp_default_pipeline_root"] = \ + kfp_default_pipeline_root or \ + os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") return settings -def server_factory( - visualization_server_image, - visualization_server_tag, - frontend_image, - frontend_tag, - disable_istio_sidecar, - minio_access_key, - minio_secret_key, - kfp_default_pipeline_root=None, - 
url="", - controller_port=8080, -): +def server_factory(visualization_server_image, + visualization_server_tag, frontend_image, frontend_tag, + disable_istio_sidecar, minio_access_key, + minio_secret_key, kfp_default_pipeline_root=None, + url="", controller_port=8080): """ Returns an HTTPServer populated with Handler with customized settings """ - class Controller(BaseHTTPRequestHandler): def sync(self, parent, children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") - pipeline_enabled = ( - parent.get("metadata", {}) - .get("labels", {}) - .get("pipelines.kubeflow.org/enabled") - ) + pipeline_enabled = parent.get("metadata", {}).get( + "labels", {}).get("pipelines.kubeflow.org/enabled") if pipeline_enabled != "true": return {"status": {}, "children": []} @@ -136,30 +113,29 @@ def sync(self, parent, children): desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 - desired_resources += [ - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "kfp-launcher", - "namespace": namespace, - }, - "data": { - "defaultPipelineRoot": kfp_default_pipeline_root, - }, - } - ] + desired_resources += [{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "kfp-launcher", + "namespace": namespace, + }, + "data": { + "defaultPipelineRoot": kfp_default_pipeline_root, + }, + }] + # Compute status based on observed state. 
desired_status = { - "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 - and len(children["ConfigMap.v1"]) == desired_configmap_count - and len(children["Deployment.apps/v1"]) == 2 - and len(children["Service.v1"]) == 2 - and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 - and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 - and "True" - or "False" + "kubeflow-pipelines-ready": + len(children["Secret.v1"]) == 1 and + len(children["ConfigMap.v1"]) == desired_configmap_count and + len(children["Deployment.apps/v1"]) == 2 and + len(children["Service.v1"]) == 2 and + len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and + len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and + "True" or "False" } # Generate the desired child object(s). @@ -172,7 +148,8 @@ def sync(self, parent, children): "namespace": namespace, }, "data": { - "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", + "METADATA_GRPC_SERVICE_HOST": + "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, @@ -181,38 +158,50 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"}, + "matchLabels": { + "app": "ml-pipeline-visualizationserver" + }, }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-visualizationserver"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-visualizationserver" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "image": f"{visualization_server_image}:{visualization_server_tag}", - 
"imagePullPolicy": "IfNotPresent", - "name": "ml-pipeline-visualizationserver", - "ports": [{"containerPort": 8888}], - "resources": { - "requests": { - "cpu": "50m", - "memory": "200Mi", - }, - "limits": {"cpu": "500m", "memory": "1Gi"}, + "containers": [{ + "image": f"{visualization_server_image}:{visualization_server_tag}", + "imagePullPolicy": + "IfNotPresent", + "name": + "ml-pipeline-visualizationserver", + "ports": [{ + "containerPort": 8888 + }], + "resources": { + "requests": { + "cpu": "50m", + "memory": "200Mi" + }, + "limits": { + "cpu": "500m", + "memory": "1Gi" }, } - ], - "serviceAccountName": "default-editor", + }], + "serviceAccountName": + "default-editor", }, }, }, @@ -226,8 +215,12 @@ def sync(self, parent, children): }, "spec": { "host": "ml-pipeline-visualizationserver", - "trafficPolicy": {"tls": {"mode": "ISTIO_MUTUAL"}}, - }, + "trafficPolicy": { + "tls": { + "mode": "ISTIO_MUTUAL" + } + } + } }, { "apiVersion": "security.istio.io/v1beta1", @@ -238,22 +231,18 @@ def sync(self, parent, children): }, "spec": { "selector": { - "matchLabels": {"app": "ml-pipeline-visualizationserver"} - }, - "rules": [ - { - "from": [ - { - "source": { - "principals": [ - "cluster.local/ns/kubeflow/sa/ml-pipeline" - ] - } - } - ] + "matchLabels": { + "app": "ml-pipeline-visualizationserver" } - ], - }, + }, + "rules": [{ + "from": [{ + "source": { + "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] + } + }] + }] + } }, { "apiVersion": "v1", @@ -263,14 +252,12 @@ def sync(self, parent, children): "namespace": namespace, }, "spec": { - "ports": [ - { - "name": "http", - "port": 8888, - "protocol": "TCP", - "targetPort": 8888, - } - ], + "ports": [{ + "name": "http", + "port": 8888, + "protocol": "TCP", + "targetPort": 8888, + }], "selector": { "app": "ml-pipeline-visualizationserver", }, @@ -281,62 +268,73 @@ def sync(self, parent, children): "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, + 
"labels": { + "app": "ml-pipeline-ui-artifact" + }, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { - "selector": {"matchLabels": {"app": "ml-pipeline-ui-artifact"}}, + "selector": { + "matchLabels": { + "app": "ml-pipeline-ui-artifact" + } + }, "template": { "metadata": { - "labels": {"app": "ml-pipeline-ui-artifact"}, - "annotations": disable_istio_sidecar - and {"sidecar.istio.io/inject": "false"} - or {}, + "labels": { + "app": "ml-pipeline-ui-artifact" + }, + "annotations": disable_istio_sidecar and { + "sidecar.istio.io/inject": "false" + } or {}, }, "spec": { - "containers": [ - { - "name": "ml-pipeline-ui-artifact", - "image": f"{frontend_image}:{frontend_tag}", - "imagePullPolicy": "IfNotPresent", - "ports": [{"containerPort": 3000}], - "env": [ - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "accesskey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "key": "secretkey", - "name": "mlpipeline-minio-artifact", - } - }, - }, - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "70Mi", - }, - "limits": { - "cpu": "100m", - "memory": "500Mi", - }, + "containers": [{ + "name": + "ml-pipeline-ui-artifact", + "image": f"{frontend_image}:{frontend_tag}", + "imagePullPolicy": + "IfNotPresent", + "ports": [{ + "containerPort": 3000 + }], + "env": [ + { + "name": "MINIO_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "accesskey", + "name": "mlpipeline-minio-artifact" + } + } + }, + { + "name": "MINIO_SECRET_KEY", + "valueFrom": { + "secretKeyRef": { + "key": "secretkey", + "name": "mlpipeline-minio-artifact" + } + } + } + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "70Mi" + }, + "limits": { + "cpu": "100m", + "memory": "500Mi" }, } - ], - "serviceAccountName": "default-editor", - }, - }, - }, + }], + "serviceAccountName": + "default-editor" + } + } + } }, { "apiVersion": "v1", @@ -344,55 
+342,52 @@ def sync(self, parent, children): "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, - "labels": {"app": "ml-pipeline-ui-artifact"}, + "labels": { + "app": "ml-pipeline-ui-artifact" + } }, "spec": { - "ports": [ - { - "name": "http", # name is required to let istio understand request protocol - "port": 80, - "protocol": "TCP", - "targetPort": 3000, - } - ], - "selector": {"app": "ml-pipeline-ui-artifact"}, - }, + "ports": [{ + "name": + "http", # name is required to let istio understand request protocol + "port": 80, + "protocol": "TCP", + "targetPort": 3000 + }], + "selector": { + "app": "ml-pipeline-ui-artifact" + } + } }, ] - print("Received request:\n", json.dumps(parent, sort_keys=True)) - print( - "Desired resources except secrets:\n", - json.dumps(desired_resources, sort_keys=True), - ) + print('Received request:\n', json.dumps(parent, sort_keys=True)) + print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True)) # Moved after the print argument because this is sensitive data. - desired_resources.append( - { - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "mlpipeline-minio-artifact", - "namespace": namespace, - }, - "data": { - "accesskey": minio_access_key, - "secretkey": minio_secret_key, - }, - } - ) + desired_resources.append({ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "mlpipeline-minio-artifact", + "namespace": namespace, + }, + "data": { + "accesskey": minio_access_key, + "secretkey": minio_secret_key, + }, + }) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. 
observed = json.loads( - self.rfile.read(int(self.headers.get("content-length"))) - ) + self.rfile.read(int(self.headers.get("content-length")))) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() - self.wfile.write(bytes(json.dumps(desired), "utf-8")) + self.wfile.write(bytes(json.dumps(desired), 'utf-8')) return HTTPServer((url, int(controller_port)), Controller) diff --git a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py index bc9a6549b5..50362d60fd 100644 --- a/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py +++ b/apps/pipeline/upstream/base/installs/multi-user/pipelines-profile-controller/test_sync.py @@ -11,8 +11,10 @@ DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + "name": "myName" } }, "children": { @@ -22,14 +24,16 @@ "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], - }, + } } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { - "labels": {"pipelines.kubeflow.org/enabled": "true"}, - "name": "myName", + "labels": { + "pipelines.kubeflow.org/enabled": "true" + }, + "name": "myName" } }, "children": { @@ -39,7 +43,7 @@ "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], - }, + } } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} @@ -66,38 +70,34 @@ "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } -ENV_KFP_VERSION_ONLY = dict( - ENV_VARIABLES_BASE, - **{ - "KFP_VERSION": KFP_VERSION, - }, -) - -ENV_IMAGES_NO_TAGS = dict( - ENV_VARIABLES_BASE, - 
**{ - "KFP_VERSION": KFP_VERSION, - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - }, -) - -ENV_IMAGES_WITH_TAGS = dict( - ENV_VARIABLES_BASE, - **{ - "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, - "FRONTEND_IMAGE": FRONTEND_IMAGE, - "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, - "FRONTEND_TAG": FRONTEND_TAG, - }, -) - -ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict( - ENV_IMAGES_WITH_TAGS, - **{ - "DISABLE_ISTIO_SIDECAR": "false", - }, -) +ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + } + ) + +ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "KFP_VERSION": KFP_VERSION, + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + } + ) + +ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, + **{ + "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, + "FRONTEND_IMAGE": FRONTEND_IMAGE, + "VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, + "FRONTEND_TAG": FRONTEND_TAG, + } + ) + +ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, + **{ + "DISABLE_ISTIO_SIDECAR": "false", + } + ) def generate_image_name(imagename, tag): @@ -154,57 +154,40 @@ def sync_server_from_arguments(request): "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( - ENV_KFP_VERSION_ONLY, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), - generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), + ENV_KFP_VERSION_ONLY, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), + generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( - ENV_IMAGES_NO_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION - ), - 
generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), + ENV_IMAGES_NO_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), + generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_INCORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "False"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_INCORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "False"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ( - ENV_IMAGES_WITH_TAGS, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_with_pipeline_enabled( - sync_server, - data, - expected_status, - expected_visualization_server_image, - expected_frontend_server_image, -): +def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, + expected_visualization_server_image, 
expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -224,17 +207,13 @@ def test_sync_server_with_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -242,28 +221,19 @@ def test_sync_server_with_pipeline_enabled( "expected_frontend_server_image", [ ( - ENV_IMAGES_WITH_TAGS_AND_ISTIO, - DATA_CORRECT_CHILDREN, - {"kubeflow-pipelines-ready": "True"}, - generate_image_name( - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], - ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"], - ), - generate_image_name( - ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], - ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"], - ), + ENV_IMAGES_WITH_TAGS_AND_ISTIO, + DATA_CORRECT_CHILDREN, + {"kubeflow-pipelines-ready": "True"}, + generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], + ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), + generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], - indirect=["sync_server_from_arguments"], + indirect=["sync_server_from_arguments"] ) def test_sync_server_with_direct_passing_of_settings( - sync_server_from_arguments, - data, - expected_status, - expected_visualization_server_image, - 
expected_frontend_server_image, -): + sync_server_from_arguments, data, expected_status, expected_visualization_server_image, + expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments @@ -280,17 +250,13 @@ def test_sync_server_with_direct_passing_of_settings( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status + assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable - assert ( - results["children"][1]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_visualization_server_image - ) - assert ( - results["children"][5]["spec"]["template"]["spec"]["containers"][0]["image"] - == expected_frontend_server_image - ) + assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_visualization_server_image + assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ + "image"] == expected_frontend_server_image @pytest.mark.parametrize( @@ -298,11 +264,10 @@ def test_sync_server_with_direct_passing_of_settings( [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], - indirect=["sync_server"], + indirect=["sync_server"] ) -def test_sync_server_without_pipeline_enabled( - sync_server, data, expected_status, expected_children -): +def test_sync_server_without_pipeline_enabled(sync_server, data, expected_status, + expected_children): """ Nearly end-to-end test of how Controller serves .sync as a POST @@ -317,5 +282,5 @@ def test_sync_server_without_pipeline_enabled( results = json.loads(x.text) # Test overall status of whether children are ok - assert results["status"] == expected_status - assert results["children"] == expected_children + assert results['status'] == expected_status + assert results['children'] == expected_children diff --git 
a/apps/pipeline/upstream/gcp-workload-identity-setup.sh b/apps/pipeline/upstream/gcp-workload-identity-setup.sh index 3570def571..79f86c95c8 100755 --- a/apps/pipeline/upstream/gcp-workload-identity-setup.sh +++ b/apps/pipeline/upstream/gcp-workload-identity-setup.sh @@ -126,8 +126,8 @@ function create_gsa_if_not_present { gcloud iam service-accounts create $name fi } -create_gsa_if_not_present "$SYSTEM_GSA" -create_gsa_if_not_present "$USER_GSA" +create_gsa_if_not_present $SYSTEM_GSA +create_gsa_if_not_present $USER_GSA function create_ksa_if_not_present { local name=${1} @@ -160,12 +160,12 @@ function bind_gsa_and_ksa { } echo "Binding each kfp system KSA to $SYSTEM_GSA" -for ksa in "${SYSTEM_KSA[@]}"; do +for ksa in ${SYSTEM_KSA[@]}; do bind_gsa_and_ksa $SYSTEM_GSA $ksa done echo "Binding each kfp user KSA to $USER_GSA" -for ksa in "${USER_KSA[@]}"; do +for ksa in ${USER_KSA[@]}; do bind_gsa_and_ksa $USER_GSA $ksa done diff --git a/apps/pipeline/upstream/wi-utils.sh b/apps/pipeline/upstream/wi-utils.sh index 6d6bbdd21f..3da163c967 100644 --- a/apps/pipeline/upstream/wi-utils.sh +++ b/apps/pipeline/upstream/wi-utils.sh @@ -1,5 +1,4 @@ #!/bin/bash -# shellcheck disable=SC2034 # # Copyright 2019 The Kubeflow Authors # From 5e7dfb507c186d11746db55c1efd7b31be48cbff Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sun, 7 Jul 2024 13:41:13 +0530 Subject: [PATCH 36/37] Add guidlines to how to format python files according to black formatter Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/python_formatter.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/workflows/python_formatter.yaml b/.github/workflows/python_formatter.yaml index e9dd0a311f..ba470cad84 100644 --- a/.github/workflows/python_formatter.yaml +++ b/.github/workflows/python_formatter.yaml @@ -7,6 +7,21 @@ jobs: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v3 + + - name: Python Files Formatting Guidelines + run: | + echo "### Python Files Formatting Guidelines ### + If there is a formatting errors in your python files, + 1. First install black + It requires Python 3.8+ to run. + Install with "pip install black" and if you use pipx, install Black with "pipx install black" + If you want to format Jupyter Notebooks, install with pip install "black[jupyter]" + + 2. Run the command + "python -m black {source_file_or_directory}" or + "black {source_file_or_directory}" + to format python files. + " - uses: psf/black@stable with: From 9ae743f1638a5e225be70c7f4c5dbf88511b9910 Mon Sep 17 00:00:00 2001 From: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> Date: Sun, 7 Jul 2024 13:57:15 +0530 Subject: [PATCH 37/37] Add guidlines to how to format bash files according to shellcheck formatter Signed-off-by: hansinikarunarathne <107214435+hansinikarunarathne@users.noreply.github.com> --- .github/workflows/bash_formatter.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/bash_formatter.yaml b/.github/workflows/bash_formatter.yaml index 96bc1a42f9..fd1d254bf6 100644 --- a/.github/workflows/bash_formatter.yaml +++ b/.github/workflows/bash_formatter.yaml @@ -13,6 +13,15 @@ jobs: - name: Install ShellCheck run: sudo apt install -y shellcheck + - name: Bash Formatting Guidelines + run: | + echo "### Bash Files Formatting Guidelines ### + If there are errors and warnings regarding your bash files, + You can check that error code definitions in https://www.shellcheck.net/wiki/ site. + You can correct them using the https://www.shellcheck.net/ site. + You have to ignore disable errors in .shellcheckrc file. + " + - name: Fetch master branch run: git fetch origin master