diff --git a/cli/.vscode/settings.json b/cli/.vscode/settings.json
new file mode 100644
index 00000000..163c9840
--- /dev/null
+++ b/cli/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.formatting.provider": "black"
+}
diff --git a/cli/cli.py b/cli/cli.py
index de9d021e..6f878fb1 100644
--- a/cli/cli.py
+++ b/cli/cli.py
@@ -1,9 +1,13 @@
 import argparse

+from maestro_cli.logging import Logger
 from maestro_cli.commands.run import run_command

+Logger.setup_logging()
+
+
 def default_func(args):
-  parser.print_help()
+    parser.print_help()


 parser = argparse.ArgumentParser(prog="Maestro CLI", usage="maestro-cli")
@@ -11,14 +15,16 @@ def default_func(args):
 subparsers = parser.add_subparsers()

-parser_foo = subparsers.add_parser('run')
-parser_foo.add_argument('configuration_id', type=str, help="Run Configuration to start a test")
-parser_foo.set_defaults(func=run_command)
+parser_run = subparsers.add_parser("run")
+parser_run.add_argument(
+    "configuration_id", type=str, help="Run Configuration to start a test"
+)
+parser_run.set_defaults(func=run_command)

 args = parser.parse_args()

-if args:
-  args.func(args)
+# The parsed namespace is always truthy, and args.func only exists when a
+# subcommand was given, so guard on the attribute instead of the namespace
+if hasattr(args, "func"):
+    args.func(args)
 else:
-  parser.print_help()
+    parser.print_help()
diff --git a/cli/maestro_cli/commands/run.py b/cli/maestro_cli/commands/run.py
index 6d1f57f3..4e6fcc66 100644
--- a/cli/maestro_cli/commands/run.py
+++ b/cli/maestro_cli/commands/run.py
@@ -1,3 +1,57 @@
+from time import sleep
+
+from maestro_cli.services.maestro_api.run import RunApi, RunStatus
+from maestro_cli.services.maestro_api.run_metric import RunMetricApi
+
+from maestro_cli.logging import Logger
+
+
 def run_command(args):
-    print(args.configuration_id)
-    pass
+    """
+    Start a test based on the run_configuration_id given on the command line.
+
+    The command also monitors the status of the created Run
+    and provides regular feedback on the console.
+    """
+
+    def output_last_metric(run_id):
+        metrics = RunMetricApi.all(run_id)
+
+        if len(metrics) > 0:
+
+            def sort_func(metric):
+                return metric.min_datetime
+
+            metrics.sort(key=sort_func, reverse=True)
+            # The newest metric is still being collected and is not yet
+            # accurate, so prefer the one before it when available
+            last_metric = metrics[1] if len(metrics) > 1 else metrics[0]
+
+            total_count = last_metric.total_count
+            success_count = last_metric.success_count
+            errors = round((1 - success_count / total_count) * 100, 2)
+
+            Logger.info(f"Hits: {total_count} req/s. Errors: {errors} %")
+        else:
+            Logger.info("Waiting for metrics....")
+
+    def monitor_run_status(run_id):
+        # Poll in a loop rather than recursively: recursion adds a stack
+        # frame per polling cycle and would hit Python's recursion limit
+        # on long-running tests
+        while True:
+            run = RunApi.get(run_id)
+
+            if run.run_status == RunStatus.RUNNING.value:
+                output_last_metric(run_id)
+            else:
+                Logger.info(f"Run status is '{run.run_status}'")
+
+            if run.run_status in [
+                RunStatus.ERROR.value,
+                RunStatus.FINISHED.value,
+                RunStatus.STOPPED.value,
+            ]:
+                return
+            sleep(1)
+
+    run_configuration_id = args.configuration_id
+    run = RunApi.create(run_configuration_id)
+    RunApi.start(run.id)  # Generate events to start a test
+
+    Logger.info(f"Run started. run_id='{run.id}'")
+
+    monitor_run_status(run.id)
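A quick sketch of how the argparse wiring above dispatches (the configuration id is a made-up placeholder): parsing "run <id>" attaches run_command via set_defaults, while an empty invocation yields a namespace without func, which is why the hasattr guard falls back to printing help.

    # Hypothetical invocations against the parser defined in cli.py
    args = parser.parse_args(["run", "cfg-123"])  # "cfg-123" is a placeholder
    args.func(args)                               # dispatches to run_command

    args = parser.parse_args([])                  # no subcommand supplied
    hasattr(args, "func")                         # False -> parser.print_help()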
diff --git a/cli/maestro_cli/services/maestro_api/__init__.py b/cli/maestro_cli/services/maestro_api/__init__.py
new file mode 100644
index 00000000..e2df6749
--- /dev/null
+++ b/cli/maestro_cli/services/maestro_api/__init__.py
@@ -0,0 +1,104 @@
+import requests
+
+from maestro_cli.settings import MAESTRO_API_HOST, MAESTRO_API_TOKEN
+
+
+class MaestroApiClient:
+    headers = {
+        "Authorization": "Bearer %s" % MAESTRO_API_TOKEN,
+        "User-Agent": "maestroagent",
+    }
+
+    @staticmethod
+    def get(url, data={}, mapper=None):
+        response = requests.get(
+            "%s%s" % (MAESTRO_API_HOST, url),
+            headers=MaestroApiClient.headers,
+            params=data,
+        )
+
+        return MaestroApiClient.handle_response(response, mapper)
+
+    @staticmethod
+    def put(url, data={}, mapper=None):
+        response = requests.put(
+            "%s%s" % (MAESTRO_API_HOST, url),
+            headers=MaestroApiClient.headers,
+            json=data,
+        )
+
+        return MaestroApiClient.handle_response(response, mapper)
+
+    @staticmethod
+    def post(url, data={}, mapper=None):
+        response = requests.post(
+            "%s%s" % (MAESTRO_API_HOST, url),
+            headers=MaestroApiClient.headers,
+            json=data,
+        )
+
+        return MaestroApiClient.handle_response(response, mapper)
+
+    @staticmethod
+    def download_file(url, to_url):
+        r = requests.get(
+            "%s%s" % (MAESTRO_API_HOST, url),
+            headers=MaestroApiClient.headers,
+        )
+
+        # Use a context manager so the file handle is closed deterministically
+        with open(to_url, "wb") as f:
+            f.write(r.content)
+
+    @staticmethod
+    def upload_file(url, data, files, mapper):
+        response = requests.put(
+            "%s%s" % (MAESTRO_API_HOST, url),
+            headers=MaestroApiClient.headers,
+            data=data,
+            files=files,
+        )
+
+        return MaestroApiClient.handle_response(response, mapper)
+
+    @staticmethod
+    def handle_response(response, mapper):
+        # 2xx
+        if response.status_code < 300:
+            return MaestroApiClient.map_response_json(response, mapper)
+
+        # 3xx
+        if response.status_code < 400:
+            return MaestroApiClient.handle_3xx(response)
+
+        # 4xx
+        if response.status_code < 500:
+            return MaestroApiClient.handle_4xx(response)
+
+        # 5xx
+        return MaestroApiClient.handle_5xx(response)
+
+    @staticmethod
+    def map_response_json(response, mapper):
+        json = response.json()
+        if mapper is None:
+            return json
+        # Apply the mapper per item when the payload is a JSON array
+        if isinstance(json, list):
+            return [mapper(item) for item in json]
+        return mapper(json)
+
+    @staticmethod
+    def handle_3xx(response):
+        raise Exception(response)
+
+    @staticmethod
+    def handle_4xx(response):
+        if response.status_code == 403:
+            json = response.json()
+            raise Exception("Bad request: %s" % json.get("error"))
+        else:
+            raise Exception(response)
+
+    @staticmethod
+    def handle_5xx(response):
+        raise Exception(response)
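For orientation, a minimal sketch of calling this client with a mapper (the run id and mapper are illustrative; the real call sites are in the run and run_metric services below):

    # Sketch: GET /api/run/<id>, mapping the JSON body before returning it
    status = MaestroApiClient.get(
        "/api/run/%s" % "run-123",                   # placeholder run id
        mapper=lambda json: json.get("run_status"),  # illustrative mapper
    )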
diff --git a/cli/maestro_cli/services/maestro_api/run.py b/cli/maestro_cli/services/maestro_api/run.py
new file mode 100644
index 00000000..133fcd08
--- /dev/null
+++ b/cli/maestro_cli/services/maestro_api/run.py
@@ -0,0 +1,121 @@
+from enum import Enum
+
+import dateutil.parser
+
+from maestro_cli.services.maestro_api import MaestroApiClient
+
+
+class RunStatus(Enum):
+    PENDING = "PENDING"
+    CREATING = "CREATING"
+    RUNNING = "RUNNING"
+    STOPPED = "STOPPED"
+    FINISHED = "FINISHED"
+    ERROR = "ERROR"
+
+
+class RunHost:
+    def __init__(self, host, ip):
+        self.host = host
+        self.ip = ip
+
+
+class RunCustomProperty:
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+
+class RunLoadProfile:
+    def __init__(self, start, end, duration):
+        self.start = start
+        self.end = end
+        self.duration = duration
+
+
+class Run:
+    def __init__(
+        self,
+        id,
+        run_status,
+        run_plan_id,
+        agent_ids,
+        custom_data_ids,
+        hosts,
+        load_profile,
+        custom_properties,
+        created_at,
+        updated_at,
+    ):
+        self.id = id
+        self.run_status = run_status
+        self.run_plan_id = run_plan_id
+        self.agent_ids = agent_ids
+        self.custom_data_ids = custom_data_ids
+        self.hosts = [
+            RunHost(host=host.get("host"), ip=host.get("ip")) for host in hosts
+        ]
+        self.custom_properties = [
+            RunCustomProperty(name=prop.get("name"), value=prop.get("value"))
+            for prop in custom_properties
+        ]
+        self.load_profile = [
+            RunLoadProfile(
+                start=step.get("start"),
+                end=step.get("end"),
+                duration=step.get("duration"),
+            )
+            for step in load_profile
+        ]
+        self.created_at = created_at
+        self.updated_at = updated_at
+
+
+class RunApi:
+    @staticmethod
+    def run_json_to_object(json):
+        return Run(
+            id=json.get("id"),
+            run_status=json.get("run_status"),
+            run_plan_id=json.get("run_plan_id"),
+            agent_ids=json.get("agent_ids"),
+            custom_data_ids=json.get("custom_data_ids"),
+            hosts=json.get("hosts"),
+            custom_properties=json.get("custom_properties"),
+            load_profile=json.get("load_profile"),
+            created_at=dateutil.parser.parse(json.get("created_at")),
+            updated_at=dateutil.parser.parse(json.get("updated_at")),
+        )
+
+    @staticmethod
+    def get(run_id):
+        return MaestroApiClient.get(
+            "/api/run/%s" % run_id, mapper=RunApi.run_json_to_object
+        )
+
+    @staticmethod
+    def create(run_configuration_id):
+        return MaestroApiClient.post(
+            "/api/run",
+            data={"run_configuration_id": run_configuration_id},
+            mapper=RunApi.run_json_to_object,
+        )
+
+    @staticmethod
+    def update(run_id, run_status):
+        return MaestroApiClient.put(
+            "/api/run/%s" % run_id,
+            data={"run_status": run_status},
+            mapper=RunApi.run_json_to_object,
+        )
+
+    @staticmethod
+    def start(run_id):
+        return MaestroApiClient.post(
+            f"/api/run_status/{run_id}/start",
+        )
diff --git a/cli/maestro_cli/services/maestro_api/run_metric.py b/cli/maestro_cli/services/maestro_api/run_metric.py
new file mode 100644
index 00000000..aae91649
--- /dev/null
+++ b/cli/maestro_cli/services/maestro_api/run_metric.py
@@ -0,0 +1,63 @@
+import dateutil.parser
+
+from maestro_cli.services.maestro_api import MaestroApiClient
+
+
+class RunMetric:
+    def __init__(
+        self,
+        latency_avg,
+        latency_p99,
+        latency_p95,
+        latency_p90,
+        latency_p50,
+        success_count,
+        total_count,
+        min_datetime,
+        max_datetime,
+    ):
+        self.latency_avg = latency_avg
+        self.latency_p99 = latency_p99
+        self.latency_p95 = latency_p95
+        self.latency_p90 = latency_p90
+        self.latency_p50 = latency_p50
+        self.success_count = success_count
+        self.total_count = total_count
+        self.min_datetime = min_datetime
+        self.max_datetime = max_datetime
+
+
+class RunMetricApi:
+    @staticmethod
+    def run_metric_json_to_object(json):
+        return RunMetric(
+            latency_avg=json.get("latency_avg"),
+            latency_p99=json.get("latency_p99"),
+            latency_p95=json.get("latency_p95"),
+            latency_p90=json.get("latency_p90"),
+            latency_p50=json.get("latency_p50"),
+            success_count=json.get("success_count"),
+            total_count=json.get("total_count"),
+            min_datetime=dateutil.parser.parse(json.get("min_datetime")),
+            max_datetime=dateutil.parser.parse(json.get("max_datetime")),
+        )
+
+    @staticmethod
+    def all(run_id, time_interval=15):
+        return MaestroApiClient.get(
+            "/api/run_metrics/%s" % run_id,
+            data={"time_interval": time_interval},
+            mapper=RunMetricApi.run_metric_json_to_object,
+        )
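Taken together, a minimal sketch of how these two services compose (the configuration id is a placeholder, and MAESTRO_API_HOST / MAESTRO_API_TOKEN are assumed to be set in maestro_cli.settings):

    # Hypothetical end-to-end usage of the services above
    run = RunApi.create("cfg-123")      # "cfg-123" is a placeholder id
    RunApi.start(run.id)                # fire the start event
    metrics = RunMetricApi.all(run.id)  # 15-second buckets by default
    if metrics:
        newest = max(metrics, key=lambda m: m.min_datetime)
        print(newest.total_count, newest.latency_p95)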
diff --git a/cli/tests/services/maestro_api/test_run.py b/cli/tests/services/maestro_api/test_run.py
new file mode 100644
index 00000000..10826032
--- /dev/null
+++ b/cli/tests/services/maestro_api/test_run.py
@@ -0,0 +1,118 @@
+import dateutil.parser
+
+from maestro_cli.services.maestro_api.run import (
+    Run,
+    RunApi,
+    RunStatus,
+)
+
+
+def test_maestro_run_get(mocker):
+    run_id = "1-2-3-4"
+
+    get_mock = mocker.patch(
+        "maestro_cli.services.maestro_api.MaestroApiClient.get",
+    )
+
+    RunApi.get(run_id)
+
+    get_mock.assert_called_with(
+        "/api/run/1-2-3-4",
+        mapper=RunApi.run_json_to_object,
+    )
+    get_mock.assert_called_once()
+
+
+def test_maestro_run_mapped_response():
+    run_id = "tr_id_1"
+    run_plan_id = "tp_id_1"
+    agent_ids = ["sd_id_1"]
+    custom_data_ids = ["cd_id_1"]
+    start = 1
+    end = 10
+    duration = 10
+    host = "test.ff.net"
+    ip = "127.0.0.2"
+    custom_property = "custom_prop_test"
+    custom_property_value = "123"
+
+    created_at = "2021-05-19T17:31:47.560000"
+    updated_at = "2021-06-19T17:31:47.560000"
+
+    expected = Run(
+        id=run_id,
+        run_status=RunStatus.PENDING.value,
+        run_plan_id=run_plan_id,
+        agent_ids=agent_ids,
+        custom_data_ids=custom_data_ids,
+        hosts=[dict(host=host, ip=ip)],
+        custom_properties=[dict(name=custom_property, value=custom_property_value)],
+        load_profile=[dict(start=start, end=end, duration=duration)],
+        created_at=dateutil.parser.parse(created_at),
+        updated_at=dateutil.parser.parse(updated_at),
+    )
+
+    actual = RunApi.run_json_to_object(
+        dict(
+            id=run_id,
+            run_status=RunStatus.PENDING.value,
+            run_plan_id=run_plan_id,
+            agent_ids=agent_ids,
+            custom_data_ids=custom_data_ids,
+            hosts=[dict(host=host, ip=ip)],
+            custom_properties=[dict(name=custom_property, value=custom_property_value)],
+            load_profile=[dict(start=start, end=end, duration=duration)],
+            created_at=created_at,
+            updated_at=updated_at,
+        )
+    )
+    assert expected.id == actual.id
+    assert expected.run_status == actual.run_status
+    assert expected.run_plan_id == actual.run_plan_id
+    assert expected.agent_ids == actual.agent_ids
+    assert expected.custom_data_ids == actual.custom_data_ids
+    assert expected.hosts[0].host == actual.hosts[0].host
+    assert expected.hosts[0].ip == actual.hosts[0].ip
+    assert expected.custom_properties[0].name == actual.custom_properties[0].name
+    assert expected.custom_properties[0].value == actual.custom_properties[0].value
+    assert expected.load_profile[0].start == actual.load_profile[0].start
+    assert expected.load_profile[0].end == actual.load_profile[0].end
+    assert expected.load_profile[0].duration == actual.load_profile[0].duration
+    assert expected.created_at == actual.created_at
+    assert expected.updated_at == actual.updated_at
+
+
+def test_maestro_run_update(mocker):
+    run_id = "1-2-3-4"
+    run_status = "RUNNING"
+    data = {"run_status": run_status}
+
+    put_mock = mocker.patch(
+        "maestro_cli.services.maestro_api.MaestroApiClient.put",
+    )
+
+    RunApi.update(run_id, run_status)
+
+    put_mock.assert_called_with(
+        "/api/run/1-2-3-4",
+        data=data,
+        mapper=RunApi.run_json_to_object,
+    )
+    put_mock.assert_called_once()
+
+
+def test_maestro_run_create(mocker):
+    run_configuration_id = "1-2-3-4"
+    data = {"run_configuration_id": run_configuration_id}
+
+    post_mock = mocker.patch(
+        "maestro_cli.services.maestro_api.MaestroApiClient.post",
+    )
+
+    RunApi.create(run_configuration_id)
+
+    post_mock.assert_called_with(
+        "/api/run",
+        data=data,
+        mapper=RunApi.run_json_to_object,
+    )
+    post_mock.assert_called_once()
diff --git a/cli/tests/services/maestro_api/test_run_metric.py b/cli/tests/services/maestro_api/test_run_metric.py
new file mode 100644
index 00000000..85e056a3
--- /dev/null
+++ b/cli/tests/services/maestro_api/test_run_metric.py
@@ -0,0 +1,71 @@
+import dateutil.parser
+
+from maestro_cli.services.maestro_api.run_metric import RunMetric, RunMetricApi
+
+
+def test_maestro_api_metrics_all(mocker):
+    run_id = "1-2-3-4"
+
+    get_mock = mocker.patch(
+        "maestro_cli.services.maestro_api.MaestroApiClient.get",
+    )
+
+    RunMetricApi.all(run_id)
+
+    get_mock.assert_called_with(
+        "/api/run_metrics/%s" % run_id,
+        data={"time_interval": 15},
+        mapper=RunMetricApi.run_metric_json_to_object,
+    )
+    get_mock.assert_called_once()
+
+
+def test_maestro_api_metrics_mapped_response():
+    latency_avg = 10.1
+    latency_p99 = 20.1
+    latency_p95 = 30.1
+    latency_p90 = 40.1
+    latency_p50 = 50.1
+    success_count = 100
+    total_count = 110
+
+    min_datetime = "2021-05-19T17:31:47.560000"
+    max_datetime = "2021-06-19T17:31:47.560000"
+
+    expected = RunMetric(
+        latency_avg=latency_avg,
+        latency_p99=latency_p99,
+        latency_p95=latency_p95,
+        latency_p90=latency_p90,
+        latency_p50=latency_p50,
+        success_count=success_count,
+        total_count=total_count,
+        min_datetime=dateutil.parser.parse(min_datetime),
+        max_datetime=dateutil.parser.parse(max_datetime),
+    )
+
+    actual = RunMetricApi.run_metric_json_to_object(
+        dict(
+            latency_avg=latency_avg,
+            latency_p99=latency_p99,
+            latency_p95=latency_p95,
+            latency_p90=latency_p90,
+            latency_p50=latency_p50,
+            success_count=success_count,
+            total_count=total_count,
+            min_datetime=min_datetime,
+            max_datetime=max_datetime,
+        )
+    )
+
+    assert expected.latency_avg == actual.latency_avg
+    assert expected.latency_p99 == actual.latency_p99
+    assert expected.latency_p95 == actual.latency_p95
+    assert expected.latency_p90 == actual.latency_p90
+    assert expected.latency_p50 == actual.latency_p50
+    assert expected.success_count == actual.success_count
+    assert expected.total_count == actual.total_count
+    assert expected.min_datetime == actual.min_datetime
+    assert expected.max_datetime == actual.max_datetime
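These tests patch MaestroApiClient directly, so no HTTP traffic occurs; the mocker fixture comes from the pytest-mock plugin. As a cross-check, the fixture counts above (110 total, 100 successful) flow through the error-rate arithmetic in run_command as follows; a minimal worked sketch:

    # Error rate as computed in output_last_metric inside run_command
    total_count = 110
    success_count = 100
    errors = round((1 - success_count / total_count) * 100, 2)
    assert errors == 9.09  # logged as "Hits: 110 req/s. Errors: 9.09 %"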