diff --git a/keepercommander/commands/discover/__init__.py b/keepercommander/commands/discover/__init__.py
new file mode 100644
index 000000000..ca4c70876
--- /dev/null
+++ b/keepercommander/commands/discover/__init__.py
@@ -0,0 +1,245 @@
+from __future__ import annotations
+import logging
+from ..base import Command
+from ..pam.config_facades import PamConfigurationRecordFacade
+from ..pam import gateway_helper
+from ..pam.router_helper import get_response_payload
+from ..pam.gateway_helper import get_all_gateways
+from ..ksm import KSMCommand
+from ... import utils, vault_extensions
+from ... import vault
+from ...proto import APIRequest_pb2
+from ...crypto import encrypt_aes_v2, decrypt_aes_v2
+from ...display import bcolors
+from discovery_common.constants import PAM_USER, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY
+import json
+import base64
+
+from typing import List, Optional, Union, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...params import KeeperParams
+    from ...vault import KeeperRecord, ApplicationRecord
+    from ...proto import pam_pb2
+
+
+class GatewayContext:
+    def __init__(self, configuration: KeeperRecord, facade: PamConfigurationRecordFacade,
+                 gateway: pam_pb2.PAMController, application: ApplicationRecord):
+        self.configuration = configuration
+        self.facade = facade
+        self.gateway = gateway
+        self.application = application
+        self._shared_folders = None
+
+    @staticmethod
+    def from_configuration_uid(params: KeeperParams, configuration_uid: str):
+
+        configuration_record = vault.KeeperRecord.load(params, configuration_uid)
+        if not isinstance(configuration_record, vault.TypedRecord):
+            print(f'{bcolors.FAIL}PAM Configuration [{configuration_uid}] is not available.{bcolors.ENDC}')
+            return
+
+        configuration_facade = PamConfigurationRecordFacade()
+        configuration_facade.record = configuration_record
+
+        gateway_uid = configuration_facade.controller_uid
+        gateway = next((x for x in gateway_helper.get_all_gateways(params)
+                        if utils.base64_url_encode(x.controllerUid) == gateway_uid),
+                       None)
+
+        if gateway is None:
+            print(f'{bcolors.FAIL}Gateway [{gateway_uid}] was not found.{bcolors.ENDC}')
+            return
+
+        application_id = utils.base64_url_encode(gateway.applicationUid)
+        application = KSMCommand.get_app_record(params, application_id)
+
+        return GatewayContext(
+            configuration=configuration_record,
+            facade=configuration_facade,
+            gateway=gateway,
+            application=application
+        )
+
+    @staticmethod
+    def from_gateway(params: KeeperParams, gateway: str):
+        # Get all the PAM configuration records.
+        configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration"))
+        if len(configuration_records) == 0:
+            print(f"{bcolors.FAIL}Cannot find any PAM configuration records in the Vault{bcolors.ENDC}")
+            return None
+
+        all_gateways = get_all_gateways(params)
+
+        for record in configuration_records:
+
+            logging.debug(f"checking configuration record {record.title}")
+
+            # Load the configuration record and get the gateway_uid from the facade.
+            configuration_record = vault.KeeperRecord.load(params, record.record_uid)
+            configuration_facade = PamConfigurationRecordFacade()
+            configuration_facade.record = configuration_record
+
+            configuration_gateway_uid = configuration_facade.controller_uid
+            if configuration_gateway_uid is None:
+                logging.debug(f"configuration {configuration_record.title} does not have a gateway set, skipping.")
+                continue
+
+            # Get the gateway for this configuration.
+            found_gateway = next((x for x in all_gateways if utils.base64_url_encode(x.controllerUid) ==
+                                  configuration_gateway_uid), None)
+            if found_gateway is None:
+                logging.debug(f"cannot find gateway for configuration {configuration_record.title}, skipping.")
+                continue
+
+            application_id = utils.base64_url_encode(found_gateway.applicationUid)
+            application = KSMCommand.get_app_record(params, application_id)
+            if application is None:
+                logging.debug(f"cannot find application for gateway {gateway}, skipping.")
+                continue
+
+            if (utils.base64_url_encode(found_gateway.controllerUid) == gateway or
+                    found_gateway.controllerName.lower() == gateway.lower()):
+                return GatewayContext(
+                    configuration=configuration_record,
+                    facade=configuration_facade,
+                    gateway=found_gateway,
+                    application=application
+                )
+
+        return None
+
+    @property
+    def gateway_uid(self) -> str:
+        return utils.base64_url_encode(self.gateway.controllerUid)
+
+    @property
+    def configuration_uid(self) -> str:
+        return self.configuration.record_uid
+
+    @property
+    def gateway_name(self) -> str:
+        return self.gateway.controllerName
+
+    @property
+    def default_shared_folder_uid(self) -> str:
+        return self.facade.folder_uid
+
+    def is_gateway(self, request_gateway: str) -> bool:
+        if request_gateway is None or self.gateway_name is None:
+            return False
+        return (request_gateway == utils.base64_url_encode(self.gateway.controllerUid) or
+                request_gateway.lower() == self.gateway_name.lower())
+
+    def get_shared_folders(self, params: KeeperParams) -> List[dict]:
+        if self._shared_folders is None:
+            self._shared_folders = []
+            application_uid = utils.base64_url_encode(self.gateway.applicationUid)
+            app_info = KSMCommand.get_app_info(params, application_uid)
+            for info in app_info:
+                if info.shares is None:
+                    continue
+                for shared in info.shares:
+                    uid_str = utils.base64_url_encode(shared.secretUid)
+                    shared_type = APIRequest_pb2.ApplicationShareType.Name(shared.shareType)
+                    if shared_type == 'SHARE_TYPE_FOLDER':
+                        if uid_str not in params.shared_folder_cache:
+                            continue
+                        cached_shared_folder = params.shared_folder_cache[uid_str]
+                        self._shared_folders.append({
+                            "uid": uid_str,
+                            "name": cached_shared_folder.get('name_unencrypted'),
+                            "folder": cached_shared_folder
+                        })
+        return self._shared_folders
+
+    def decrypt(self, cipher_base64: bytes) -> dict:
+        ciphertext = base64.b64decode(cipher_base64)
+        return json.loads(decrypt_aes_v2(ciphertext, self.configuration.record_key))
+
+    def encrypt(self, data: dict) -> str:
+        json_data = json.dumps(data)
+        ciphertext = encrypt_aes_v2(json_data.encode(), self.configuration.record_key)
+        return base64.b64encode(ciphertext).decode()
+
+    def encrypt_str(self, data: Union[bytes, str]) -> str:
+        if isinstance(data, str):
+            data = data.encode()
+        ciphertext = encrypt_aes_v2(data, self.configuration.record_key)
+        return base64.b64encode(ciphertext).decode()
+
+
+class PAMGatewayActionDiscoverCommandBase(Command):
+
+    """
+    The discover command base.
+
+    Contains static methods to get the configuration record, and to get and update the discovery store.
+    These are methods used by multiple discover actions.
+    """
+
+    # If the discovery data field does not exist, or the field contains no values, use the template to
+    # initialize the field.
+
+    STORE_LABEL = "discoveryKey"
+    FIELD_MAPPING = {
+        "hostnameOrIPAddress": {
+            "type": "dict",
+            "field_input": [
+                {"key": "hostName", "prompt": "Hostname"},
+                {"key": "port", "prompt": "Port"}
+            ],
+            "field_format": [
+                {"key": "hostName", "label": "Hostname"},
+                {"key": "port", "label": "Port"},
+            ]
+        },
+        "alternativeIPs": {
+            "type": "multiline",
+        }
+    }
+
+    type_name_map = {
+        PAM_USER: "PAM Users",
+        PAM_MACHINE: "PAM Machines",
+        PAM_DATABASE: "PAM Databases",
+        PAM_DIRECTORY: "PAM Directories",
+    }
+
+    @staticmethod
+    def get_response_data(router_response: dict) -> Optional[dict]:
+
+        if router_response is None:
+            return None
+
+        response = router_response.get("response")
+        logging.debug(f"Router Response: {response}")
+        payload = get_response_payload(router_response)
+        return payload.get("data")
+
+    @staticmethod
+    def _gr(msg):
+        return f"{bcolors.OKGREEN}{msg}{bcolors.ENDC}"
+
+    @staticmethod
+    def _bl(msg):
+        return f"{bcolors.OKBLUE}{msg}{bcolors.ENDC}"
+
+    @staticmethod
+    def _h(msg):
+        return f"{bcolors.HEADER}{msg}{bcolors.ENDC}"
+
+    @staticmethod
+    def _b(msg):
+        return f"{bcolors.BOLD}{msg}{bcolors.ENDC}"
+
+    @staticmethod
+    def _f(msg):
+        return f"{bcolors.FAIL}{msg}{bcolors.ENDC}"
+
+    @staticmethod
+    def _p(msg):
+        return msg
+
+    @staticmethod
+    def _n(record_type):
+        return PAMGatewayActionDiscoverCommandBase.type_name_map.get(record_type, "PAM Configuration")
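Reviewer note: the `encrypt`/`decrypt`/`encrypt_str` helpers above wrap AES (via `encrypt_aes_v2`/`decrypt_aes_v2`) with the PAM configuration record key and base64-encode the ciphertext for transport. A minimal sketch of the round trip, assuming `gateway_context` came from `GatewayContext.from_gateway()` and the payload values are hypothetical:

```python
# Sketch: round-tripping a payload through the GatewayContext helpers.
payload = {"user": "alice", "parent_record_uid": "hypotheticalUid"}

blob = gateway_context.encrypt(payload)            # dict -> JSON -> AES -> base64 str
restored = gateway_context.decrypt(blob.encode())  # base64 -> AES -> JSON -> dict
assert restored == payload
```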
diff --git a/keepercommander/commands/discover/debug.py b/keepercommander/commands/discover/debug.py
new file mode 100644
index 000000000..c5972a42d
--- /dev/null
+++ b/keepercommander/commands/discover/debug.py
@@ -0,0 +1,268 @@
+from __future__ import annotations
+import argparse
+import os
+from . import PAMGatewayActionDiscoverCommandBase
+from ...display import bcolors
+from ... import vault
+from discovery_common.infrastructure import Infrastructure
+from discovery_common.record_link import RecordLink
+from discovery_common.types import UserAcl, DiscoveryObject
+from keeper_dag import EdgeType
+from importlib.metadata import version
+from typing import Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...vault import TypedRecord
+    from ...params import KeeperParams
+
+
+class PAMGatewayActionDiscoverDebugCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-command-debug')
+
+    # The record to base everything on.
+    parser.add_argument('--record-uid', '-i', required=False, dest='record_uid', action='store',
+                        help='Keeper record UID.')
+
+    # What to do
+    parser.add_argument('--info', required=False, dest='info_flag',
+                        action='store_true', help='Display information about the record.')
+    parser.add_argument('--belongs-to', required=False, dest='belongs_to_flag',
+                        action='store_true', help='Connect the record to the parent record.')
+    parser.add_argument('--disconnect', required=False, dest='disconnect_flag',
+                        action='store_true', help='Disconnect the record from the parent record.')
+    parser.add_argument('--render', required=False, dest='render_flag', action='store_true',
+                        help='Render graphs.')
+    parser.add_argument('--version', required=False, dest='version_flag', action='store_true',
+                        help='Get module versions.')
+
+    # For --belongs-to and --disconnect
+    parser.add_argument('--parent-record-uid', '-p', required=False, dest='parent_record_uid',
+                        action='store', help='The parent record UID.')
+
+    # For the info command
+    parser.add_argument('--render-all-edges', required=False, dest='render_all_edges',
+                        action='store_true', help='Render all edges, including inactive ones.')
+    parser.add_argument('--graph-dir', required=False, dest='graph_dir', action='store',
+                        help='Directory to save graphs.')
+    parser.add_argument('--infra-graph-name', required=False, dest='infra_name', action='store',
+                        default="infra_graph", help='Infrastructure graph name.')
+    parser.add_argument('--rl-graph-name', required=False, dest='rl_name', action='store',
+                        default="record_linking_graph", help='Record linking graph name.')
+    parser.add_argument('--graph-type', '-gt', required=False, choices=['dot', 'twopi', 'patchwork'],
+                        dest='graph_type', default="dot", action='store', help='The render graph type.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverDebugCommand.parser
+
+    @staticmethod
+    def _versions():
+        print("")
+        print(f"{bcolors.BOLD}keeper-dag version:{bcolors.ENDC} {version('keeper-dag')}")
+        print(f"{bcolors.BOLD}discovery-common version:{bcolors.ENDC} {version('discovery-common')}")
+        print("")
+
+    @staticmethod
+    def _show_info(params: KeeperParams, configuration_record: TypedRecord, record: TypedRecord):
+
+        infra = Infrastructure(record=configuration_record, params=params)
+        record_link = RecordLink(record=configuration_record, params=params)
+
+        print("")
+        print(f"{bcolors.BOLD}Configuration UID:{bcolors.ENDC} {configuration_record.record_uid}")
+        print(f"{bcolors.BOLD}Configuration Key Bytes Hex:{bcolors.ENDC} {configuration_record.record_key.hex()}")
+        print("")
+        try:
+            discovery_vertices = infra.dag.search_content({"record_uid": record.record_uid})
+            if len(discovery_vertices) > 0:
+
+                if len(discovery_vertices) > 1:
+                    print(f"{bcolors.FAIL}Found multiple vertices with the record UID of "
+                          f"{record.record_uid}{bcolors.ENDC}")
+                    for vertex in discovery_vertices:
+                        print(f" * Infrastructure Vertex UID: {vertex.uid}")
+                    print("")
+
+                discovery_vertex = discovery_vertices[0]
+                content = DiscoveryObject.get_discovery_object(discovery_vertex)
+
+                print(f"{bcolors.HEADER}Discovery Object Information{bcolors.ENDC}")
+                print(f"Vertex UID: {content.uid}")
+                print(f"Record UID: {content.record_uid}")
+                print(f"Parent Record UID: {content.parent_record_uid}")
+                print(f"Shared Folder UID: {content.shared_folder_uid}")
+                print(f"Record Type: {content.record_type}")
+                print(f"Object Type: {content.object_type_value}")
+                print(f"Ignore Object: {content.ignore_object}")
+                print(f"Rule Engine Result: {content.action_rules_result}")
{content.action_rules_result}") + print(f"Discovery ID: {content.id}") + print(f"Discovery Name: {content.name}") + print(f"Discovery Title: {content.title}") + print(f"Discovery Description: {content.description}") + print(f"Discovery Notes:") + for note in content.notes: + print(f" * {note}") + if content.error is not None: + print(f"{bcolors.FAIL}Error: {content.error}{bcolors.ENDC}") + if content.stacktrace is not None: + print(f"{bcolors.FAIL}Stack Trace:{bcolors.ENDC}") + print(f"{bcolors.FAIL}{content.stacktrace}{bcolors.ENDC}") + print("") + print(f"{bcolors.HEADER}Record Type Specifics{bcolors.ENDC}") + + item_dict = content.item + for k, v in item_dict.__dict__.items(): + print(f"{k} = {v}") + + print("") + print(f"{bcolors.HEADER}Belongs To Vertices{bcolors.ENDC}") + vertices = discovery_vertex.belongs_to_vertices() + for vertex in vertices: + content = DiscoveryObject.get_discovery_object(vertex) + print(f" * {content.description} ({vertex.uid})") + for edge_type in [EdgeType.LINK, EdgeType.ACL, EdgeType.KEY, EdgeType.DELETION]: + edge = discovery_vertex.get_edge(vertex, edge_type=edge_type) + if edge is not None: + print(f" . {edge_type}, active: {edge.active}") + + if len(vertices) == 0: + print(f"{bcolors.FAIL} Does not belong to anyone{bcolors.ENDC}") + + print("") + print(f"{bcolors.HEADER}Vertices Belonging To{bcolors.ENDC}") + vertices = discovery_vertex.has_vertices() + for vertex in vertices: + content = DiscoveryObject.get_discovery_object(vertex) + print(f" * {content.description} ({vertex.uid})") + for edge_type in [EdgeType.LINK, EdgeType.ACL, EdgeType.KEY, EdgeType.DELETION]: + edge = vertex.get_edge(discovery_vertex, edge_type=edge_type) + if edge is not None: + print(f" . {edge_type}, active: {edge.active}") + if len(vertices) == 0: + print(f" Does not have any children.") + + print("") + else: + print(f"{bcolors.FAIL}Could not find infrastructure vertex.{bcolors.ENDC}") + except Exception as err: + print(f"{bcolors.FAIL}Could not get information on infrastructure: {err}{bcolors.ENDC}") + + record_vertex = record_link.dag.get_vertex(record.record_uid) + if record_vertex is not None: + print(f"{bcolors.HEADER}Record Linking{bcolors.ENDC}") + for parent_vertex in record_vertex.belongs_to_vertices(): + + description = "Unknown" + discovery_vertices = infra.dag.search_content({"record_uid": parent_vertex.uid}) + if len(discovery_vertices) > 0: + content = DiscoveryObject.get_discovery_object(discovery_vertices[0]) + description = content.description + acl_edge = record_vertex.get_edge(parent_vertex, EdgeType.ACL) + if acl_edge is not None: + acl_content = acl_edge.content_as_object(UserAcl) + print(f" * ACL to {description} ({parent_vertex.uid})") + print(f" . belongs_to = {acl_content.belongs_to}") + print(f" . 
is_admin = {acl_content.is_admin}") + link_edge = record_vertex.get_edge(parent_vertex, EdgeType.LINK) + if link_edge is not None: + print(f" * LINK to {description} ({parent_vertex.uid})") + else: + print(f"{bcolors.FAIL}Cannot find in record linking.{bcolors.ENDC}") + + @staticmethod + def _render(params: KeeperParams, + configuration_record: TypedRecord, + infra_name: str = "infra_name", rl_name: str = "record_link_graph", + graph_type: str = "dot", graph_dir: str = None, render_all_edges: bool = False): + + if graph_dir is None: + graph_dir = os.environ.get("HOME", os.environ.get("PROFILENAME", ".")) + + print(f"Loading graphs for controller {configuration_record.record_uid}.") + + infra = Infrastructure(record=configuration_record, params=params) + record_link = RecordLink(record=configuration_record, params=params) + + print("") + try: + filename = os.path.join(graph_dir, f"{infra_name}.dot") + infra.to_dot( + graph_type=graph_type, + show_only_active_vertices=False, + show_only_active_edges=render_all_edges + ).render(filename) + print(f"Infrastructure graph rendered to {filename}") + except Exception as err: + print(f"{bcolors.FAIL}Could not generate infrastructure graph: {err}{bcolors.ENDC}") + raise err + + try: + filename = os.path.join(graph_dir, f"{rl_name}.dot") + record_link.to_dot( + graph_type=graph_type, + show_only_active_vertices=False, + show_only_active_edges=render_all_edges + ).render(filename) + print(f"Record linking graph rendered to {filename}") + except Exception as err: + print(f"{bcolors.FAIL}Could not generate record linking graph: {err}{bcolors.ENDC}") + raise err + + filename = os.path.join(graph_dir, f"infra_raw.dot") + with open(filename, "w") as fh: + fh.write(str(infra.dag.to_dot())) + fh.close() + + filename = os.path.join(graph_dir, f"record_linking_raw.dot") + with open(filename, "w") as fh: + fh.write(str(record_link.dag.to_dot())) + fh.close() + + def execute(self, params, **kwargs): + + info_flag = kwargs.get("info_flag", False) + belongs_to_flag = kwargs.get("belongs_to_flag", False) + disconnect_flag = kwargs.get("disconnect_flag", False) + render_flag = kwargs.get("render_flag", False) + version_flag = kwargs.get("version_flag", False) + + record_uid = kwargs.get("record_uid") + configuration_record = None + if record_uid is not None: + record = vault.KeeperRecord.load(params, record_uid) # type: Optional[TypedRecord] + if record is None: + print(f"{bcolors.FAIL}Record does not exists.{bcolors.ENDC}") + return + + configuration_record = record + if record.record_type in ["pamUser", "pamMachine", "pamDatabase", "pamDirectory"]: + record_rotation = params.record_rotation_cache.get(record_uid) + if record_rotation is None: + print(f"{bcolors.FAIL}Record does not have rotation settings.{bcolors.ENDC}") + return + + controller_uid = record_rotation.get("configuration_uid") + if controller_uid is None: + print(f"{bcolors.FAIL}Record does not have the PAM Configuration set.{bcolors.ENDC}") + return + + configuration_record = vault.KeeperRecord.load(params, controller_uid) # type: Optional[TypedRecord] + + if version_flag is True: + self._versions() + if render_flag is True: + self._render( + params=params, + configuration_record=configuration_record, + infra_name=kwargs.get("infra_name"), + rl_name=kwargs.get("rl_name"), + graph_type=kwargs.get("graph_type"), + graph_dir=kwargs.get("graph_dir"), + render_all_edges=kwargs.get("render_all_edges"), + ) + if info_flag is True: + self._show_info( + params=params, + 
diff --git a/keepercommander/commands/discover/job_remove.py b/keepercommander/commands/discover/job_remove.py
new file mode 100644
index 000000000..805316a19
--- /dev/null
+++ b/keepercommander/commands/discover/job_remove.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+import argparse
+import logging
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ..pam.pam_dto import GatewayActionDiscoverJobRemoveInputs, GatewayActionDiscoverJobRemove, GatewayAction
+from ...proto import pam_pb2
+from ..pam.router_helper import router_send_action_to_gateway, router_get_connected_gateways
+from ... import vault_extensions
+from ...display import bcolors
+from discovery_common.jobs import Jobs
+
+
+class PAMGatewayActionDiscoverJobRemoveCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-command-remove')
+    parser.add_argument('--job-id', '-j', required=True, dest='job_id', action='store',
+                        help='Discovery job id.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverJobRemoveCommand.parser
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        job_id = kwargs.get("job_id")
+
+        # Get all the PAM configuration records.
+        configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration"))
+
+        for configuration_record in configuration_records:
+
+            gateway_context = GatewayContext.from_configuration_uid(params, configuration_record.record_uid)
+            if gateway_context is None:
+                continue
+
+            jobs = Jobs(record=configuration_record, params=params)
+            job_item = jobs.get_job(job_id)
+            if job_item is not None:
+
+                try:
+                    # First, cancel the discovery job on the gateway if it is running.
+                    logging.debug("cancel job on the gateway, if running")
+                    action_inputs = GatewayActionDiscoverJobRemoveInputs(
+                        configuration_uid=gateway_context.configuration_uid,
+                        job_id=job_id
+                    )
+
+                    conversation_id = GatewayAction.generate_conversation_id()
+                    router_response = router_send_action_to_gateway(
+                        params=params,
+                        gateway_action=GatewayActionDiscoverJobRemove(
+                            inputs=action_inputs,
+                            conversation_id=conversation_id),
+                        message_type=pam_pb2.CMT_GENERAL,
+                        is_streaming=False,
+                        destination_gateway_uid_str=gateway_context.gateway_uid
+                    )
+
+                    data = self.get_response_data(router_response)
+                    if data is None:
+                        raise Exception("The router returned a failure.")
+                    elif data.get("success") is False:
+                        error = data.get("error")
+                        raise Exception(f"Discovery job was not removed: {error}")
+                except Exception as err:
+                    logging.debug(f"gateway returned an error removing discovery job: {err}")
+                    return
+
+                jobs.cancel(job_id)
+
+                print(f"{bcolors.OKGREEN}Discovery job has been removed or cancelled.{bcolors.ENDC}")
+                return
+
+        print(f'{bcolors.FAIL}Discovery job not found. Cannot remove the job.{bcolors.ENDC}')
+        return
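Reviewer note: every discover action unwraps the router reply with the same convention seen above: `get_response_data` returns `None` when the router itself fails, and a gateway-side failure is signalled by `success: false` plus an `error` key. A sketch of that contract as a hypothetical helper (not part of this PR):

```python
from typing import Optional

def check_gateway_reply(data: Optional[dict]) -> dict:
    """Raise on the two failure modes the discover commands check for."""
    if data is None:
        raise Exception("The router returned a failure.")
    if data.get("success") is False:
        raise Exception(f"Gateway action failed: {data.get('error')}")
    return data
```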
diff --git a/keepercommander/commands/discover/job_start.py b/keepercommander/commands/discover/job_start.py
new file mode 100644
index 000000000..3a883a83f
--- /dev/null
+++ b/keepercommander/commands/discover/job_start.py
@@ -0,0 +1,176 @@
+from __future__ import annotations
+import argparse
+import logging
+
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from .job_status import PAMGatewayActionDiscoverJobStatusCommand
+from ..pam.router_helper import router_send_action_to_gateway, print_router_response, router_get_connected_gateways
+from ..pam.user_facade import PamUserRecordFacade
+from ..pam.pam_dto import GatewayActionDiscoverJobStartInputs, GatewayActionDiscoverJobStart, GatewayAction
+from ... import vault_extensions
+from ... import vault
+from ...proto import pam_pb2
+from ...display import bcolors
+from discovery_common.jobs import Jobs
+from typing import List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...params import KeeperParams
+
+
+class PAMGatewayActionDiscoverJobStartCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-start-command')
+    parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store',
+                        help='Gateway name or UID.')
+    parser.add_argument('--resource', '-r', required=False, dest='resource_uid', action='store',
+                        help='UID of the resource record. Set to discover a specific resource.')
+    parser.add_argument('--lang', required=False, dest='language', action='store', default="en",
+                        help='Language')
+    parser.add_argument('--skip-machine-dir-users', required=False, dest='include_machine_dir_users',
+                        action='store_false', default=True,
+                        help='Do not include directory users found on the machine.')
+    parser.add_argument('--inc-azure-aadds', required=False, dest='include_azure_aadds',
+                        action='store_true', help='Include Azure Active Directory Domain Service.')
+    parser.add_argument('--skip-rules', required=False, dest='skip_rules',
+                        action='store_true', help='Skip running the rule engine.')
+    parser.add_argument('--skip-machines', required=False, dest='skip_machines',
+                        action='store_true', help='Skip discovering machines.')
+    parser.add_argument('--skip-databases', required=False, dest='skip_databases',
+                        action='store_true', help='Skip discovering databases.')
+    parser.add_argument('--skip-directories', required=False, dest='skip_directories',
+                        action='store_true', help='Skip discovering directories.')
+    parser.add_argument('--skip-cloud-users', required=False, dest='skip_cloud_users',
+                        action='store_true', help='Skip discovering cloud users.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverJobStartCommand.parser
+
+    @staticmethod
+    def make_protobuf_user_map(params: KeeperParams, gateway_context: GatewayContext) -> List[dict]:
+        """
+        Make a user map for PAM Users.
+
+        The map is used to find existing records.
+        Since KSM cannot read the rotation settings using protobuf,
+        it cannot match a vault record to a discovered user.
+        This map will map a login/DN and parent UID to a record UID.
+        """
+
+        user_map = []
+        for record in vault_extensions.find_records(params, record_type="pamUser"):
+            user_record = vault.KeeperRecord.load(params, record.record_uid)
+            user_facade = PamUserRecordFacade()
+            user_facade.record = user_record
+
+            info = params.record_rotation_cache.get(user_record.record_uid)
+            if info is None:
+                continue
+
+            # Make sure this user is part of this gateway.
+            if info.get("configuration_uid") != gateway_context.configuration_uid:
+                continue
+
+            # If the user's Admin Cred Record (i.e., parent) is blank, skip the mapping item.
+            # This will be a UID string, not 16 bytes.
+ if info.get("resource_uid") is None or info.get("resource_uid") == "": + continue + + user_map.append({ + "user": user_facade.login if user_facade.login != "" else None, + "dn": user_facade.distinguishedName if user_facade.distinguishedName != "" else None, + "record_uid": user_record.record_uid, + "parent_record_uid": info.get("resource_uid") + }) + + logging.debug(f"found {len(user_map)} user map items") + + return user_map + + def execute(self, params, **kwargs): + + if not hasattr(params, 'pam_controllers'): + router_get_connected_gateways(params) + + # Load the configuration record and get the gateway_uid from the facade. + gateway = kwargs.get('gateway') + + gateway_context = GatewayContext.from_gateway(params, gateway) + if gateway_context is None: + print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.") + return + + jobs = Jobs(record=gateway_context.configuration, params=params) + current_job_item = jobs.current_job + if current_job_item is not None: + if current_job_item.is_running is True: + print("") + print(f"{bcolors.FAIL}An discovery job is currently running. " + f"Cannot start another until it is finished.{bcolors.ENDC}") + print(f"To check the status, use the command " + f"'{bcolors.OKGREEN}pam action discover status{bcolors.ENDC}'.") + print(f"To stop and remove the current job, use the command " + f"'{bcolors.OKGREEN}pam action discover remove -j {current_job_item.job_id}'.") + return + + print(f"{bcolors.FAIL}An active discovery job exists for this gateway.{bcolors.ENDC}") + print("") + status = PAMGatewayActionDiscoverJobStatusCommand() + status.execute(params=params) + print("") + + yn = input("Do you wish to remove the active discovery job and run a new one [Y/N]>").lower() + while True: + if yn[0] == "y": + jobs.cancel(current_job_item.job_id) + break + elif yn[0] == "n": + print(f"{bcolors.FAIL}Not starting a discovery job.{bcolors.ENDC}") + return + + action_inputs = GatewayActionDiscoverJobStartInputs( + configuration_uid=gateway_context.configuration_uid, + resource_uid=kwargs.get('resource_uid'), + user_map=gateway_context.encrypt( + self.make_protobuf_user_map( + params=params, + gateway_context=gateway_context + ) + ), + + shared_folder_uid=gateway_context.default_shared_folder_uid, + language=kwargs.get('language'), + + # Settings + include_machine_dir_users=kwargs.get('include_machine_dir_users', True), + include_azure_aadds=kwargs.get('include_azure_aadds', False), + skip_rules=kwargs.get('skip_rules', False), + skip_machines=kwargs.get('skip_machines', False), + skip_databases=kwargs.get('skip_databases', False), + skip_directories=kwargs.get('skip_directories', False), + skip_cloud_users=kwargs.get('skip_cloud_users', False) + ) + + conversation_id = GatewayAction.generate_conversation_id() + router_response = router_send_action_to_gateway( + params=params, + gateway_action=GatewayActionDiscoverJobStart( + inputs=action_inputs, + conversation_id=conversation_id), + message_type=pam_pb2.CMT_GENERAL, + is_streaming=False, + destination_gateway_uid_str=gateway_context.gateway_uid + ) + + data = self.get_response_data(router_response) + if data is None: + print(f"{bcolors.FAIL}The router returned a failure.{bcolors.ENDC}") + return + + if "has been queued" in data.get("Response", ""): + + print("") + print("The discovery job is currently running.") + print(f"To check the status, use the command '{bcolors.OKGREEN}pam action discover status{bcolors.ENDC}'.") + print(f"To stop and remove the current job, use the command " + 
f"'{bcolors.OKGREEN}pam action discover remove -j '.") + else: + print_router_response(router_response, conversation_id) diff --git a/keepercommander/commands/discover/job_status.py b/keepercommander/commands/discover/job_status.py new file mode 100644 index 000000000..770281783 --- /dev/null +++ b/keepercommander/commands/discover/job_status.py @@ -0,0 +1,253 @@ +from __future__ import annotations +import argparse +import json +from . import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ... import vault_extensions +from ...display import bcolors +from ..pam.router_helper import router_get_connected_gateways +from discovery_common.jobs import Jobs +from discovery_common.infrastructure import Infrastructure +from discovery_common.constants import DIS_INFRA_GRAPH_ID +from discovery_common.types import DiscoveryDelta, DiscoveryObject +from keeper_dag.dag import DAG +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from discovery_common.jobs import JobItem + + +def _h(text): + return f"{bcolors.HEADER}{text}{bcolors.ENDC}" + + +def _f(text): + return f"{bcolors.FAIL}{text}{bcolors.ENDC}" + + +class PAMGatewayActionDiscoverJobStatusCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-discover-status-command') + parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store', + help='Show only discovery jobs from a specific gateway.') + parser.add_argument('--job-id', '-j', required=False, dest='job_id', action='store', + help='Detailed information for a specific discovery job.') + parser.add_argument('--file', required=False, dest='json_file', action='store', + help='Save status to JSON file.') + parser.add_argument('--history', required=False, dest='show_history', action='store_true', + help='Show history') + + def get_parser(self): + return PAMGatewayActionDiscoverJobStatusCommand.parser + + def job_detail(self, job): + pass + + @staticmethod + def print_job_table(jobs, max_gateway_name): + + print("") + print(f"{bcolors.HEADER}{'Job ID'.ljust(14, ' ')} " + f"{'Gateway Name'.ljust(max_gateway_name, ' ')} " + f"{'Gateway UID'.ljust(22, ' ')} " + f"{'Status'.ljust(12, ' ')} " + f"{'Resource UID'.ljust(22, ' ')} " + f"{'Started'.ljust(19, ' ')} " + f"{'Completed'.ljust(19, ' ')} " + f"{'Duration'.ljust(19, ' ')} " + f"{bcolors.ENDC}") + + print(f"{''.ljust(14, '=')} " + f"{''.ljust(max_gateway_name, '=')} " + f"{''.ljust(22, '=')} " + f"{''.ljust(12, '=')} " + f"{''.ljust(22, '=')} " + f"{''.ljust(19, '=')} " + f"{''.ljust(19, '=')} " + f"{''.ljust(19, '=')}") + + found_completed = False + + for job in jobs: + color = "" + if job['status'] == "COMPLETE": + color = bcolors.OKGREEN + found_completed = True + elif job['status'] == "RUNNING": + color = bcolors.OKBLUE + elif job['status'] == "FAILED": + color = bcolors.FAIL + print(f"{color}{job['job_id']} " + f"{job['gateway'].ljust(max_gateway_name, ' ')} " + f"{job['gateway_uid']} " + f"{job['status'].ljust(12, ' ')} " + f"{(job.get('resource_uid') or 'NA').ljust(22, ' ')} " + f"{(job.get('start_ts_str') or 'NA').ljust(19, ' ')} " + f"{(job.get('end_ts_str') or 'NA').ljust(19, ' ')} " + f"{(job.get('duration') or 'NA').ljust(19, ' ')} " + f"{bcolors.ENDC}") + + if found_completed is True: + print("") + print(f"To process a completed Discovery job, use the command " + f"'{bcolors.OKGREEN}pam action discover process -j {bcolors.ENDC}'.") + print("") + + @staticmethod + def print_job_detail(params, gateway_context, jobs, job_id): + + infra = 
diff --git a/keepercommander/commands/discover/job_status.py b/keepercommander/commands/discover/job_status.py
new file mode 100644
index 000000000..770281783
--- /dev/null
+++ b/keepercommander/commands/discover/job_status.py
@@ -0,0 +1,253 @@
+from __future__ import annotations
+import argparse
+import json
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ... import vault_extensions
+from ...display import bcolors
+from ..pam.router_helper import router_get_connected_gateways
+from discovery_common.jobs import Jobs
+from discovery_common.infrastructure import Infrastructure
+from discovery_common.constants import DIS_INFRA_GRAPH_ID
+from discovery_common.types import DiscoveryDelta, DiscoveryObject
+from keeper_dag.dag import DAG
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from discovery_common.jobs import JobItem
+
+
+def _h(text):
+    return f"{bcolors.HEADER}{text}{bcolors.ENDC}"
+
+
+def _f(text):
+    return f"{bcolors.FAIL}{text}{bcolors.ENDC}"
+
+
+class PAMGatewayActionDiscoverJobStatusCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-status-command')
+    parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store',
+                        help='Show only discovery jobs from a specific gateway.')
+    parser.add_argument('--job-id', '-j', required=False, dest='job_id', action='store',
+                        help='Detailed information for a specific discovery job.')
+    parser.add_argument('--file', required=False, dest='json_file', action='store',
+                        help='Save status to JSON file.')
+    parser.add_argument('--history', required=False, dest='show_history', action='store_true',
+                        help='Show history')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverJobStatusCommand.parser
+
+    def job_detail(self, job):
+        pass
+
+    @staticmethod
+    def print_job_table(jobs, max_gateway_name):
+
+        print("")
+        print(f"{bcolors.HEADER}{'Job ID'.ljust(14, ' ')} "
+              f"{'Gateway Name'.ljust(max_gateway_name, ' ')} "
+              f"{'Gateway UID'.ljust(22, ' ')} "
+              f"{'Status'.ljust(12, ' ')} "
+              f"{'Resource UID'.ljust(22, ' ')} "
+              f"{'Started'.ljust(19, ' ')} "
+              f"{'Completed'.ljust(19, ' ')} "
+              f"{'Duration'.ljust(19, ' ')} "
+              f"{bcolors.ENDC}")
+
+        print(f"{''.ljust(14, '=')} "
+              f"{''.ljust(max_gateway_name, '=')} "
+              f"{''.ljust(22, '=')} "
+              f"{''.ljust(12, '=')} "
+              f"{''.ljust(22, '=')} "
+              f"{''.ljust(19, '=')} "
+              f"{''.ljust(19, '=')} "
+              f"{''.ljust(19, '=')}")
+
+        found_completed = False
+
+        for job in jobs:
+            color = ""
+            if job['status'] == "COMPLETE":
+                color = bcolors.OKGREEN
+                found_completed = True
+            elif job['status'] == "RUNNING":
+                color = bcolors.OKBLUE
+            elif job['status'] == "FAILED":
+                color = bcolors.FAIL
+            print(f"{color}{job['job_id']} "
+                  f"{job['gateway'].ljust(max_gateway_name, ' ')} "
+                  f"{job['gateway_uid']} "
+                  f"{job['status'].ljust(12, ' ')} "
+                  f"{(job.get('resource_uid') or 'NA').ljust(22, ' ')} "
+                  f"{(job.get('start_ts_str') or 'NA').ljust(19, ' ')} "
+                  f"{(job.get('end_ts_str') or 'NA').ljust(19, ' ')} "
+                  f"{(job.get('duration') or 'NA').ljust(19, ' ')} "
+                  f"{bcolors.ENDC}")
+
+        if found_completed is True:
+            print("")
+            print(f"To process a completed discovery job, use the command "
+                  f"'{bcolors.OKGREEN}pam action discover process -j {bcolors.ENDC}'.")
+            print("")
+
+    @staticmethod
+    def print_job_detail(params, gateway_context, jobs, job_id):
+
+        for job in jobs:
+            if job_id == job["job_id"]:
+                gateway_context = job["gateway_context"]
+
+                # Build the infrastructure graph against the configuration of the gateway that ran this job.
+                infra = Infrastructure(record=gateway_context.configuration, params=params)
+
+                if job['status'] == "COMPLETE":
+                    color = bcolors.OKGREEN
+                elif job['status'] == "RUNNING":
+                    color = bcolors.OKBLUE
+                else:
+                    color = bcolors.FAIL
+                status = f"{color}{job['status']}{bcolors.ENDC}"
+
+                print("")
+                print(f"{_h('Job ID')}: {job['job_id']}")
+                print(f"{_h('Sync Point')}: {job['sync_point']}")
+                print(f"{_h('Gateway Name')}: {job['gateway']}")
+                print(f"{_h('Gateway UID')}: {job['gateway_uid']}")
+                print(f"{_h('Configuration UID')}: {gateway_context.configuration_uid}")
+                print(f"{_h('Status')}: {status}")
+                print(f"{_h('Resource UID')}: {job.get('resource_uid', 'NA')}")
+                print(f"{_h('Started')}: {job['start_ts_str']}")
+                print(f"{_h('Completed')}: {job.get('end_ts_str')}")
+                print(f"{_h('Duration')}: {job.get('duration')}")
+
+                # If it failed, show the error and stacktrace.
+                if job['status'] == "FAILED":
+                    print("")
+                    print(f"{_h('Gateway Error')}:")
+                    print(f"{color}{job['error']}{bcolors.ENDC}")
+                    print("")
+                    print(f"{_h('Gateway Stacktrace')}:")
+                    print(f"{color}{job['stacktrace']}{bcolors.ENDC}")
+                # If it finished, show information about what was discovered.
+                elif job.get('end_ts') is not None:
+                    job_item = job.get("job_item")  # type: JobItem
+
+                    try:
+                        infra.load(sync_point=job_item.sync_point)
+                        print("")
+                        delta = DiscoveryDelta.model_validate(job.get('delta'))
+                        print(f"{_h('Added')} - {len(delta.added)} count")
+                        for item in delta.added:
+                            vertex = infra.dag.get_vertex(item.uid)
+                            discovery_object = DiscoveryObject.get_discovery_object(vertex)
+                            print(f" * {discovery_object.description}")
+
+                        print("")
+                        print(f"{_h('Changed')} - {len(delta.changed)} count")
+                        for item in delta.changed:
+                            vertex = infra.dag.get_vertex(item.uid)
+                            discovery_object = DiscoveryObject.get_discovery_object(vertex)
+                            print(f" * {discovery_object.description}")
+                            if item.changes is None:
+                                print("   no changes; may be an object that was not added in prior discoveries.")
+                            else:
+                                for key, value in item.changes.items():
+                                    print(f"   - {key} = {value}")
+
+                        print("")
+                        print(f"{_h('Deleted')} - {len(delta.deleted)} count")
+                        for item in delta.deleted:
+                            print(f" * discovery vertex {item.uid}")
+
+                    except Exception as err:
+                        print(f"{_f('Could not load delta from infrastructure: ' + str(err))}")
+                        print("Falling back to the raw graph.")
+                        print("")
+                        dag = DAG(conn=infra.conn, record=infra.record, graph_id=DIS_INFRA_GRAPH_ID)
+                        print(dag.to_dot_raw(sync_point=job_item.sync_point, rank_dir="RL"))
+
+                return
+
+        print(f"{bcolors.FAIL}Cannot find the job{bcolors.ENDC}")
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        gateway_filter = kwargs.get("gateway")
+        job_id = kwargs.get("job_id")
+        show_history = kwargs.get("show_history")
+
+        if job_id is not None:
+            show_history = True
+
+        # Get all the PAM configuration records.
+        configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration"))
+
+        # This is used to format the table. Start with a length of 12 characters for the gateway.
+        max_gateway_name = 12
+
+        all_jobs = []
+
+        # For each configuration/gateway, get all of its jobs.
+        # We are going to query the gateway for any updated status.
+        for configuration_record in configuration_records:
+
+            gateway_context = GatewayContext.from_configuration_uid(params, configuration_record.record_uid)
+            if gateway_context is None:
+                continue
+
+            # If we are using a gateway filter, and this gateway is not the one, go on to the next
+            # configuration/gateway.
+            if gateway_filter is not None and gateway_context.is_gateway(gateway_filter) is False:
+                continue
+
+            # If this gateway name is longer than the prior ones, set the max length to this gateway's name.
+            if len(gateway_context.gateway_name) > max_gateway_name:
+                max_gateway_name = len(gateway_context.gateway_name)
+
+            jobs = Jobs(record=configuration_record, params=params)
+            if show_history is True:
+                job_list = reversed(jobs.history)
+            else:
+                job_list = []
+                if jobs.current_job is not None:
+                    job_list = [jobs.current_job]
+
+            for job_item in job_list:
+                job = job_item.model_dump()
+                job["status"] = "RUNNING"
+                if job_item.start_ts is not None:
+                    job["start_ts_str"] = job_item.start_ts_str
+                if job_item.end_ts is not None:
+                    job["end_ts_str"] = job_item.end_ts_str
+                    job["status"] = "COMPLETE"
+                    job["duration"] = job_item.duration_sec_str
+
+                job["gateway"] = gateway_context.gateway_name
+                job["gateway_uid"] = gateway_context.gateway_uid
+
+                # These are needed for the details view.
+                job["gateway_context"] = gateway_context
+                job["job_item"] = job_item
+
+                if job_item.success is False:
+                    job["status"] = "FAILED"
+
+                all_jobs.append(job)
+
+        # Instead of printing a table, save a JSON file. The gateway context and job item are runtime
+        # objects and are not JSON-serializable, so strip them first.
+        if kwargs.get("json_file") is not None:
+            serializable_jobs = [
+                {k: v for k, v in job.items() if k not in ("gateway_context", "job_item")}
+                for job in all_jobs
+            ]
+            with open(kwargs.get("json_file"), "w") as fh:
+                fh.write(json.dumps(serializable_jobs, indent=4, default=str))
+            return
+
+        if len(all_jobs) == 0:
+            print(f"{bcolors.FAIL}There are no discovery jobs. Use 'pam action discover start' to start a "
+                  f"discovery job.{bcolors.ENDC}")
+            return
+
+        if job_id is not None:
+            self.print_job_detail(params, gateway_context, all_jobs, job_id)
+        else:
+            self.print_job_table(all_jobs, max_gateway_name)
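Reviewer note: the status column above is derived from three `JobItem` fields. The equivalent rule as a sketch (assumes the same fields used in `execute`):

```python
def job_status(job_item) -> str:
    """RUNNING until end_ts is set; success=False overrides to FAILED."""
    if job_item.success is False:
        return "FAILED"
    if job_item.end_ts is not None:
        return "COMPLETE"
    return "RUNNING"
```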
diff --git a/keepercommander/commands/discover/result_get.py b/keepercommander/commands/discover/result_get.py
new file mode 100644
index 000000000..7dd579301
--- /dev/null
+++ b/keepercommander/commands/discover/result_get.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+import argparse
+import json
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ... import vault_extensions
+from ...display import bcolors
+from ..pam.router_helper import router_get_connected_gateways
+from discovery_common.jobs import Jobs
+from discovery_common.infrastructure import Infrastructure
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from discovery_common.jobs import JobItem
+
+
+class PAMGatewayActionDiscoverResultGetCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-command-get')
+    parser.add_argument('--job-id', '-j', required=True, dest='job_id', action='store',
+                        help='Discovery job id.')
+    parser.add_argument('--file', required=True, dest='filename', action='store',
+                        help='Save results to file.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverResultGetCommand.parser
+
+    def execute(self, params, **kwargs):
+
+        job_id = kwargs.get("job_id")
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration"))
+        for configuration_record in configuration_records:
+
+            gateway_context = GatewayContext.from_configuration_uid(params, configuration_record.record_uid)
+            if gateway_context is None:
+                continue
+
+            jobs = Jobs(record=configuration_record, params=params)
+            job_item = jobs.get_job(job_id)  # type: JobItem
+            if job_item is None:
+                continue
+
+            if job_item.end_ts is None:
+                print(f'{bcolors.FAIL}Discovery job is currently running. Cannot get results.{bcolors.ENDC}')
+                return
+            if job_item.success is False:
+                print(f'{bcolors.FAIL}Discovery job failed. Cannot get results.{bcolors.ENDC}')
+                return
+
+            # TODO - Make a way to serialize the discovery data into a portable form.
+            infra = Infrastructure(record=configuration_record, params=params)
+
+            return
+
+        print(f'{bcolors.FAIL}Discovery job not found. Cannot get results.{bcolors.ENDC}')
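Reviewer note: the guards in `execute` above generalize to a simple readiness rule for `Jobs` consumers; sketch, assuming only the `JobItem` fields already used in this file:

```python
job_item = jobs.get_job(job_id)
if job_item is None:
    pass  # unknown job id; keep looking on other configurations
elif job_item.end_ts is None:
    pass  # still running; no results yet
elif job_item.success is False:
    pass  # finished with an error; no results
else:
    pass  # finished successfully; results can be fetched
```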
diff --git a/keepercommander/commands/discover/result_process.py b/keepercommander/commands/discover/result_process.py
new file mode 100644
index 000000000..d9445ae0d
--- /dev/null
+++ b/keepercommander/commands/discover/result_process.py
@@ -0,0 +1,1194 @@
+from __future__ import annotations
+import logging
+import argparse
+import json
+import os.path
+
+from keeper_secrets_manager_core.utils import url_safe_str_to_bytes
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ..pam.router_helper import router_get_connected_gateways, router_set_record_rotation_information
+from ... import api, subfolder, utils, crypto, vault, vault_extensions
+from ...display import bcolors
+from ...proto import router_pb2, record_pb2
+from discovery_common.jobs import Jobs
+from discovery_common.process import Process, QuitException, NoDiscoveryDataException
+from discovery_common.types import (DiscoveryObject, UserAcl, PromptActionEnum, PromptResult,
+                                    BulkRecordAdd, BulkRecordConvert, BulkProcessResults, BulkRecordSuccess,
+                                    BulkRecordFail, DirectoryInfo)
+from pydantic import BaseModel
+from typing import Optional, List, Any, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...params import KeeperParams
+    from ...vault import TypedRecord
+    from keeper_dag.vertex import DAGVertex
+    from discovery_common.record_link import RecordLink
+
+
+def _h(value: str) -> str:
+    return f"{bcolors.HEADER}{value}{bcolors.ENDC}"
+
+
+def _b(value: str) -> str:
+    return f"{bcolors.BOLD}{value}{bcolors.ENDC}"
+
+
+def _f(value: str) -> str:
+    return f"{bcolors.FAIL}{value}{bcolors.ENDC}"
+
+
+def _ok(value: str) -> str:
+    return f"{bcolors.OKGREEN}{value}{bcolors.ENDC}"
+
+
+# This is used for the admin user search.
+class AdminSearchResult(BaseModel):
+    record: Any
+    is_directory_user: bool
+    is_pam_user: bool
+
+
+class PAMGatewayActionDiscoverResultProcessCommand(PAMGatewayActionDiscoverCommandBase):
+
+    """
+    Process the discovery data.
+    """
+
+    parser = argparse.ArgumentParser(prog='dr-discover-command-process')
+    parser.add_argument('--job-id', '-j', required=True, dest='job_id', action='store',
+                        help='Discovery job to process.')
+    parser.add_argument('--add-all', required=False, dest='add_all', action='store_true',
+                        help='Add records without prompting.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverResultProcessCommand.parser
+
+    @staticmethod
+    def _is_directory_user(record_type: str) -> bool:
+        # pamAzureConfiguration has tenant users that act like a directory.
+        return (record_type == "pamDirectory" or
+                record_type == "pamAzureConfiguration")
+
+    @staticmethod
+    def _get_shared_folder(params: KeeperParams, pad: str, gateway_context: GatewayContext) -> str:
+        while True:
+            shared_folders = gateway_context.get_shared_folders(params)
+            index = 0
+            for folder in shared_folders:
+                print(f"{pad}* {_h(str(index + 1))} - {folder.get('uid')} {folder.get('name')}")
+                index += 1
+            selected = input(f"{pad}Enter number of the shared folder> ")
+            try:
+                return shared_folders[int(selected) - 1].get("uid")
+            except (ValueError, IndexError):
+                print(f"{pad}{_f('Input was not a valid selection.')}")
+
+    @staticmethod
+    def get_field_values(record: TypedRecord, field_type: str) -> List[str]:
+        return next(
+            (f.value
+             for f in record.fields
+             if f.type == field_type),
+            []
+        )
+
+    def get_keys_by_record(self, params: KeeperParams, gateway_context: GatewayContext,
+                           record: TypedRecord) -> List[str]:
+        """
+        For the record, get the values of the fields that are key for this record type.
+        """
+
+        key_field = Process.get_key_field(record.record_type)
+        keys = []
+        if key_field == "host_port":
+            values = self.get_field_values(record, "pamHostname")
+            if len(values) == 0:
+                return []
+
+            host = values[0].get("hostName")
+            port = values[0].get("port")
+            if port is not None:
+                if host is not None:
+                    keys.append(f"{host}:{port}".lower())
+
+        elif key_field == "host":
+            values = self.get_field_values(record, "pamHostname")
+            if len(values) == 0:
+                return []
+
+            host = values[0].get("hostName")
+            if host is not None:
+                keys.append(host.lower())
+
+        elif key_field == "user":
+
+            # These are user protobuf values.
+            # We could make this also use record linking if we stop using protobuf.
+
+            record_rotation = params.record_rotation_cache.get(record.record_uid)
+            if record_rotation is not None:
+                controller_uid = record_rotation.get("configuration_uid")
+                if controller_uid is None or controller_uid != gateway_context.configuration_uid:
+                    return []
+
+                resource_uid = record_rotation.get("resource_uid")
+                # If the resource uid is None, the Admin Cred Record has not been set.
+                if resource_uid is None:
+                    return []
+
+                values = self.get_field_values(record, "login")
+                if len(values) == 0:
+                    return []
+
+                keys.append(f"{resource_uid}:{values[0]}".lower())
+
+        return keys
+
+    def _build_record_cache(self, params: KeeperParams, gateway_context: GatewayContext) -> dict:
+
+        """
+        Make a lookup cache for all the records.
+
+        This is used to flag discovered items as existing if a record has already been added, which
+        prevents duplicate records from being added.
+        """
+
+        logging.debug("building the PAM record cache")
+
+        # Make a cache of existing records by the criteria per record type.
+        cache = {
+            "pamUser": {},
+            "pamMachine": {},
+            "pamDirectory": {},
+            "pamDatabase": {}
+        }
+
+        # Get all the PAM records.
+        records = list(vault_extensions.find_records(params, "pam*"))
+        for record in records:
+            # If the record type is not part of the cache, skip the record.
+            if record.record_type not in cache:
+                continue
+
+            # Load the full record.
+            record = vault.TypedRecord.load(params, record.record_uid)  # type: Optional[TypedRecord]
+            if record is None:
+                continue
+
+            cache_keys = self.get_keys_by_record(
+                params=params,
+                gateway_context=gateway_context,
+                record=record
+            )
+            if len(cache_keys) == 0:
+                continue
+
+            for cache_key in cache_keys:
+                cache[record.record_type][cache_key] = record.record_uid
+
+        return cache
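Reviewer note: the cache above keys records the same way discovered items are identified: `host:port` or bare `host` for resources and `resource_uid:login` for users, all lower-cased. Hypothetical keys for illustration:

```python
# Hypothetical cache produced by _build_record_cache():
cache = {
    "pamMachine": {"db01.example.com:5432": "machineRecordUid"},
    "pamUser": {"resourcerecorduid:administrator": "userRecordUid"},
    "pamDirectory": {},
    "pamDatabase": {},
}
# A discovered item whose key is already present is flagged as existing
# instead of being queued as a new record.
```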
+    def _edit_record(self, content: DiscoveryObject, pad: str, editable: List[str]) -> bool:
+
+        edit_label = input(f"{pad}Enter 'title' or the name of the {_ok('Label')} to edit, RETURN to cancel> ")
+
+        # Just pressing return exits the edit.
+        if edit_label == "":
+            return False
+
+        # If "title" is entered, then edit the title of the record.
+        if edit_label.lower() == "title":
+            new_title = input(f"{pad}Enter new title> ")
+            content.title = new_title
+
+        # If a field label is entered, and it's in the list of editable fields, then allow the user to edit it.
+        elif edit_label in editable:
+            new_value = None
+            if edit_label in self.FIELD_MAPPING:
+                type_hint = self.FIELD_MAPPING[edit_label].get("type")
+                if type_hint == "dict":
+                    field_input_format = self.FIELD_MAPPING[edit_label].get("field_input")
+                    new_value = {}
+                    for field in field_input_format:
+                        new_value[field.get('key')] = input(f"{pad}Enter {field.get('prompt')} value> ")
+                elif type_hint == "multiline":
+                    new_value = input(f"{pad}Enter {edit_label} value> ")
+                    new_values = map(str.strip, new_value.split(','))
+                    new_value = "\n".join(new_values)
+            else:
+                new_value = input(f"{pad}Enter new value> ")
+
+                # Is the value a path to a file (i.e., a private key file)?
+                if os.path.exists(new_value) is True:
+                    with open(new_value, "r") as fh:
+                        new_value = fh.read()
+
+            for edit_field in content.fields:
+                if edit_field.label == edit_label:
+                    edit_field.value = [new_value]
+
+        # Else, the label they entered cannot be edited.
+        else:
+            print(f"{pad}{_f('The field is not editable.')}")
+            return False
+
+        return True
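Reviewer note: `_edit_record` is driven by `FIELD_MAPPING` from the base class: a `dict` hint prompts once per `field_input` key, and a `multiline` hint turns comma-separated input into newline-separated values. A sketch of the values each hint produces (example data is hypothetical):

```python
# "hostnameOrIPAddress" (type "dict"): one prompt per field_input key.
hostname_value = {"hostName": "db01.example.com", "port": "5432"}

# "alternativeIPs" (type "multiline"): comma input becomes one value per line.
raw = "10.0.0.5, 10.0.0.6"
multiline_value = "\n".join(map(str.strip, raw.split(",")))  # "10.0.0.5\n10.0.0.6"
```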
{len(rows)} rows.") + + print(f"{pad} " + f"{color}Label:{bcolors.ENDC} {field.label}, " + f"{_h('Type:')} {field.type}, " + f"{_h('Value:')} {value}") + + if len(content.notes) > 0: + print("") + for note in content.notes: + print(f"{pad}* {note}") + + return editable + + @staticmethod + def _prompt_display_relationships(vertex: DAGVertex, content: DiscoveryObject, pad: str): + + if vertex is None: + return + + if content.record_type == "pamUser": + belongs_to = [] + for v in vertex.belongs_to_vertices(): + resource_content = DiscoveryObject.get_discovery_object(v) + belongs_to.append(resource_content.name) + count = len(belongs_to) + print("") + print(f"{pad}This user is found on {count} resource{'s' if count > 1 else ''}") + + def _prompt(self, + content: DiscoveryObject, + acl: UserAcl, + vertex: Optional[DAGVertex] = None, + parent_vertex: Optional[DAGVertex] = None, + resource_has_admin: bool = True, + item_count: int = 0, + items_left: int = 0, + indent: int = 0, + context: Optional[Any] = None) -> PromptResult: + + if context is None: + raise Exception("Context not set for processing the discovery results") + + parent_content = DiscoveryObject.get_discovery_object(parent_vertex) + + print("") + + params = context.get("params") + gateway_context = context.get("gateway_context") + dry_run = context.get("dry_run") + auto_add = context.get("auto_add") + + # If auto add is True, there are sometime we don't want to add the object. + # If we get a result, we want to return it. + # Skip the prompt. + if auto_add is True and vertex is not None: + result = self._auto_add_preprocess(vertex, content, parent_vertex, acl) + if result is not None: + return result + + # If the record type is a pamUser, then include parent description. + if content.record_type == "pamUser" and parent_vertex is not None: + parent_pad = "" + if indent - 1 > 0: + parent_pad = "".ljust(2 * indent, ' ') + + print(f"{parent_pad}{_h(parent_content.description)}") + + pad = "" + if indent > 0: + pad = "".ljust(2 * indent, ' ') + + print(f"{pad}{_h(content.description)}") + + show_current_object = True + while show_current_object is True: + print(f"{pad}{bcolors.HEADER}Record Title:{bcolors.ENDC} {content.title}") + + logging.debug(f"Fields: {content.fields}") + + # Display the fields and return a list of fields are editable. 
+    def _prompt(self,
+                content: DiscoveryObject,
+                acl: UserAcl,
+                vertex: Optional[DAGVertex] = None,
+                parent_vertex: Optional[DAGVertex] = None,
+                resource_has_admin: bool = True,
+                item_count: int = 0,
+                items_left: int = 0,
+                indent: int = 0,
+                context: Optional[Any] = None) -> PromptResult:
+
+        if context is None:
+            raise Exception("Context not set for processing the discovery results")
+
+        parent_content = DiscoveryObject.get_discovery_object(parent_vertex)
+
+        print("")
+
+        params = context.get("params")
+        gateway_context = context.get("gateway_context")
+        dry_run = context.get("dry_run")
+        auto_add = context.get("auto_add")
+
+        # Even when auto add is True, there are some objects we don't want to add.
+        # If the preprocess check returns a result, return it and skip the prompt.
+        if auto_add is True and vertex is not None:
+            result = self._auto_add_preprocess(vertex, content, parent_vertex, acl)
+            if result is not None:
+                return result
+
+        # If the record type is a pamUser, then include the parent description.
+        if content.record_type == "pamUser" and parent_vertex is not None:
+            parent_pad = ""
+            if indent - 1 > 0:
+                parent_pad = "".ljust(2 * indent, ' ')
+            print(f"{parent_pad}{_h(parent_content.description)}")
+
+        pad = ""
+        if indent > 0:
+            pad = "".ljust(2 * indent, ' ')
+
+        print(f"{pad}{_h(content.description)}")
+
+        show_current_object = True
+        while show_current_object is True:
+            print(f"{pad}{bcolors.HEADER}Record Title:{bcolors.ENDC} {content.title}")
+
+            logging.debug(f"Fields: {content.fields}")
+
+            # Display the fields and return a list of the fields that are editable.
+            editable = self._prompt_display_fields(content=content, pad=pad)
+            if vertex is not None:
+                self._prompt_display_relationships(vertex=vertex, content=content, pad=pad)
+
+            while True:
+
+                shared_folder_uid = content.shared_folder_uid
+                if shared_folder_uid is None:
+                    shared_folder_uid = gateway_context.default_shared_folder_uid
+
+                count_prompt = ""
+                if item_count > 0:
+                    count_prompt = f"{bcolors.HEADER}[{item_count - items_left + 1}/{item_count}]{bcolors.ENDC}"
+                edit_add_prompt = f"{count_prompt} ({_b('E')})dit, ({_b('A')})dd, "
+                if dry_run is True:
+                    edit_add_prompt = ""
+                shared_folders = gateway_context.get_shared_folders(params)
+                if len(shared_folders) > 1 and dry_run is False:
+                    folder_name = next((x['name']
+                                        for x in shared_folders
+                                        if x['uid'] == shared_folder_uid),
+                                       None)
+                    edit_add_prompt = f"{count_prompt} ({_b('E')})dit, "\
+                                      f"({_b('A')})dd to {folder_name}, "\
+                                      f"Add to ({_b('F')})older, "
+                prompt = f"{edit_add_prompt}({_b('S')})kip, ({_b('I')})gnore, ({_b('Q')})uit"
+
+                command = "a"
+                if auto_add is False:
+                    command = input(f"{pad}{prompt}> ").lower()
+                if (command == "a" or command == "f") and dry_run is False:
+
+                    print(f"{pad}{bcolors.OKGREEN}Adding record to save queue.{bcolors.ENDC}")
+                    print("")
+
+                    if command == "f":
+                        shared_folder_uid = self._get_shared_folder(params, pad, gateway_context)
+
+                    content.shared_folder_uid = shared_folder_uid
+
+                    if content.record_type == "pamUser" and resource_has_admin is False:
+                        while True:
+                            yn = input(f"{parent_content.description} does not have an administrator. "
+                                       "Do you want to make this user the administrator? [Y/N]> ").lower()
+                            if yn == "":
+                                continue
+                            if yn[0] == "n":
+                                break
+                            if yn[0] == "y":
+                                acl.is_admin = True
+                                break
+
+                    return PromptResult(
+                        action=PromptActionEnum.ADD,
+                        acl=acl,
+                        content=content
+                    )
+
+                elif command == "e" and dry_run is False:
+                    self._edit_record(content, pad, editable)
+                    break
+
+                elif command == "i":
+                    return PromptResult(
+                        action=PromptActionEnum.IGNORE,
+                        acl=acl,
+                        content=content
+                    )
+
+                elif command == "s":
+                    print(f"{pad}{bcolors.OKBLUE}Skipping record{bcolors.ENDC}")
+                    return PromptResult(
+                        action=PromptActionEnum.SKIP,
+                        acl=acl,
+                        content=content
+                    )
+
+                elif command == "q":
+                    raise QuitException()
+            print()
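Reviewer note: every exit from `_prompt` above is a `PromptResult`, with the `UserAcl` riding along so the processor can mark a user as its resource's admin. The three outcomes, condensed (types from `discovery_common.types`; the downstream handling of each action lives in discovery_common's `Process`):

```python
add = PromptResult(action=PromptActionEnum.ADD, acl=acl, content=content)        # queue for save
skip = PromptResult(action=PromptActionEnum.SKIP, acl=acl, content=content)      # skip this item
ignore = PromptResult(action=PromptActionEnum.IGNORE, acl=acl, content=content)  # mark ignored
# "Quit" is not a result: _prompt raises QuitException and the caller handles it.
```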
+    def _find_user_record(self, params: KeeperParams, context: Optional[Any] = None) -> Optional[TypedRecord]:
+
+        gateway_context = context.get("gateway_context")  # type: GatewayContext
+        record_link = context.get("record_link")  # type: RecordLink
+
+        # Get the latest records.
+        params.sync_data = True
+
+        # Make a list of all records in the shared folders.
+        # We will use this to check if a selected user is in the shared folders.
+        shared_record_uids = []
+        for shared_folder in gateway_context.get_shared_folders(params):
+            folder = shared_folder.get("folder")
+            if "records" in folder:
+                for record in folder["records"]:
+                    shared_record_uids.append(record.get("record_uid"))
+
+        logging.debug(f"shared folders record uid {shared_record_uids}")
+
+        while True:
+            user_search = input("Enter a user to search for [ENTER/RETURN to quit]> ")
+            if user_search == "":
+                return None
+
+            # Search for records with the search string.
+            # Currently, this only works with TypedRecord, version 3.
+            search_results = list(vault_extensions.find_records(
+                params,
+                search_str=user_search,
+                record_version=3
+            ))
+            if len(search_results) == 0:
+                print(f"{bcolors.FAIL}Could not find any record.{bcolors.ENDC}")
+
+            # Find usable admin records.
+            admin_search_results = []  # type: List[AdminSearchResult]
+            for record in search_results:
+
+                user_record = vault.KeeperRecord.load(params, record.record_uid)
+                if user_record.record_type == "pamUser":
+
+                    # Does the record exist in the gateway shared folder?
+                    # We want to filter out other gateways' pamUser records, or it will get overwhelming.
+                    if user_record.record_uid not in shared_record_uids:
+                        logging.debug(f"pamUser {record.title}, {user_record.record_uid} not in shared "
+                                      "folder, skip")
+                        continue
+
+                    # # If a pamUser, make sure the user is part of our configuration.
+                    # record_rotation = params.record_rotation_cache.get(record.record_uid)
+                    # if record_rotation is not None:
+                    #     configuration_uid = record_rotation.get("configuration_uid")
+                    #     if configuration_uid is None or configuration_uid == "":
+                    #         logging.debug(f"pamUser {record.title}, {record.record_uid} does not have a "
+                    #                       "controller, skip")
+                    #         continue
+                    #     if configuration_uid != gateway_context.configuration_uid:
+                    #         logging.debug(f"pamUser {record.title}, {record.record_uid} controller is not this "
+                    #                       "controller, skip")
+                    #         continue
+                    # else:
+                    #     logging.debug(f"pamUser {record.title}, {record.record_uid} does not have rotation "
+                    #                   "settings.")
+
+                    # If the record does not exist in the record linking, it's orphaned; accept it.
+                    # If it does exist, check whether it belongs to a directory.
+                    # It is very unlikely a user that belongs to a database or another machine can be used.
+                    record_vertex = record_link.get_record_link(user_record.record_uid)
+                    is_directory_user = False
+                    if record_vertex is not None:
+                        parent_record_uid = record_link.get_parent_record_uid(user_record.record_uid)
+                        parent_record = vault.TypedRecord.load(params,
+                                                               parent_record_uid)  # type: Optional[TypedRecord]
+                        if parent_record is not None:
+                            is_directory_user = self._is_directory_user(parent_record.record_type)
+                            if is_directory_user is False:
+                                logging.debug(f"pamUser parent for {user_record.title}, "
+                                              f"{user_record.record_uid} is not a directory, skip")
+                                continue
+                        else:
+                            logging.debug(f"pamUser {user_record.title}, {user_record.record_uid} parent record "
+                                          "does not exist.")
+                    else:
+                        logging.debug(f"pamUser {user_record.title}, {user_record.record_uid} does not have a "
+                                      "record linking vertex.")
+
+                    admin_search_results.append(
+                        AdminSearchResult(
+                            record=user_record,
+                            is_directory_user=is_directory_user,
+                            is_pam_user=True
+                        )
+                    )
+                # Make sure it has a login and a password or private key.
+                else:
+                    logging.debug(f"{record.record_uid} is not a pamUser")
+                    login_field = next((x for x in record.fields if x.type == "login"), None)
+                    password_field = next((x for x in record.fields if x.type == "password"), None)
+                    private_key_field = next((x for x in record.fields if x.type == "keyPair"), None)
+
+                    if login_field is not None and (password_field is not None or private_key_field is not None):
+                        admin_search_results.append(
+                            AdminSearchResult(
+                                record=record,
+                                is_directory_user=False,
+                                is_pam_user=False
+                            )
+                        )
+                    else:
+                        logging.debug(f"{record.title} is missing full credentials, skip")
+
+            user_index = 1
+
+            admin_search_results = sorted(admin_search_results,
+                                          key=lambda x: x.is_pam_user,
+                                          reverse=True)
+
+            has_local_user = False
+            for admin_search_result in admin_search_results:
+                is_local_user = False
+                if admin_search_result.record.record_type != "pamUser":
+                    has_local_user = True
+                    is_local_user = True
+
+                print(f"{bcolors.HEADER}[{user_index}] {bcolors.ENDC}"
+                      f"{_b('* ') if is_local_user is True else ''}"
+                      f"{admin_search_result.record.title} "
+                      f'{"(Directory User)" if admin_search_result.is_directory_user is True else ""}')
+                user_index += 1
+
+            if has_local_user is True:
+                print(f"{bcolors.BOLD}* Not a PAM User record. "
+                      f"A PAM User record would be generated from this record.{bcolors.ENDC}")
+
+            select = input("Enter the line number of the user record to use, ENTER/RETURN to search again, "
+                           f"or {_b('Q')} to quit search. > ").lower()
+            if select == "":
+                continue
+            elif select[0] == "q":
+                return None
+            else:
+                try:
+                    return admin_search_results[int(select) - 1].record  # type: TypedRecord
+                except (IndexError, ValueError):
+                    print(f"{bcolors.FAIL}Entered row index does not exist.{bcolors.ENDC}")
+                    continue
+
+    @staticmethod
+    def _handle_admin_record_from_record(record: TypedRecord, content: DiscoveryObject, indent: int = 0,
+                                         context: Optional[Any] = None) -> Optional[PromptResult]:
+
+        params = context.get("params")  # type: KeeperParams
+        gateway_context = context.get("gateway_context")  # type: GatewayContext
+
+        # Is this a pamUser record?
+        # Return the record UID and set its ACL to be the admin.
+        if record.record_type == "pamUser":
+            return PromptResult(
+                action=PromptActionEnum.ADD,
+                acl=UserAcl(is_admin=True),
+                record_uid=record.record_uid,
+            )
+
+        # If we are here, this was not a pamUser record.
+        # We need to duplicate the record, but confirm with the user first.
+
+        # Get the credential fields from the old record and copy them into the new content.
+        login_field = next((x for x in record.fields if x.type == "login"), None)
+        password_field = next((x for x in record.fields if x.type == "password"), None)
+        private_key_field = next((x for x in record.fields if x.type == "keyPair"), None)
+
+        content.set_field_value("login", login_field.value)
+        if password_field is not None:
+            content.set_field_value("password", password_field.value)
+        if private_key_field is not None:
+            value = private_key_field.value
+            if value is not None and len(value) > 0:
+                value = value[0]
+                private_key = value.get("privateKey")
+                if private_key is not None:
+                    content.set_field_value("private_key", private_key)
+
+        # If there are no shared folders to choose from, just confirm creating the record in the
+        # default shared folder; otherwise let the user pick the folder.
+        shared_folders = gateway_context.get_shared_folders(params)
+        if len(shared_folders) == 0:
+            while True:
+                yn = input(f"Create a PAM User record from {record.title}? [Y/N]> ").lower()
+                if yn == "":
+                    continue
+                elif yn[0] == "n":
+                    return None
+                elif yn[0] == "y":
+                    content.shared_folder_uid = gateway_context.default_shared_folder_uid
+                    break
+        else:
+            folder_name = next((x['name']
+                                for x in shared_folders
+                                if x['uid'] == gateway_context.default_shared_folder_uid),
+                               None)
+            while True:
+                afq = input(f"({_b('A')})dd user to {folder_name}, "
+                            f"Add user to ({_b('F')})older, "
+                            f"({_b('Q')})uit >").lower()
+                if afq == "":
+                    continue
+                if afq[0] == "a":
+                    content.shared_folder_uid = gateway_context.default_shared_folder_uid
+                    break
+                elif afq[0] == "f":
+                    shared_folder_uid = PAMGatewayActionDiscoverResultProcessCommand._get_shared_folder(
+                        params, "", gateway_context)
+                    if shared_folder_uid is not None:
+                        content.shared_folder_uid = shared_folder_uid
+                        break
+                elif afq[0] == "q":
+                    return None
+
+        return PromptResult(
+            action=PromptActionEnum.ADD,
+            acl=UserAcl(is_admin=True),
+            content=content,
+            note=f"This record replaces record {record.title} ({record.record_uid}). "
+                 "The password on that record will not be rotated."
+        )
+
+    def _prompt_admin(self, parent_vertex: DAGVertex, content: DiscoveryObject, acl: UserAcl,
+                      indent: int = 0, context: Optional[Any] = None) -> PromptResult:
+
+        if content is None:
+            raise Exception("The admin content was not passed in to prompt the user.")
+
+        params = context.get("params")
+
+        parent_content = DiscoveryObject.get_discovery_object(parent_vertex)
+
+        print("")
+        while True:
+
+            print(f"{bcolors.BOLD}{parent_content.description} does not have an administrator user.{bcolors.ENDC}")
+
+            action = input("Would you like to "
+                           f"({_b('A')})dd a new administrator user, "
+                           f"({_b('F')})ind an existing admin, or "
+                           f"({_b('S')})kip add? > ").lower()
+
+            if action == "":
+                continue
+
+            if action[0] == 'a':
+                prompt_result = self._prompt(
+                    vertex=None,
+                    parent_vertex=parent_vertex,
+                    content=content,
+                    acl=acl,
+                    context=context,
+                    indent=indent + 2
+                )
+                login = content.get_field_value("login")
+                if login is None or login == "":
+                    print("")
+                    print(f"{bcolors.FAIL}A value is needed for the login field.{bcolors.ENDC}")
+                    continue
+
+                print(f"{bcolors.OKGREEN}Adding admin record to save queue.{bcolors.ENDC}")
+                return prompt_result
+            elif action[0] == 'f':
+                record = self._find_user_record(params, context=context)
+                if record is not None:
+                    admin_prompt_result = self._handle_admin_record_from_record(
+                        record=record,
+                        content=content,
+                        context=context
+                    )
+                    if admin_prompt_result is not None:
+                        if admin_prompt_result.action == PromptActionEnum.ADD:
+                            print(f"{bcolors.OKGREEN}Adding admin record to save queue.{bcolors.ENDC}")
+                        return admin_prompt_result
+            elif action[0] == 's':
+                return PromptResult(
+                    action=PromptActionEnum.SKIP
+                )
+            print("")
+
+    @staticmethod
+    def _prompt_confirm_add(bulk_add_records: List[BulkRecordAdd]):
+
+        """
+        If the user quits, ask whether the records they already selected for addition
+        should still be added to the vault.
+        """
+
+        print("")
+        count = len(bulk_add_records)
+        if count == 1:
+            msg = (f"{bcolors.BOLD}There is 1 record queued to be added to your vault. "
+                   f"Do you wish to add it? [Y/N]> {bcolors.ENDC}")
+        else:
+            msg = (f"{bcolors.BOLD}There are {count} records queued to be added to your vault. "
+                   f"Do you wish to add them? [Y/N]> {bcolors.ENDC}")
+        while True:
+            yn = input(msg).lower()
+            if yn == "":
+                continue
+            if yn[0] == "y":
+                return True
+            elif yn[0] == "n":
+                return False
+            print(f"{bcolors.FAIL}Did not get 'Y' or 'N'{bcolors.ENDC}")
+
+    @staticmethod
+    def _prepare_record(content: DiscoveryObject, context: Optional[Any] = None) -> (Any, str):
+
+        """
+        Prepare the vault record side.
+
+        The record is not created here; it will be created in bulk at the end of the
+        processing run. We build the record now so that we have a record UID to work with.
+
+        :param content: The discovery object instance.
+        :param context: Optionally contains information set from the run() method.
+        :returns: An unsaved record-add protobuf message and the new record UID.
+        """
+
+        params = context.get("params")
+
+        # DEFINE V3 RECORD
+
+        # Create an instance of a vault record to structure the data.
+        record = vault.TypedRecord()
+        record.type_name = content.record_type
+        record.record_uid = utils.generate_uid()
+        record.record_key = utils.generate_aes_key()
+        record.title = content.title
+        for field in content.fields:
+            field_args = {
+                "field_type": field.type,
+                "field_value": field.value
+            }
+            if field.type != field.label:
+                field_args["field_label"] = field.label
+            record_field = vault.TypedField.new_field(**field_args)
+            record_field.required = field.required
+            record.fields.append(record_field)
+
+        folder = params.folder_cache.get(content.shared_folder_uid)
+        folder_key = None  # type: Optional[bytes]
+        if isinstance(folder, subfolder.SharedFolderFolderNode):
+            shared_folder_uid = folder.shared_folder_uid
+        elif isinstance(folder, subfolder.SharedFolderNode):
+            shared_folder_uid = folder.uid
+        else:
+            shared_folder_uid = None
+        if shared_folder_uid and shared_folder_uid in params.shared_folder_cache:
+            shared_folder = params.shared_folder_cache.get(shared_folder_uid)
+            folder_key = shared_folder.get('shared_folder_key_unencrypted')
+
+        # DEFINE PROTOBUF FOR RECORD
+
+        record_add_protobuf = record_pb2.RecordAdd()
+        record_add_protobuf.record_uid = utils.base64_url_decode(record.record_uid)
+        record_add_protobuf.record_key = crypto.encrypt_aes_v2(record.record_key, params.data_key)
+        record_add_protobuf.client_modified_time = utils.current_milli_time()
+        record_add_protobuf.folder_type = record_pb2.user_folder
+        if folder:
+            record_add_protobuf.folder_uid = utils.base64_url_decode(folder.uid)
+            if folder.type == 'shared_folder':
+                record_add_protobuf.folder_type = record_pb2.shared_folder
+            elif folder.type == 'shared_folder_folder':
+                record_add_protobuf.folder_type = record_pb2.shared_folder_folder
+            if folder_key:
+                record_add_protobuf.folder_key = crypto.encrypt_aes_v2(record.record_key, folder_key)
+
+        data = vault_extensions.extract_typed_record_data(record)
+        json_data = api.get_record_data_json_bytes(data)
+        record_add_protobuf.data = crypto.encrypt_aes_v2(json_data, record.record_key)
+
+        # refs = vault_extensions.extract_typed_record_refs(record)
+        # for ref in refs:
+        #     ref_record_key = None  # type: Optional[bytes]
+        #     if record.linked_keys:
+        #         ref_record_key = record.linked_keys.get(ref)
+        #     if not ref_record_key:
+        #         ref_record = vault.KeeperRecord.load(params, ref)
+        #         if ref_record:
+        #             ref_record_key = ref_record.record_key
+        #
+        #     if ref_record_key:
+        #         link = record_pb2.RecordLink()
+        #         link.record_uid = utils.base64_url_decode(ref)
+        #         link.record_key = crypto.encrypt_aes_v2(ref_record_key, record.record_key)
+        #         add_record.record_links.append(link)
+
+        if params.enterprise_ec_key:
+            audit_data = vault_extensions.extract_audit_data(record)
+            if audit_data:
+                record_add_protobuf.audit.version = 0
+                record_add_protobuf.audit.data = crypto.encrypt_ec(
+                    json.dumps(audit_data).encode('utf-8'), params.enterprise_ec_key)
+
+        return record_add_protobuf, record.record_uid
+
+    @classmethod
+    def _create_records(cls, bulk_add_records: List[BulkRecordAdd], context: Optional[Any] = None) -> (
+            BulkProcessResults):
+
+        if len(bulk_add_records) == 1:
+            print("Adding the record to the Vault ...")
+        else:
+            print(f"Adding {len(bulk_add_records)} records to the Vault ...")
+
+        params = context.get("params")
+        gateway_context = context.get("gateway_context")
+
+        build_process_results = BulkProcessResults()
+
+        # STEP 1 - Batch add the new records.
+
+        # Generate a list of RecordAdd instances.
+        # In BulkRecordAdd they are the record instances.
+        record_add_list = [r.record for r in bulk_add_records]  # type: List[record_pb2.RecordAdd]
+
+        records_per_request = 999
+
+        add_results = []  # type: List[record_pb2.RecordModifyResult]
+        logging.debug("adding records in batches")
+        while record_add_list:
+            logging.debug("* adding batch")
+            rq = record_pb2.RecordsAddRequest()
+            rq.client_time = utils.current_milli_time()
+            rq.records.extend(record_add_list[:records_per_request])
+            record_add_list = record_add_list[records_per_request:]
+            rs = api.communicate_rest(params, rq, 'vault/records_add', rs_type=record_pb2.RecordsModifyResponse)
+            add_results.extend(rs.records)
+
+        logging.debug(f"add_result: {add_results}")
+
+        if len(add_results) != len(bulk_add_records):
+            logging.debug(f"attempted to batch add {len(bulk_add_records)} record(s), "
+                          f"only have {len(add_results)} results.")
+
+        # STEP 2 - Add the rotation settings.
+        # Use the list we passed in, find the results, and add the settings if the additions were successful.
+
+        # For each record passed in to be created:
+        for bulk_record in bulk_add_records:
+            # Grab the protobuf record instance, and the title from that record.
+            pb_add_record = bulk_record.record
+            title = bulk_record.title
+
+            rotation_disabled = False
+
+            # Find the result for this record.
+            result = None
+            for x in add_results:
+                logging.debug(f"{pb_add_record.record_uid} vs {x.record_uid}")
+                if pb_add_record.record_uid == x.record_uid:
+                    result = x
+                    break
+
+            # If we didn't get a result, then don't add the rotation settings.
+            if result is None:
+                build_process_results.failure.append(
+                    BulkRecordFail(
+                        title=title,
+                        error="No status on addition to Vault. Cannot determine if added or not."
+                    )
+                )
+                logging.debug(f"Did not get a result when adding record {title}")
+                continue
+
+            # Check if the addition failed. If it did fail, don't add the rotation settings.
+            success = (result.status == record_pb2.RecordModifyResult.DESCRIPTOR.values_by_name['RS_SUCCESS'].number)
+            status = record_pb2.RecordModifyResult.DESCRIPTOR.values_by_number[result.status].name
+
+            if success is False:
+                build_process_results.failure.append(
+                    BulkRecordFail(
+                        title=title,
+                        error=status
+                    )
+                )
+                logging.debug(f"Had problem adding record for {title}: {status}")
+                continue
+
+            rq = router_pb2.RouterRecordRotationRequest()
+            rq.recordUid = url_safe_str_to_bytes(bulk_record.record_uid)
+            rq.revision = 0
+
+            # Set the gateway/configuration that this record should be connected to.
+            rq.configurationUid = url_safe_str_to_bytes(gateway_context.configuration_uid)
+
+            # Only set the resource if the record type is a PAM User.
+            # Machines, databases, and directories have a login/password in the record that indicates who the
+            # admin is.
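+            # For reference, the rotation request built in this block ends up shaped
+            # like the following (values are illustrative, not from a real vault):
+            #
+            #   rq.recordUid        = url_safe_str_to_bytes("<record uid>")
+            #   rq.configurationUid = url_safe_str_to_bytes("<configuration uid>")
+            #   rq.resourceUid      = url_safe_str_to_bytes("<parent uid>")  # pamUser records only
+            #   rq.schedule         = ''   # no rule engine yet
+            #   rq.pwdComplexity    = b''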
+            if bulk_record.record_type == "pamUser":
+                rq.resourceUid = url_safe_str_to_bytes(bulk_record.parent_record_uid)
+
+            # Right now, the schedule and password complexity are not set. This would be part of a rule engine.
+            rq.schedule = ''
+            rq.pwdComplexity = b''
+            rq.disabled = rotation_disabled
+
+            router_set_record_rotation_information(params, rq)
+
+            build_process_results.success.append(
+                BulkRecordSuccess(
+                    title=title,
+                    record_uid=bulk_record.record_uid
+                )
+            )
+
+        params.sync_data = True
+
+        return build_process_results
+
+    @classmethod
+    def _convert_records(cls, bulk_convert_records: List[BulkRecordConvert], context: Optional[Any] = None):
+
+        params = context.get("params")
+        gateway_context = context.get("gateway_context")
+
+        for bulk_convert_record in bulk_convert_records:
+
+            record = vault.KeeperRecord.load(params, bulk_convert_record.record_uid)
+
+            rotation_disabled = False
+
+            rq = router_pb2.RouterRecordRotationRequest()
+            rq.recordUid = url_safe_str_to_bytes(bulk_convert_record.record_uid)
+            record_rotation_revision = params.record_rotation_cache.get(bulk_convert_record.record_uid)
+            rq.revision = record_rotation_revision.get('revision') if record_rotation_revision else 0
+
+            # Set the gateway/configuration that this record should be connected to.
+            rq.configurationUid = url_safe_str_to_bytes(gateway_context.configuration_uid)
+
+            # Only set the resource if the record type is a PAM User.
+            # Machines, databases, and directories have a login/password in the record that indicates who the
+            # admin is.
+            if record.record_type == "pamUser":
+                rq.resourceUid = url_safe_str_to_bytes(bulk_convert_record.parent_record_uid)
+            else:
+                # Leave the resource unset for non-user records; assigning None to a
+                # protobuf scalar field would raise a TypeError.
+                rq.ClearField('resourceUid')
+
+            # Right now, the schedule and password complexity are not set. This would be part of a rule engine.
+            rq.schedule = ''
+            rq.pwdComplexity = b''
+            rq.disabled = rotation_disabled
+
+            router_set_record_rotation_information(params, rq)
+
+        params.sync_data = True
+
+    @staticmethod
+    def _get_directory_info(domain: str,
+                            skip_users: bool = False,
+                            context: Optional[Any] = None) -> Optional[DirectoryInfo]:
+        """
+        Get information about this domain from the vault records.
+        """
+
+        params = context.get("params")
+        gateway_context = context.get("gateway_context")
+
+        directory_info = DirectoryInfo()
+
+        # Find all the directory records, for this gateway, whose domain matches the one we are looking for.
+        for directory_record in vault_extensions.find_records(params, record_type="pamDirectory"):
+            directory_record = vault.TypedRecord.load(params,
+                                                      directory_record.record_uid)  # type: Optional[TypedRecord]
+
+            info = params.record_rotation_cache.get(directory_record.record_uid)
+            if info is None:
+                continue
+
+            # Make sure this directory record is part of this gateway.
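+            # The rotation cache entry is assumed (from its use here and in
+            # _convert_records above) to be a dict shaped roughly like:
+            #   {"configuration_uid": "<uid>", "resource_uid": "<uid>", "revision": 0}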
+            if info.get("configuration_uid") != gateway_context.configuration_uid:
+                continue
+
+            domain_field = directory_record.get_typed_field("text", label="domainName")
+            if domain_field is None or len(domain_field.value) == 0 or domain_field.value[0] == "":
+                continue
+
+            if domain_field.value[0].lower() != domain.lower():
+                continue
+
+            directory_info.directory_record_uids.append(directory_record.record_uid)
+
+        if directory_info.has_directories is True and skip_users is False:
+
+            for user_record in vault_extensions.find_records(params, record_type="pamUser"):
+                info = params.record_rotation_cache.get(user_record.record_uid)
+                if info is None:
+                    continue
+
+                if info.get("resource_uid") is None or info.get("resource_uid") == "":
+                    continue
+
+                # If the user belongs to one of the matched directories, add it to the directory user list.
+                if info.get("resource_uid") in directory_info.directory_record_uids:
+                    directory_info.directory_user_record_uids.append(user_record.record_uid)
+
+        return directory_info
+
+    def execute(self, params: KeeperParams, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        job_id = kwargs.get("job_id")
+        auto_add = kwargs.get("add_all", False)
+
+        # Right now, keep dry_run False. We might add it back in.
+        dry_run = kwargs.get("dry_run", False)
+
+        configuration_records = list(vault_extensions.find_records(params, "pam.*Configuration"))
+        for configuration_record in configuration_records:
+
+            gateway_context = GatewayContext.from_configuration_uid(params, configuration_record.record_uid)
+            if gateway_context is None:
+                continue
+
+            record_cache = self._build_record_cache(
+                params=params,
+                gateway_context=gateway_context
+            )
+
+            # Get the current job.
+            # There can only be one active job.
+            # This will give us the sync point for the delta.
+            jobs = Jobs(record=configuration_record, params=params)
+            job_item = jobs.current_job
+            if job_item is None:
+                continue
+
+            # If this is not the job we are looking for, continue to the next gateway.
+            if job_item.job_id != job_id:
+                continue
+
+            if job_item.end_ts is None:
+                print(f'{bcolors.FAIL}Discovery job is currently running. Cannot process.{bcolors.ENDC}')
+                return
+            if job_item.success is False:
+                print(f'{bcolors.FAIL}Discovery job failed. Cannot process.{bcolors.ENDC}')
+                return
+
+            process = Process(
+                record=configuration_record,
+                job_id=job_item.job_id,
+                params=params,
+                logger=logging
+            )
+
+            if dry_run is True:
+                if auto_add is True:
+                    logging.debug("dry run has been set, disable auto add.")
+                    auto_add = False
+
+                print(f"{bcolors.HEADER}The DRY RUN flag has been set. The rule engine will not add any records. "
+                      f"You will not be prompted to edit or add records.{bcolors.ENDC}")
+                print("")
+
+            if auto_add is True:
+                print(f"{bcolors.HEADER}The AUTO ADD flag has been set. All found items will be added.{bcolors.ENDC}")
+                print("")
+
+            try:
+                results = process.run(
+                    # Prompt the user about adding records.
+                    prompt_func=self._prompt,
+
+                    # Prompt the user for an admin for a resource.
+                    prompt_admin_func=self._prompt_admin,
+
+                    # If quitting, confirm whether the user wants to add the queued records.
+                    prompt_confirm_add_func=self._prompt_confirm_add,
+
+                    # Prepare records and place them in the queue; does not add records to the vault.
+                    record_prepare_func=self._prepare_record,
+
+                    # Add records to the vault, protobuf, and record-linking graph.
+                    record_create_func=self._create_records,
+
+                    # This function takes existing pamUser records and makes them belong to this
+                    # gateway.
+                    record_convert_func=self._convert_records,
+
+                    # A function to get directory users.
+                    directory_info_func=self._get_directory_info,
+
+                    # Provides a cache of record keys to record UIDs.
+                    record_cache=record_cache,
+
+                    # Commander-specific context.
+                    # The record link will be added by Process.run() as "record_link".
+                    context={
+                        "params": params,
+                        "gateway_context": gateway_context,
+                        "dry_run": dry_run,
+                        "auto_add": auto_add
+                    }
+                )
+
+                logging.debug(f"Results: {results}")
+
+                print("")
+                if results is not None and results.num_results > 0:
+                    print(f"{bcolors.OKGREEN}Successfully added {results.success_count} "
+                          f"record{'s' if results.success_count != 1 else ''}.{bcolors.ENDC}")
+                    if results.has_failures is True:
+                        print(f"{bcolors.FAIL}There were {results.failure_count} "
+                              f"failure{'s' if results.failure_count != 1 else ''}.{bcolors.ENDC}")
+                        for fail in results.failure:
+                            print(f"  * {fail.title}: {fail.error}")
+                else:
+                    print(f"{bcolors.FAIL}No records have been added.{bcolors.ENDC}")
+
+            except NoDiscoveryDataException:
+                print(f"{bcolors.OKGREEN}All items have been added for this discovery job.{bcolors.ENDC}")
+
+            except Exception as err:
+                print(f"{bcolors.FAIL}Could not process discovery: {err}{bcolors.ENDC}")
+                raise err
+
+            return
+
+        print(f"{bcolors.HEADER}Could not find the Discovery job.{bcolors.ENDC}")
+        print("")
diff --git a/keepercommander/commands/discover/rule_add.py b/keepercommander/commands/discover/rule_add.py
new file mode 100644
index 000000000..fea559af3
--- /dev/null
+++ b/keepercommander/commands/discover/rule_add.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+import argparse
+import logging
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ..pam.pam_dto import GatewayActionDiscoverRuleValidateInputs, GatewayActionDiscoverRuleValidate, GatewayAction
+from ..pam.router_helper import router_send_action_to_gateway, router_get_connected_gateways
+from ...display import bcolors
+from ...proto import pam_pb2
+from discovery_common.rule import Rules
+from discovery_common.types import ActionRuleItem
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...params import KeeperParams
+
+
+class PAMGatewayActionDiscoverRuleAddCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-rule-add')
+    parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store',
+                        help='Gateway name or UID.')
+    parser.add_argument('--action', '-a', required=True, choices=['add', 'ignore', 'prompt'],
+                        dest='rule_action', action='store', help='Action to take if the rule matches')
+    parser.add_argument('--priority', '-p', required=True, dest='priority', action='store', type=int,
+                        help='Rule execution priority')
+    parser.add_argument('--ignore-case', required=False, dest='ignore_case', action='store_true',
+                        help='Ignore value case. Rule values must be in lowercase.')
+    parser.add_argument('--shared-folder-uid', required=False, dest='shared_folder_uid',
+                        action='store', help='Folder to place the record in.')
+    parser.add_argument('--statement', '-s', required=True, dest='statement', action='store',
+                        help='Rule statement')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverRuleAddCommand.parser
+
+    @staticmethod
+    def validate_rule_statement(params: KeeperParams, gateway_context: GatewayContext, statement: str):
+
+        # Send the rule to the gateway to be validated. The rule is encrypted, since it might contain
+        # sensitive information.
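+        # Example invocation from the shell (flag values are illustrative):
+        #
+        #   pam action discover rule add -g MyGateway -a ignore -p 10 -s '<rule statement>'
+        #
+        # The statement travels encrypted with the configuration record key, since it
+        # may contain sensitive values.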
+        action_inputs = GatewayActionDiscoverRuleValidateInputs(
+            configuration_uid=gateway_context.configuration_uid,
+            statement=gateway_context.encrypt_str(statement)
+        )
+        conversation_id = GatewayAction.generate_conversation_id()
+        router_response = router_send_action_to_gateway(
+            params=params,
+            gateway_action=GatewayActionDiscoverRuleValidate(
+                inputs=action_inputs,
+                conversation_id=conversation_id),
+            message_type=pam_pb2.CMT_GENERAL,
+            is_streaming=False,
+            destination_gateway_uid_str=gateway_context.gateway_uid
+        )
+
+        data = PAMGatewayActionDiscoverCommandBase.get_response_data(router_response)
+
+        if data is None:
+            raise Exception("The router returned a failure.")
+        elif data.get("success") is False:
+            error = data.get("error")
+            raise Exception(f"The rule does not appear valid: {error}")
+
+        statement_struct = data.get("statementStruct")
+        logging.debug(f"Rule Structure = {statement_struct}")
+        if isinstance(statement_struct, list) is False:
+            raise Exception("The structured rule statement is not a list.")
+
+        return statement_struct
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        try:
+            gateway = kwargs.get("gateway")
+            gateway_context = GatewayContext.from_gateway(params, gateway)
+            if gateway_context is None:
+                print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}')
+                return
+
+            # If we are setting the shared_folder_uid, make sure it exists.
+            shared_folder_uid = kwargs.get("shared_folder_uid")
+            if shared_folder_uid is not None:
+                shared_folder_uids = gateway_context.get_shared_folders(params)
+                exists = next((x for x in shared_folder_uids if x["uid"] == shared_folder_uid), None)
+                if exists is None:
+                    print(f"{bcolors.FAIL}The shared folder UID {shared_folder_uid} is not part of this "
+                          f"application/gateway. Valid shared folder UIDs are:{bcolors.ENDC}")
+                    for item in shared_folder_uids:
+                        print(f"* {item['uid']} - {item['name']}")
+                    return
+
+            statement = kwargs.get("statement")
+            statement_struct = self.validate_rule_statement(
+                params=params,
+                gateway_context=gateway_context,
+                statement=statement
+            )
+
+            # If the rule passes its validation, add it to the control DAG.
+            rules = Rules(record=gateway_context.configuration, params=params)
+            new_rule = ActionRuleItem(
+                action=kwargs.get("rule_action"),
+                priority=kwargs.get("priority"),
+                case_sensitive=not kwargs.get("ignore_case", False),
+                shared_folder_uid=kwargs.get("shared_folder_uid"),
+                statement=statement_struct,
+                enabled=True
+            )
+            rules.add_rule(new_rule)
+
+            print(f"{bcolors.OKGREEN}Rule has been added{bcolors.ENDC}")
+        except Exception as err:
+            print(f"{bcolors.FAIL}Rule was not added: {err}{bcolors.ENDC}")
diff --git a/keepercommander/commands/discover/rule_list.py b/keepercommander/commands/discover/rule_list.py
new file mode 100644
index 000000000..28aa8fa54
--- /dev/null
+++ b/keepercommander/commands/discover/rule_list.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+import argparse
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ...display import bcolors
+from ..pam.router_helper import router_get_connected_gateways
+from discovery_common.rule import Rules
+from discovery_common.types import RuleTypeEnum
+from typing import List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from discovery_common.types import RuleItem
+
+
+class PAMGatewayActionDiscoverRuleListCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-rule-list')
+    parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store',
+                        help='Gateway name or UID.')
+    parser.add_argument('--search', '-s', required=False, dest='search', action='store',
+                        help='Search for rules.')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverRuleListCommand.parser
+
+    @staticmethod
+    def print_rule_table(rule_list: List[RuleItem]):
+
+        print("")
+        print(f"{bcolors.HEADER}{'Rule ID'.ljust(15, ' ')} "
+              f"{'Action'.ljust(6, ' ')} "
+              f"{'Priority'.ljust(8, ' ')} "
+              f"{'Case'.ljust(12, ' ')} "
+              f"{'Added'.ljust(19, ' ')} "
+              f"{'Shared Folder UID'.ljust(22, ' ')} "
+              "Rule"
+              f"{bcolors.ENDC}")
+
+        print(f"{''.ljust(15, '=')} "
+              f"{''.ljust(6, '=')} "
+              f"{''.ljust(8, '=')} "
+              f"{''.ljust(12, '=')} "
+              f"{''.ljust(19, '=')} "
+              f"{''.ljust(22, '=')} "
+              f"{''.ljust(10, '=')} ")
+
+        for rule in rule_list:
+            if rule.case_sensitive is True:
+                ignore_case_str = "Sensitive"
+            else:
+                ignore_case_str = "Insensitive"
+
+            shared_folder_uid = ""
+            if rule.shared_folder_uid is not None:
+                shared_folder_uid = rule.shared_folder_uid
+            print(f"{bcolors.OKGREEN}{rule.rule_id.ljust(15, ' ')}{bcolors.ENDC} "
+                  f"{rule.action.value.ljust(6, ' ')} "
+                  f"{str(rule.priority).rjust(8, ' ')} "
+                  f"{ignore_case_str.ljust(12, ' ')} "
+                  f"{rule.added_ts_str.ljust(19, ' ')} "
+                  f"{shared_folder_uid.ljust(22, ' ')} "
+                  f"{Rules.make_action_rule_statement_str(rule.statement)}")
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        gateway = kwargs.get("gateway")
+        gateway_context = GatewayContext.from_gateway(params, gateway)
+        if gateway_context is None:
+            print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}')
+            return
+
+        rules = Rules(record=gateway_context.configuration, params=params)
+        rule_list = rules.rule_list(rule_type=RuleTypeEnum.ACTION,
+                                    search=kwargs.get("search"))  # type: List[RuleItem]
+        if len(rule_list) == 0:
+            print(f"{bcolors.FAIL}There are no rules. Use 'pam action discover rule add' "
+                  f"to create rules.{bcolors.ENDC}")
+            return
+
+        self.print_rule_table(rule_list=rule_list)
diff --git a/keepercommander/commands/discover/rule_remove.py b/keepercommander/commands/discover/rule_remove.py
new file mode 100644
index 000000000..e2e0b21d0
--- /dev/null
+++ b/keepercommander/commands/discover/rule_remove.py
@@ -0,0 +1,40 @@
+import argparse
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ..pam.router_helper import router_get_connected_gateways
+from ...display import bcolors
+from discovery_common.rule import Rules
+from discovery_common.types import RuleTypeEnum
+
+
+class PAMGatewayActionDiscoverRuleRemoveCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-rule-remove')
+    parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store',
+                        help='Gateway name or UID')
+    parser.add_argument('--rule-id', '-i', required=True, dest='rule_id', action='store',
+                        help='Identifier for the rule')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverRuleRemoveCommand.parser
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        gateway = kwargs.get("gateway")
+        gateway_context = GatewayContext.from_gateway(params, gateway)
+        if gateway_context is None:
+            print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}')
+            return
+
+        try:
+            rule_id = kwargs.get("rule_id")
+            rules = Rules(record=gateway_context.configuration, params=params)
+            rule_item = rules.get_rule_item(rule_type=RuleTypeEnum.ACTION, rule_id=rule_id)
+            if rule_item is None:
+                raise ValueError("Rule Id does not exist.")
+            rules.remove_rule(rule_item)
+
+            print(f"{bcolors.OKGREEN}Rule has been removed.{bcolors.ENDC}")
+        except Exception as err:
+            print(f"{bcolors.FAIL}Rule was not removed: {err}{bcolors.ENDC}")
diff --git a/keepercommander/commands/discover/rule_update.py b/keepercommander/commands/discover/rule_update.py
new file mode 100644
index 000000000..b54eafd34
--- /dev/null
+++ b/keepercommander/commands/discover/rule_update.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+import argparse
+from . import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from .rule_add import PAMGatewayActionDiscoverRuleAddCommand
+from ..pam.router_helper import router_get_connected_gateways
+from ...display import bcolors
+from discovery_common.rule import Rules, RuleTypeEnum
+
+
+class PAMGatewayActionDiscoverRuleUpdateCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-discover-rule-update')
+    parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store',
+                        help='Gateway name or UID.')
+    parser.add_argument('--rule-id', '-i', required=True, dest='rule_id', action='store',
+                        help='Identifier for the rule')
+    parser.add_argument('--action', '-a', required=False, choices=['add', 'ignore', 'prompt'],
+                        dest='rule_action', action='store', help='Update the action to take if the rule matches')
+    parser.add_argument('--priority', '-p', required=False, dest='priority', action='store', type=int,
+                        help='Update the rule execution priority')
+    parser.add_argument('--ignore-case', required=False, dest='ignore_case', action='store_true',
+                        help='Update the rule to ignore case')
+    parser.add_argument('--no-ignore-case', required=False, dest='ignore_case', action='store_false',
+                        help='Update the rule to not ignore case')
+    parser.add_argument('--shared-folder-uid', required=False, dest='shared_folder_uid',
+                        action='store', help='Update the folder to place the record in.')
+    parser.add_argument('--statement', '-s', required=False, dest='statement', action='store',
+                        help='Update the rule statement')
+
+    def get_parser(self):
+        return PAMGatewayActionDiscoverRuleUpdateCommand.parser
+
+    def execute(self, params, **kwargs):
+
+        if not hasattr(params, 'pam_controllers'):
+            router_get_connected_gateways(params)
+
+        gateway = kwargs.get("gateway")
+        gateway_context = GatewayContext.from_gateway(params, gateway)
+        if gateway_context is None:
+            print(f'{bcolors.FAIL}Discovery job gateway [{gateway}] was not found.{bcolors.ENDC}')
+            return
+
+        try:
+            rule_id = kwargs.get("rule_id")
+            rules = Rules(record=gateway_context.configuration, params=params)
+            rule_item = rules.get_rule_item(rule_type=RuleTypeEnum.ACTION, rule_id=rule_id)
+            if rule_item is None:
+                raise ValueError("Rule Id does not exist.")
+
+            rule_action = kwargs.get("rule_action")
+            if rule_action is not None:
+                rule_item.action = RuleTypeEnum.find_enum(rule_action)
+            priority = kwargs.get("priority")
+            if priority is not None:
+                rule_item.priority = priority
+            ignore_case = kwargs.get("ignore_case")
+            if ignore_case is not None:
+                rule_item.case_sensitive = not ignore_case
+            shared_folder_uid = kwargs.get("shared_folder_uid")
+            if shared_folder_uid is not None:
+                rule_item.shared_folder_uid = shared_folder_uid
+            statement = kwargs.get("statement")
+            if statement is not None:
+                rule_item.statement = PAMGatewayActionDiscoverRuleAddCommand.validate_rule_statement(
+                    params=params,
+                    gateway_context=gateway_context,
+                    statement=statement
+                )
+            rules.update_rule(rule_item)
+            print(f"{bcolors.OKGREEN}Rule has been updated{bcolors.ENDC}")
+        except Exception as err:
+            print(f"{bcolors.FAIL}Rule was not updated: {err}{bcolors.ENDC}")
diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py
index d54d67ddb..a4ace46cd 100644
--- a/keepercommander/commands/discoveryrotation.py
+++ b/keepercommander/commands/discoveryrotation.py
@@ -22,6 +22,7 @@
 from datetime import datetime
 from typing import Dict, Optional, Any, Set, List
+
 import requests
 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import serialization
@@ -33,12 +34,16 @@
 from .ksm import KSMCommand
 from .pam import gateway_helper, router_helper
 from .pam.config_facades import PamConfigurationRecordFacade
-from .pam.config_helper import pam_configurations_get_all, pam_configuration_remove, pam_configuration_create_record_v6, record_rotation_get, \
+from .pam.config_helper import pam_configurations_get_all, \
+    pam_configuration_remove, pam_configuration_create_record_v6, record_rotation_get, \
     pam_decrypt_configuration_data
-from .pam.pam_dto import GatewayActionGatewayInfo, GatewayActionDiscoverInputs, GatewayActionDiscover, \
-    GatewayActionRotate, \
-    GatewayActionRotateInputs, GatewayAction, GatewayActionJobInfoInputs, \
-    GatewayActionJobInfo, GatewayActionJobCancel
+from .pam.pam_dto import (
+    GatewayActionGatewayInfo,
+    GatewayActionRotate,
+    GatewayActionRotateInputs, GatewayAction, GatewayActionJobInfoInputs,
+    GatewayActionJobInfo,
+    GatewayActionJobCancel)
+
 from .pam.router_helper import router_send_action_to_gateway, print_router_response, \
     router_get_connected_gateways, router_set_record_rotation_information, router_get_rotation_schedules, \
     get_router_url
@@ -53,6 +58,20 @@
 from ..proto.APIRequest_pb2 import GetKsmPublicKeysRequest, GetKsmPublicKeysResponse
 from ..subfolder import find_parent_top_folder, try_resolve_path, BaseFolderNode
 from ..vault import TypedField
+from .discover.job_start import PAMGatewayActionDiscoverJobStartCommand
+from .discover.job_status import PAMGatewayActionDiscoverJobStatusCommand
+from .discover.job_remove import PAMGatewayActionDiscoverJobRemoveCommand
+from .discover.result_process import PAMGatewayActionDiscoverResultProcessCommand
+from .discover.rule_add import PAMGatewayActionDiscoverRuleAddCommand
+from .discover.rule_list import PAMGatewayActionDiscoverRuleListCommand
+from .discover.rule_remove import PAMGatewayActionDiscoverRuleRemoveCommand
+from .discover.rule_update import PAMGatewayActionDiscoverRuleUpdateCommand
+from .pam_debug.acl import PAMDebugACLCommand
+from .pam_debug.alter import PAMDebugAlterCommand
+from .pam_debug.graph import PAMDebugGraphCommand
+from .pam_debug.info import PAMDebugInfoCommand
+from .pam_debug.verify import PAMDebugVerifyCommand
+from .pam_debug.version import PAMDebugVersionCommand
 
 
 def register_commands(commands):
@@ -122,22 +141,59 @@ def __init__(self):
         self.default_verb = 'list'
 
 
+class PAMDiscoveryCommand(GroupCommand):
+
+    def __init__(self):
+        super(PAMDiscoveryCommand, self).__init__()
+        self.register_command('start', PAMGatewayActionDiscoverJobStartCommand(), 'Start a discovery process', 's')
+        self.register_command('status', PAMGatewayActionDiscoverJobStatusCommand(), 'Status of discovery jobs', 'st')
+        self.register_command('remove', PAMGatewayActionDiscoverJobRemoveCommand(), 'Cancel or remove discovery jobs', 'r')
+        self.register_command('process', PAMGatewayActionDiscoverResultProcessCommand(), 'Process discovered items', 'p')
+        self.register_command('rule', PAMDiscoveryRuleCommand(), 'Manage discovery rules')
+
+        self.default_verb = 'status'
+
+
+class PAMDiscoveryRuleCommand(GroupCommand):
+
+    def __init__(self):
+        super(PAMDiscoveryRuleCommand, self).__init__()
+        self.register_command('add', PAMGatewayActionDiscoverRuleAddCommand(), 'Add a rule', 'a')
+        self.register_command('list', PAMGatewayActionDiscoverRuleListCommand(), 'List all rules', 'l')
+        self.register_command('remove', PAMGatewayActionDiscoverRuleRemoveCommand(), 'Remove a rule', 'r')
+        self.register_command('update', PAMGatewayActionDiscoverRuleUpdateCommand(), 'Update a rule', 'u')
+        self.default_verb = 'list'
+
+
 class GatewayActionCommand(GroupCommand):
 
     def __init__(self):
         super(GatewayActionCommand, self).__init__()
         self.register_command('gateway-info', PAMGatewayActionServerInfoCommand(), 'Info command', 'i')
-        self.register_command('unreleased-discover', PAMGatewayActionDiscoverCommand(), 'Discover command')
+        self.register_command('discover', PAMDiscoveryCommand(), 'Discover command', 'd')
         self.register_command('rotate', PAMGatewayActionRotateCommand(), 'Rotate command', 'r')
         self.register_command('job-info', PAMGatewayActionJobCommand(), 'View Job details', 'ji')
         self.register_command('job-cancel', PAMGatewayActionJobCommand(), 'View Job details', 'jc')
+        self.register_command('debug', PAMDebugCommand(), 'PAM debug information')
 
         # self.register_command('job-list', DRCmdListJobs(), 'List Running jobs')
 
 
+class PAMDebugCommand(GroupCommand):
+
+    def __init__(self):
+        super(PAMDebugCommand, self).__init__()
+        self.register_command('info', PAMDebugInfoCommand(), 'Debug a record', 'i')
+        self.register_command('graph', PAMDebugGraphCommand(), 'Render graphs', 'g')
+        self.register_command('verify', PAMDebugVerifyCommand(), 'Verify graphs', 'v')
+        self.register_command('alter', PAMDebugAlterCommand(), 'Alter graph information', 'a')
+        self.register_command('acl', PAMDebugACLCommand(), 'Control ACLs of PAM Users', 'c')
+        self.register_command('version', PAMDebugVersionCommand(), 'Show module versions')
+
+
 class PAMCmdListJobs(Command):
     parser = argparse.ArgumentParser(prog='pam action job-list')
-    parser.add_argument('--jobId', '-j', required=False, dest='job_id', action='store', help='ID of the Job running')
+    parser.add_argument('--jobId', '-j', required=False, dest='job_id', action='store',
+                        help='ID of the Job running')
 
     def get_parser(self):
         return PAMCmdListJobs.parser
@@ -495,19 +551,23 @@ def execute(self, params, **kwargs):
             record_uid = utils.base64_url_encode(s.recordUid)
             controller_uid = s.controllerUid
-            controller_details = next((ctr for ctr in enterprise_all_controllers if ctr.controllerUid == controller_uid), None)
+            controller_details = next(
+                (ctr for ctr in enterprise_all_controllers if ctr.controllerUid == controller_uid), None)
             configuration_uid = s.configurationUid
             configuration_uid_str = utils.base64_url_encode(configuration_uid)
-            pam_configuration = next((pam_config for pam_config in all_pam_config_records if pam_config.get('record_uid') == configuration_uid_str), None)
+            pam_configuration = next((pam_config for pam_config in all_pam_config_records if
+                                      pam_config.get('record_uid') == configuration_uid_str), None)
 
-            is_controller_online = any((poc for poc in enterprise_controllers_connected_uids_bytes if poc == controller_uid))
+            is_controller_online = any(
+                (poc for poc in enterprise_controllers_connected_uids_bytes if poc == controller_uid))
 
             row_color = ''
             if record_uid in params.record_cache:
                 row_color = bcolors.HIGHINTENSITYWHITE
                 rec = params.record_cache[record_uid]
 
-                data_json = rec['data_unencrypted'].decode('utf-8') if isinstance(rec['data_unencrypted'], bytes) else rec['data_unencrypted']
+                data_json = rec['data_unencrypted'].decode('utf-8') if isinstance(rec['data_unencrypted'], bytes) else \
+                    rec['data_unencrypted']
                 data = json.loads(data_json)
 
                 record_title = data.get('title')
@@ -556,7 +616,6 @@ def execute(self, params, **kwargs):
             else:
                 controller_stat_color = 
bcolors.WHITE - controller_color = bcolors.WHITE if is_controller_online: controller_color = bcolors.OKGREEN @@ -573,7 +632,8 @@ def execute(self, params, **kwargs): if not is_verbose: row.append(f"{bcolors.FAIL}[No config found]{bcolors.ENDC}") else: - row.append(f"{bcolors.FAIL}[No config found. Looks like configuration {configuration_uid_str} was removed but rotation schedule was not modified{bcolors.ENDC}") + row.append( + f"{bcolors.FAIL}[No config found. Looks like configuration {configuration_uid_str} was removed but rotation schedule was not modified{bcolors.ENDC}") else: pam_data_decrypted = pam_decrypt_configuration_data(pam_configuration) @@ -597,8 +657,10 @@ def execute(self, params, **kwargs): class PAMGatewayListCommand(Command): parser = argparse.ArgumentParser(prog='dr-gateway') - parser.add_argument('--force', '-f', required=False, default=False, dest='is_force', action='store_true', help='Force retrieval of gateways') - parser.add_argument('--verbose', '-v', required=False, default=False, dest='is_verbose', action='store_true', help='Verbose output') + parser.add_argument('--force', '-f', required=False, default=False, dest='is_force', action='store_true', + help='Force retrieval of gateways') + parser.add_argument('--verbose', '-v', required=False, default=False, dest='is_verbose', action='store_true', + help='Verbose output') def get_parser(self): return PAMGatewayListCommand.parser @@ -698,8 +760,8 @@ def execute(self, params, **kwargs): if is_verbose: row.append(f'{row_color}{c.deviceName}{bcolors.ENDC}') row.append(f'{row_color}{c.deviceToken}{bcolors.ENDC}') - row.append(f'{row_color}{datetime.fromtimestamp(c.created/1000)}{bcolors.ENDC}') - row.append(f'{row_color}{datetime.fromtimestamp(c.lastModified/1000)}{bcolors.ENDC}') + row.append(f'{row_color}{datetime.fromtimestamp(c.created / 1000)}{bcolors.ENDC}') + row.append(f'{row_color}{datetime.fromtimestamp(c.lastModified / 1000)}{bcolors.ENDC}') row.append(f'{row_color}{c.nodeId}{bcolors.ENDC}') table.append(row) @@ -726,9 +788,9 @@ def execute(self, params, **kwargs): pam_configuration_uid = kwargs.get('pam_configuration') is_verbose = kwargs.get('verbose') - if not pam_configuration_uid: # Print ALL root level configs + if not pam_configuration_uid: # Print ALL root level configs PAMConfigurationListCommand.print_root_rotation_setting(params, is_verbose) - else: # Print element configs (config that is not a root) + else: # Print element configs (config that is not a root) PAMConfigurationListCommand.print_pam_configuration_details(params, pam_configuration_uid, is_verbose) @staticmethod @@ -778,7 +840,7 @@ def print_root_rotation_setting(params, is_verbose=False): configurations = list(vault_extensions.find_records(params, record_version=6)) facade = PamConfigurationRecordFacade() - for c in configurations: # type: vault.TypedRecord + for c in configurations: # type: vault.TypedRecord if c.record_type in ('pamAwsConfiguration', 'pamAzureConfiguration', 'pamNetworkConfiguration'): facade.record = c shared_folder_parents = find_parent_top_folder(params, c.record_uid) @@ -799,9 +861,11 @@ def print_root_rotation_setting(params, is_verbose=False): table.append(row) else: - logging.warning(f'Following configuration is not in the shared folder: UID: %s, Title: %s', c.record_uid, c.title) + logging.warning(f'Following configuration is not in the shared folder: UID: %s, Title: %s', + c.record_uid, c.title) else: - logging.warning(f'Following configuration has unsupported type: UID: %s, Title: %s', c.record_uid, c.title) 
+ logging.warning(f'Following configuration has unsupported type: UID: %s, Title: %s', c.record_uid, + c.title) table.sort(key=lambda x: (x[1] or '')) dump_report_data(table, headers, fmt='table', filename="", row_number=False, column_width=None) @@ -869,8 +933,8 @@ def parse_pam_configuration(self, params, record, **kwargs): field.value.append(dict()) value = field.value[0] - gateway_uid = None # type: Optional[str] - gateway = kwargs.get('gateway') # type: Optional[str] + gateway_uid = None # type: Optional[str] + gateway = kwargs.get('gateway') # type: Optional[str] if gateway: gateways = gateway_helper.get_all_gateways(params) gateway_uid = next((utils.base64_url_encode(x.controllerUid) for x in gateways @@ -887,8 +951,8 @@ def parse_pam_configuration(self, params, record, **kwargs): # if len(shares) == 0: # raise Exception(f'Gateway %s has no shared folders', gateway.controllerName) - shared_folder_uid = None # type: Optional[str] - folder_name = kwargs.get('shared_folder') # type: Optional[str] + shared_folder_uid = None # type: Optional[str] + folder_name = kwargs.get('shared_folder') # type: Optional[str] if folder_name: if folder_name in params.shared_folder_cache: shared_folder_uid = folder_name @@ -991,7 +1055,7 @@ def parse_properties(self, params, record, **kwargs): # type: (KeeperParams, va if extra_properties: self.assign_typed_fields(record, [RecordEditMixin.parse_field(x) for x in extra_properties]) - def verify_required(self, record): # type: (vault.TypedRecord) -> None + def verify_required(self, record): # type: (vault.TypedRecord) -> None for field in record.fields: if field.required: if len(field.value) == 0: @@ -1165,6 +1229,7 @@ def execute(self, params, **kwargs): for w in self.warnings: logging.warning(w) + params.sync_data = True @@ -1256,7 +1321,7 @@ def execute(self, params, **kwargs): class PAMRouterScriptCommand(GroupCommand): def __init__(self): super().__init__() - self.register_command('list', PAMScriptListCommand(), 'List script fields') + self.register_command('list', PAMScriptListCommand(), 'List script fields') self.register_command('add', PAMScriptAddCommand(), 'List Record Rotation Schedulers') self.register_command('edit', PAMScriptEditCommand(), 'Add, delete, or edit script field') self.register_command('delete', PAMScriptDeleteCommand(), 'Delete script field') @@ -1386,7 +1451,7 @@ def execute(self, params, **kwargs): if not record_name: raise CommandError('rotate script', '"record" argument is required') - script_name = kwargs.get('script') # type: Optional[str] + script_name = kwargs.get('script') # type: Optional[str] if not script_name: raise CommandError('rotate script', '"script" argument is required') @@ -1461,7 +1526,7 @@ def execute(self, params, **kwargs): if not record_name: raise CommandError('rotate script', '"record" argument is required') - script_name = kwargs.get('script') # type: Optional[str] + script_name = kwargs.get('script') # type: Optional[str] if not script_name: raise CommandError('rotate script', '"script" argument is required') @@ -1508,7 +1573,6 @@ def get_parser(self): return PAMGatewayActionJobCancelCommand.parser def execute(self, params, **kwargs): - job_id = kwargs.get('job_id') print(f"Job id to cancel [{job_id}]") @@ -1535,7 +1599,6 @@ def get_parser(self): return PAMGatewayActionJobCommand.parser def execute(self, params, **kwargs): - job_id = kwargs.get('job_id') gateway_uid = kwargs.get('gateway_uid') @@ -1561,6 +1624,7 @@ class PAMGatewayActionRotateCommand(Command): parser = 
argparse.ArgumentParser(prog='dr-rotate-command')
     parser.add_argument('--record-uid', '-r', required=True, dest='record_uid', action='store',
                         help='Record UID to rotate')
+
     # parser.add_argument('--config', '-c', required=True, dest='configuration_uid', action='store',
     #                     help='Rotation configuration UID')
@@ -1593,7 +1657,8 @@ def execute(self, params, **kwargs):
         # rule_list_json = crypto.decrypt_aes_v2(utils.base64_url_decode(ri_pwd_complexity_encrypted), record.record_key)
         # complexity = json.loads(rule_list_json.decode())
 
-        ri_rotation_setting_uid = utils.base64_url_encode(ri.configurationUid)  # Configuration on the UI is "Rotation Setting"
+        ri_rotation_setting_uid = utils.base64_url_encode(
+            ri.configurationUid)  # Configuration on the UI is "Rotation Setting"
         resource_uid = utils.base64_url_encode(ri.resourceUid)
 
         pam_config = vault.KeeperRecord.load(params, ri_rotation_setting_uid)
@@ -1620,7 +1685,6 @@ def execute(self, params, **kwargs):
             print(f'{bcolors.WARNING}There are no connected gateways.{bcolors.ENDC}')
             return
 
-
         # rrs = RouterRotationStatus.Name(ri.status)
         # if rrs == 'RRS_NO_ROTATION':
         #     print(f'{bcolors.FAIL}Record [{record_uid}] does not have rotation associated with it.{bcolors.ENDC}')
@@ -1679,33 +1743,81 @@ def execute(self, params, **kwargs):
 
         print_router_response(router_response, response_type='gateway_info', is_verbose=is_verbose)
 
 
-class PAMGatewayActionDiscoverCommand(Command):
-    parser = argparse.ArgumentParser(prog='dr-discover-command')
-    parser.add_argument('--shared-folder', '-f', required=True, dest='shared_folder_uid', action='store',
-                        help='UID of the Shared Folder where results will be stored')
-    parser.add_argument('--provider-record', '-p', required=True, dest='provider_record_uid', action='store',
-                        help='Provider Record UID that defines network')
-    # parser.add_argument('--destinations', '-d', required=False, dest='destinations', action='store',
-    #                     help='Controller id')
+class PAMGatewayActionDiscoverCommandBase(Command):
 
-    def get_parser(self):
-        return PAMGatewayActionDiscoverCommand.parser
+    """
+    The discover command base.
 
-    def execute(self, params, **kwargs):
+    Contains static methods to get the configuration record, and to get and update the discovery store.
+    These are methods used by multiple discover actions.
+    """
 
-        provider_record_uid = kwargs.get('provider_record_uid')
-        shared_folder_uid = kwargs.get('shared_folder_uid')
+    # If the discovery data field does not exist, or the field contains no values, use the template to init the
+    # field.
+    STORE_VALUE_TEMPLATE = {
+        "ignore_list": [],
+        "jobs": []
+    }
 
-        action_inputs = GatewayActionDiscoverInputs(shared_folder_uid, provider_record_uid)
-        conversation_id = GatewayAction.generate_conversation_id()
+    STORE_LABEL = "discoveryStore"
 
-        router_response = router_send_action_to_gateway(
-            params,
-            GatewayActionDiscover(inputs=action_inputs, conversation_id=conversation_id),
-            message_type=pam_pb2.CMT_GENERAL,
-            is_streaming=False)
+    @staticmethod
+    def get_configuration(params, configuration_uid):
 
-        print_router_response(router_response, conversation_id)
+        configuration_record = vault.KeeperRecord.load(params, configuration_uid)
+        if not isinstance(configuration_record, vault.TypedRecord):
+            print(f'{bcolors.FAIL}PAM Configuration [{configuration_uid}] is not available.{bcolors.ENDC}')
+            return
+
+        configuration_facade = PamConfigurationRecordFacade()
+        configuration_facade.record = configuration_record
+
+        return configuration_record, configuration_facade
+
+    @staticmethod
+    def get_discovery_store(configuration_record):
+
+        # Get the discovery store. It contains information about the discovery jobs for a configuration.
+        # It is stored on the custom fields.
+        discovery_field = None
+        if configuration_record.custom is not None:
+            discovery_field = next((field
+                                    for field in configuration_record.custom
+                                    if field.label == PAMGatewayActionDiscoverCommandBase.STORE_LABEL),
+                                   None)
+
+        discovery_field_exists = True
+        if discovery_field is None:
+            logging.debug("discovery store field does not exist, creating")
+            discovery_field = TypedField.new_field("_hidden",
+                                                   [PAMGatewayActionDiscoverCommandBase.STORE_VALUE_TEMPLATE],
+                                                   PAMGatewayActionDiscoverCommandBase.STORE_LABEL)
+            discovery_field_exists = False
+        else:
+            logging.debug("discovery store field exists")
+
+        # The value should not be [], if it is, init with the defaults.
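+        # For reference, a freshly initialized store serializes as
+        #   [{"ignore_list": [], "jobs": []}]
+        # i.e. a single STORE_VALUE_TEMPLATE dict wrapped in the typed field's value list.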
+ if len(discovery_field.value) == 0: + logging.debug("discovery store does not have a value, set to the default value") + discovery_field.value = [PAMGatewayActionDiscoverCommandBase.STORE_VALUE_TEMPLATE] + + # TODO - REMOVE ME, this is just so we have one job + # discovery_field.value = [PAMGatewayActionDiscoverCommandBase.STORE_VALUE_TEMPLATE] + + return discovery_field.value[0], discovery_field, discovery_field_exists + + @staticmethod + def update_discovery_store(params, configuration_record, discovery_store, discovery_field, discovery_field_exists): + + discovery_field.value = [discovery_store] + if discovery_field_exists is False: + if configuration_record.custom is None: + configuration_record.custom = [] + configuration_record.custom.append(discovery_field) + + # Update the record here to prevent a race-condition + record_management.update_record(params, configuration_record) + params.sync_data = True class PAMGatewayRemoveCommand(Command): @@ -1731,7 +1843,6 @@ def execute(self, params, **kwargs): class PAMCreateGatewayCommand(Command): - dr_create_controller_parser = argparse.ArgumentParser(prog='dr-create-gateway') dr_create_controller_parser.add_argument('--name', '-n', required=True, dest='gateway_name', help='Name of the Gateway', @@ -1746,7 +1857,8 @@ class PAMCreateGatewayCommand(Command): dr_create_controller_parser.add_argument('--return_value', '-r', dest='return_value', action='store_true', help='Return value from the command for automation purposes') dr_create_controller_parser.add_argument('--config-init', '-c', type=str, dest='config_init', action='store', - choices=['json', 'b64'], help='Initialize client config and return configuration string.') # json, b64, file + choices=['json', 'b64'], + help='Initialize client config and return configuration string.') # json, b64, file def get_parser(self): return PAMCreateGatewayCommand.dr_create_controller_parser diff --git a/keepercommander/commands/pam/pam_dto.py b/keepercommander/commands/pam/pam_dto.py index 1814f4e3e..892b034dc 100644 --- a/keepercommander/commands/pam/pam_dto.py +++ b/keepercommander/commands/pam/pam_dto.py @@ -17,21 +17,63 @@ def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) -# ACTION INPUTS +# ACTION DISCOVER INPUTS + +class GatewayActionDiscoverJobStartInputs: + + def __init__(self, configuration_uid, user_map, shared_folder_uid, resource_uid=None, language="en", + # Settings + include_machine_dir_users=False, + include_azure_aadds=False, + skip_rules=False, + skip_machines=False, + skip_databases=False, + skip_directories=False, + skip_cloud_users=False, + credentials=None): + self.configurationUid = configuration_uid + self.resourceUid = resource_uid + self.userMap = user_map + self.sharedFolderUid = shared_folder_uid + self.language = language + self.includeMachineDirUsers = include_machine_dir_users + self.includeAzureAadds = include_azure_aadds + self.skipRules = skip_rules + self.skipMachines = skip_machines + self.skipDatabases = skip_databases + self.skipDirectories = skip_directories + self.skipCloudUsers = skip_cloud_users + + if credentials is None: + credentials = [] + self.credentials = credentials + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) -class GatewayActionDiscoverInputs: - def __init__(self, shared_folder_uid, provider_record_uid): - self.shared_folder_uid = shared_folder_uid - self.provider_record_uid = provider_record_uid +class GatewayActionDiscoverJobRemoveInputs: + + def 
__init__(self, configuration_uid, job_id): + self.configurationUid = configuration_uid + self.jobId = job_id def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) +class GatewayActionDiscoverRuleValidateInputs: + + def __init__(self, configuration_uid, statement): + self.configurationUid = configuration_uid + self.statement = statement + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) + # ACTIONS + class GatewayAction(metaclass=abc.ABCMeta): def __init__(self, action, is_scheduled, gateway_destination=None, inputs=None, conversation_id=None): @@ -67,10 +109,29 @@ def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) -class GatewayActionDiscover(GatewayAction): +class GatewayActionDiscoverJobStart(GatewayAction): - def __init__(self, inputs: GatewayActionDiscoverInputs, conversation_id=None): - super().__init__('discover', inputs=inputs, conversation_id=conversation_id, is_scheduled=True) + def __init__(self, inputs: GatewayActionDiscoverJobStartInputs, conversation_id=None): + super().__init__('discover-job-start', inputs=inputs, conversation_id=conversation_id, is_scheduled=True) + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) + + +class GatewayActionDiscoverJobRemove(GatewayAction): + + def __init__(self, inputs: GatewayActionDiscoverJobRemoveInputs, conversation_id=None): + super().__init__('discover-job-remove', inputs=inputs, conversation_id=conversation_id, is_scheduled=True) + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) + + +class GatewayActionDiscoverRuleValidate(GatewayAction): + + def __init__(self, inputs: GatewayActionDiscoverRuleValidateInputs, conversation_id=None): + super().__init__('discover-rule-validate', inputs=inputs, conversation_id=conversation_id, + is_scheduled=True) def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) @@ -118,8 +179,8 @@ def toJSON(self): class GatewayActionRotate(GatewayAction): def __init__(self, inputs: GatewayActionRotateInputs, conversation_id=None, gateway_destination=None): - super().__init__('rotate', inputs=inputs, conversation_id=conversation_id, gateway_destination=gateway_destination, - is_scheduled=True) + super().__init__('rotate', inputs=inputs, conversation_id=conversation_id, + gateway_destination=gateway_destination, is_scheduled=True) def toJSON(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) diff --git a/keepercommander/commands/pam/router_helper.py b/keepercommander/commands/pam/router_helper.py index 15022cdab..7dff8a536 100644 --- a/keepercommander/commands/pam/router_helper.py +++ b/keepercommander/commands/pam/router_helper.py @@ -18,7 +18,7 @@ from ...params import KeeperParams from ...proto import pam_pb2, router_pb2 -VERIFY_SSL = True +VERIFY_SSL = bool(os.environ.get("VERIFY_SSL", "TRUE") == "TRUE") def get_router_url(params: KeeperParams): @@ -359,6 +359,15 @@ def router_send_message_to_gateway(params, transmission_key, rq_proto, destinati return rs +def get_response_payload(router_response): + + router_response_response = router_response.get('response') + router_response_response_payload_str = router_response_response.get('payload') + router_response_response_payload_dict = json.loads(router_response_response_payload_str) + + return router_response_response_payload_dict + + def 
print_router_response(router_response, response_type, original_conversation_id=None, is_verbose=False):
     if not router_response:
         return
diff --git a/keepercommander/commands/pam/user_facade.py b/keepercommander/commands/pam/user_facade.py
new file mode 100644
index 000000000..8fd80036e
--- /dev/null
+++ b/keepercommander/commands/pam/user_facade.py
@@ -0,0 +1,91 @@
+from ...record_facades import TypedRecordFacade, string_getter, string_setter, boolean_getter, boolean_setter
+from ...vault import TypedField
+from typing import Optional
+
+
+class PamUserRecordFacade(TypedRecordFacade):
+    _login_getter = string_getter('_login')
+    _login_setter = string_setter('_login')
+    _password_getter = string_getter('_password')
+    _password_setter = string_setter('_password')
+    _distinguishedName_getter = string_getter('_distinguishedName')
+    _distinguishedName_setter = string_setter('_distinguishedName')
+    _connectDatabase_getter = string_getter('_connectDatabase')
+    _connectDatabase_setter = string_setter('_connectDatabase')
+    _managed_getter = boolean_getter('_managed')
+    _managed_setter = boolean_setter('_managed')
+    _oneTimeCode_getter = string_getter('_oneTimeCode')
+    _oneTimeCode_setter = string_setter('_oneTimeCode')
+
+    def __init__(self):
+        super(PamUserRecordFacade, self).__init__()
+        self._login = None  # type: Optional[TypedField]
+        self._password = None  # type: Optional[TypedField]
+        self._distinguishedName = None  # type: Optional[TypedField]
+        self._connectDatabase = None  # type: Optional[TypedField]
+        self._managed = None  # type: Optional[TypedField]
+        self._oneTimeCode = None  # type: Optional[TypedField]
+
+    @property
+    def login(self):
+        return PamUserRecordFacade._login_getter(self)
+
+    @login.setter
+    def login(self, value):
+        PamUserRecordFacade._login_setter(self, value)
+
+    @property
+    def password(self):
+        return PamUserRecordFacade._password_getter(self)
+
+    @password.setter
+    def password(self, value):
+        PamUserRecordFacade._password_setter(self, value)
+
+    @property
+    def distinguishedName(self):
+        return PamUserRecordFacade._distinguishedName_getter(self)
+
+    @distinguishedName.setter
+    def distinguishedName(self, value):
+        PamUserRecordFacade._distinguishedName_setter(self, value)
+
+    @property
+    def connectDatabase(self):
+        return PamUserRecordFacade._connectDatabase_getter(self)
+
+    @connectDatabase.setter
+    def connectDatabase(self, value):
+        PamUserRecordFacade._connectDatabase_setter(self, value)
+
+    @property
+    def managed(self):
+        return PamUserRecordFacade._managed_getter(self)
+
+    @managed.setter
+    def managed(self, value):
+        PamUserRecordFacade._managed_setter(self, value)
+
+    @property
+    def oneTimeCode(self):
+        return PamUserRecordFacade._oneTimeCode_getter(self)
+
+    @oneTimeCode.setter
+    def oneTimeCode(self, value):
+        PamUserRecordFacade._oneTimeCode_setter(self, value)
+
+    def load_typed_fields(self):
+        if self.record:
+            self.record.type_name = 'pamUser'
+            for attr in ["login", "password", "distinguishedName", "connectDatabase", "managed", "oneTimeCode"]:
+                attr_prv = f"_{attr}"
+                value = next((x for x in self.record.fields if x.type == attr), None)
+                setattr(self, attr_prv, value)
+                if value is None:
+                    value = TypedField.new_field(attr, '')
+                    setattr(self, attr_prv, value)
+                    self.record.fields.append(value)
+        else:
+            for attr in ["_login", "_password", "_distinguishedName", "_connectDatabase", "_managed", "_oneTimeCode"]:
+                setattr(self, attr, None)
+        super(PamUserRecordFacade, self).load_typed_fields()
diff --git
a/keepercommander/commands/pam_debug/__init__.py b/keepercommander/commands/pam_debug/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/keepercommander/commands/pam_debug/acl.py b/keepercommander/commands/pam_debug/acl.py new file mode 100644 index 000000000..e02ec73d1 --- /dev/null +++ b/keepercommander/commands/pam_debug/acl.py @@ -0,0 +1,30 @@ +from __future__ import annotations +import argparse +import os +from ..discover import PAMGatewayActionDiscoverCommandBase +from ...display import bcolors +from ... import vault +from discovery_common.infrastructure import Infrastructure +from discovery_common.record_link import RecordLink +from discovery_common.types import UserAcl, DiscoveryObject +from keeper_dag import EdgeType +from importlib.metadata import version +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from ...vault import TypedRecord + from ...params import KeeperParams + + +class PAMDebugACLCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-pam-command-debug') + + # The record to base everything on. + parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store', + help='Gateway name or UID.') + + def get_parser(self): + return PAMDebugACLCommand.parser + + def execute(self, params: KeeperParams, **kwargs): + pass \ No newline at end of file diff --git a/keepercommander/commands/pam_debug/alter.py b/keepercommander/commands/pam_debug/alter.py new file mode 100644 index 000000000..d4e718e90 --- /dev/null +++ b/keepercommander/commands/pam_debug/alter.py @@ -0,0 +1,30 @@ +from __future__ import annotations +import argparse +import os +from ..discover import PAMGatewayActionDiscoverCommandBase +from ...display import bcolors +from ... import vault +from discovery_common.infrastructure import Infrastructure +from discovery_common.record_link import RecordLink +from discovery_common.types import UserAcl, DiscoveryObject +from keeper_dag import EdgeType +from importlib.metadata import version +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from ...vault import TypedRecord + from ...params import KeeperParams + + +class PAMDebugAlterCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-pam-command-debug') + + # The record to base everything on. + parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store', + help='Gateway name or UID.') + + def get_parser(self): + return PAMDebugAlterCommand.parser + + def execute(self, params: KeeperParams, **kwargs): + pass \ No newline at end of file diff --git a/keepercommander/commands/pam_debug/graph.py b/keepercommander/commands/pam_debug/graph.py new file mode 100644 index 000000000..1bbf69785 --- /dev/null +++ b/keepercommander/commands/pam_debug/graph.py @@ -0,0 +1,484 @@ +from __future__ import annotations +import argparse +import logging +import os + +from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext +from ...display import bcolors +from ... 
import vault +from ...utils import value_to_boolean +from discovery_common.infrastructure import Infrastructure +from discovery_common.record_link import RecordLink +from discovery_common.user_service import UserService +from discovery_common.constants import (PAM_USER, PAM_DIRECTORY, PAM_MACHINE, PAM_DATABASE, VERTICES_SORT_MAP, + DIS_INFRA_GRAPH_ID, RECORD_LINK_GRAPH_ID, USER_SERVICE_GRAPH_ID) +from discovery_common.types import (DiscoveryObject, DiscoveryUser, DiscoveryDirectory, DiscoveryMachine, + DiscoveryDatabase) +from discovery_common.dag_sort import sort_infra_vertices +from keeper_dag import DAG +from keeper_dag.connection.commander import Connection as CommanderConnection +from keeper_dag.connection.local import Connection as LocalConnection +from keeper_dag.vertex import DAGVertex +from keeper_dag.edge import DAGEdge +from typing import Optional, Union, TYPE_CHECKING + +Connection = Union[CommanderConnection, LocalConnection] +if TYPE_CHECKING: + from ...vault import TypedRecord + from ...params import KeeperParams + + + +class PAMDebugGraphCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-pam-command-debug') + + # The record to base everything on. + parser.add_argument('--gateway', '-g', required=True, dest='gateway', action='store', + help='Gateway name or UID.') + parser.add_argument('--type', '-t', required=True, choices=['infra', 'rl', 'service'], + dest='graph_type', action='store', help='Graph type', default='infra') + parser.add_argument('--raw', required=False, dest='raw', action='store_true', + help='Render raw graph. Will render corrupt graphs.') + + parser.add_argument('--list', required=False, dest='do_text_list', action='store_true', + help='List items in a list.') + + parser.add_argument('--render', required=False, dest='do_render', action='store_true', + help='Render a graph') + parser.add_argument('--file', '-f', required=False, dest='filepath', action='store', + default="keeper_graph", help='Base name for the graph file.') + parser.add_argument('--format', required=False, choices=['raw', 'dot', 'twopi', 'patchwork'], + dest='format', default="dot", action='store', help='The format of the graph.') + parser.add_argument('--debug-dag-level', required=False, dest='debug_level', action='store', + help='DAG debug level. 
Default is 0', type=int, default=0) + + mapping = { + PAM_USER: {"order": 1, "sort": "_sort_name", "item": DiscoveryUser, "key": "user"}, + PAM_DIRECTORY: {"order": 1, "sort": "_sort_name", "item": DiscoveryDirectory, "key": "host_port"}, + PAM_MACHINE: {"order": 2, "sort": "_sort_host", "item": DiscoveryMachine, "key": "host"}, + PAM_DATABASE: {"order": 3, "sort": "_sort_host", "item": DiscoveryDatabase, "key": "host_port"}, + } + + graph_id_map = { + "infra": DIS_INFRA_GRAPH_ID, + "rl": RECORD_LINK_GRAPH_ID, + "service": USER_SERVICE_GRAPH_ID + } + + def get_parser(self): + return PAMDebugGraphCommand.parser + + def _do_text_list_infra(self, params: KeeperParams, gateway_context: GatewayContext, debug_level: int = 0): + + infra = Infrastructure(record=gateway_context.configuration, params=params, logger=logging, + debug_level=debug_level) + + try: + configuration = infra.get_root.has_vertices()[0] + except (Exception,): + print(f"{bcolors.FAIL}Could not find the configuration in the infrastructure graph.{bcolors.ENDC}") + return + + line_start = { + 0: "", + 1: "* ", + 2: "- ", + } + + color_func = { + 0: self._h, + 1: self._gr, + 2: self._p + } + + def _handle(current_vertex: DAGVertex, indent: int = 0, last_record_type: Optional[str] = None): + + if current_vertex.active is False: + return + + pad = "" + if indent > 0: + pad = "".ljust(4 * indent, ' ') + + text = "" + ls = line_start.get(indent, " ") + cf = color_func.get(indent, self._p) + + current_content = DiscoveryObject.get_discovery_object(current_vertex) + if current_content.record_uid is None: + text += f"{pad}{ls}{current_content.title} does not have a record." + else: + record = vault.KeeperRecord.load(params, current_content.record_uid) # type: Optional[TypedRecord] + text += f"{pad}{ls}" + cf(f"{record.title}; {record.record_uid}") + + if current_vertex.active is False: + text += " " + self._f("Inactive") + + print(text) + + record_type_to_vertices_map = sort_infra_vertices(current_vertex) + # Process the record type by their map order in ascending order. + + # Sort the record types by their order in the constant. + # 'order' is an int. 
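+            # For example, with a sort map like {PAM_USER: {"order": 1}, PAM_MACHINE: {"order": 2},
+            # PAM_DATABASE: {"order": 3}} (mirroring the 'mapping' constant above), the loop below
+            # prints users and directories first, then machines, then databases, one grouped
+            # section per record type.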
+            for record_type in sorted(record_type_to_vertices_map, key=lambda i: VERTICES_SORT_MAP[i]['order']):
+                for vertex in record_type_to_vertices_map[record_type]:
+                    if last_record_type is None or last_record_type != record_type:
+                        if indent == 0:
+                            print(f"{pad}  {self._b(self._n(record_type))}")
+                        last_record_type = record_type
+
+                    _handle(vertex, indent=indent+1)
+
+        print("")
+        _handle(configuration)
+        print("")
+
+    def _do_text_list_rl(self, params: KeeperParams, gateway_context: GatewayContext, debug_level: int = 0):
+
+        print("")
+
+        record_link = RecordLink(record=gateway_context.configuration, params=params, logger=logging,
+                                 debug_level=debug_level)
+        configuration = record_link.dag.get_root
+
+        record = vault.KeeperRecord.load(params, configuration.uid)  # type: Optional[TypedRecord]
+        if record is None:
+            print(self._f("Configuration record does not exist."))
+            return
+
+        print(self._h(f"{record.record_type}, {record.title}, {record.record_uid}"))
+
+        def _group(configuration_vertex: DAGVertex) -> dict:
+
+            group = {
+                PAM_USER: [],
+                PAM_DIRECTORY: [],
+                PAM_MACHINE: [],
+                PAM_DATABASE: [],
+                "NO_RECORD": []
+            }
+
+            for vertex in configuration_vertex.has_vertices():
+                record = vault.KeeperRecord.load(params, vertex.uid)  # type: Optional[TypedRecord]
+                if record is None:
+                    group["NO_RECORD"].append({
+                        "v": vertex
+                    })
+                    continue
+                group[record.record_type].append({
+                    "v": vertex,
+                    "r": record
+                })
+
+            return group
+
+        group = _group(configuration)
+
+        for record_type in [PAM_USER, PAM_DIRECTORY, PAM_MACHINE, PAM_DATABASE]:
+            if len(group[record_type]) > 0:
+                print("  " + self._b(self._n(record_type)))
+                for item in group[record_type]:
+                    vertex = item.get("v")  # type: DAGVertex
+                    record = item.get("r")  # type: TypedRecord
+                    text = self._gr(f"{record.title}; {record.record_uid}")
+                    if vertex.active is False:
+                        text += " " + self._f("Inactive")
+                    print(f"  * {text}")
+
+                    # These are cloud users
+                    if record_type == PAM_USER:
+                        acl = record_link.get_acl(vertex.uid, configuration.uid)
+                        if acl is None:
+                            print(f"    {self._f('missing ACL')}")
+                        else:
+                            if acl.is_admin is True:
+                                print(f"    . is the {self._b('Admin')}")
+                            if acl.belongs_to is True:
+                                print(f"    . belongs to this resource")
+                            else:
+                                print(f"    . looks like a directory user")
+                        continue
+
+                    children = vertex.has_vertices()
+                    if len(children) > 0:
+                        bad = []
+                        for child in children:
+                            child_record = vault.KeeperRecord.load(params, child.uid)  # type: Optional[TypedRecord]
+                            if child_record is None:
+                                if child.active is True:
+                                    bad.append(self._f(f"- Record UID {child.uid} does not exist."))
+                                continue
+                            else:
+                                print(f"    - {child_record.title}; {child_record.record_uid}")
+                                acl = record_link.get_acl(child.uid, vertex.uid)
+                                if acl is None:
+                                    print(f"      {self._f('missing ACL')}")
+                                else:
+                                    if acl.is_admin is True:
+                                        print(f"      . is the {self._b('Admin')}")
+                                    if acl.belongs_to is True:
+                                        print(f"      . belongs to this resource")
+                                    else:
+                                        print(f"      . looks like a directory user")
+                        for i in bad:
+                            print("    " + i)
+
+    @staticmethod
+    def _do_text_list_service(params: KeeperParams, gateway_context: GatewayContext, debug_level: int = 0):
+
+        user_service = UserService(record=gateway_context.configuration, params=params, logger=logging,
+                                   debug_level=debug_level)
+        configuration = user_service.dag.get_root
+
+        def _handle(current_vertex: DAGVertex, parent_vertex: Optional[DAGVertex] = None, indent: int = 0):
+
+            pad = ""
+            if indent > 0:
+                pad = "".ljust(2 * indent, ' ') + "* "
+
+            record = vault.KeeperRecord.load(params, current_vertex.uid)  # type: Optional[TypedRecord]
+            if record is None:
+                if current_vertex.active is False:
+                    print(f"{pad}Record {current_vertex.uid} does not exist, inactive in the graph.")
+                else:
+                    print(f"{pad}Record {current_vertex.uid} does not exist, active in the graph.")
+                return
+            elif current_vertex.active is False:
+                print(f"{pad}{record.record_type}, {record.title}, {record.record_uid} exists, "
+                      "inactive in the graph.")
+                return
+
+            acl_text = ""
+            acl = user_service.get_acl(parent_vertex, current_vertex)
+            if acl is not None:
+                acl_parts = []
+                if acl.is_service is True:
+                    acl_parts.append("Service")
+                if acl.is_task is True:
+                    acl_parts.append("Task")
+                if len(acl_parts) > 0:
+                    acl_text = ", ".join(acl_parts)
+
+            print(f"{pad}{record.record_type}, {record.title}, {record.record_uid}{acl_text}")
+
+            for vertex in current_vertex.has_vertices():
+                _handle(current_vertex=vertex, parent_vertex=current_vertex, indent=indent+1)
+
+        _handle(current_vertex=configuration, parent_vertex=None)
+
+    def _do_render_infra(self, params: KeeperParams, gateway_context: GatewayContext, filepath: str,
+                         graph_format: str, debug_level: int = 0):
+
+        infra = Infrastructure(record=gateway_context.configuration, params=params, logger=logging,
+                               debug_level=debug_level)
+        infra.load(sync_point=0)
+
+        print("")
+        dot_instance = infra.to_dot(
+            graph_type=graph_format if graph_format != "raw" else "dot",
+            show_only_active_vertices=False,
+            show_only_active_edges=False
+        )
+        if graph_format == "raw":
+            print(dot_instance)
+        else:
+            try:
dot_instance.render(filepath) + print(f"User service/tasks graph rendered to {self._gr(filepath)}") + except Exception as err: + print(self._f(f"Could not generate graph: {err}")) + raise err + print("") + + @staticmethod + def get_connection(params: KeeperParams) -> Connection: + if value_to_boolean(os.environ.get("USE_LOCAL_DAG", False)) is False: + return CommanderConnection(params=params) + else: + return LocalConnection() + + def _do_raw_text_list(self, params: KeeperParams, gateway_context: GatewayContext, graph_id: int = 0, + debug_level: int = 0): + + logging.debug(f"loading graph id {graph_id}, for record uid {gateway_context.configuration.record_uid}") + + conn = self.get_connection(params=params) + dag = DAG(conn=conn, record=gateway_context.configuration, graph_id=graph_id, fail_on_corrupt=False, + logger=logging, debug_level=debug_level) + dag.load(sync_point=0) + print("") + if dag.is_corrupt is True: + print(f"{bcolors.FAIL}The graph is corrupt at Vertex UIDs: {', '.join(dag.corrupt_uids)}") + print("") + + logging.debug("DAG DOT -------------------------------") + logging.debug(str(dag.to_dot())) + logging.debug("DAG DOT -------------------------------") + + line_start = { + 0: "", + 1: "* ", + 2: "- ", + 3: ". ", + } + + color_func = { + 0: self._h, + 1: self._gr, + 2: self._bl, + 3: self._p + } + + def _handle(current_vertex: DAGVertex, last_vertex: Optional[DAGVertex] = None, indent: int = 0): + + pad = "" + if indent > 0: + pad = "".ljust(4 * indent, ' ') + + ls = line_start.get(indent, " ") + cf = color_func.get(indent, self._p) + text = f"{pad}{ls}{cf(current_vertex.uid)}" + + edge_types = [] + if last_vertex is not None: + for edge in current_vertex.edges: # type: DAGEdge + if edge.active is False: + continue + if edge.head_uid == last_vertex.uid: + edge_types.append(edge.edge_type.value) + if len(edge_types) > 0: + text += f"; edges: {', '.join(edge_types)}" + + if current_vertex.active is False: + text += " " + self._f("Inactive") + if current_vertex.corrupt is True: + text += " " + self._f("Corrupt") + + print(text) + + if current_vertex.active is False: + logging.debug(f"vertex {current_vertex.uid} is not active, will not get children.") + return + + vertices = current_vertex.has_vertices() + if len(vertices) == 0: + logging.debug(f"vertex {current_vertex.uid} does not have any children.") + return + + for vertex in vertices: + _handle(vertex, current_vertex, indent=indent + 1) + + print("") + _handle(dag.get_root) + print("") + + def _do_raw_render_graph(self, params: KeeperParams, gateway_context: GatewayContext, filepath: str, + graph_format: str, graph_id: int = 0, debug_level: int = 0): + + conn = self.get_connection(params=params) + dag = DAG(conn=conn, record=gateway_context.configuration, graph_id=graph_id, fail_on_corrupt=False, + logger=logging, debug_level=debug_level) + dag.load(sync_point=0) + dot = dag.to_dot(graph_format=graph_format) + if graph_format == "raw": + print(dot) + else: + try: + dot.render(filepath) + print(f"Graph rendered to {self._gr(filepath)}") + except Exception as err: + print(self._f(f"Could not generate graph: {err}")) + raise err + + print("") + + def execute(self, params: KeeperParams, **kwargs): + + gateway = kwargs.get("gateway") + raw = kwargs.get("raw", False) + graph_type = kwargs.get("graph_type") + do_text_list = kwargs.get("do_text_list") + do_render = kwargs.get("do_render") + debug_level = int(kwargs.get("debug_level", 0)) + + gateway_context = GatewayContext.from_gateway(params, gateway) + if gateway_context is 
None:
+            print(f"{bcolors.FAIL}Could not find the gateway configuration for {gateway}.{bcolors.ENDC}")
+            return
+
+        if raw is True:
+            if do_text_list is True:
+                self._do_raw_text_list(params=params,
+                                       gateway_context=gateway_context,
+                                       graph_id=PAMDebugGraphCommand.graph_id_map.get(graph_type),
+                                       debug_level=debug_level)
+            if do_render is True:
+                filepath = kwargs.get("filepath")
+                graph_format = kwargs.get("format")
+                self._do_raw_render_graph(params=params,
+                                          gateway_context=gateway_context,
+                                          filepath=filepath,
+                                          graph_format=graph_format,
+                                          graph_id=PAMDebugGraphCommand.graph_id_map.get(graph_type),
+                                          debug_level=debug_level)
+        else:
+            if do_text_list is True:
+                list_func = getattr(self, f"_do_text_list_{graph_type}")
+                list_func(params=params,
+                          gateway_context=gateway_context,
+                          debug_level=debug_level)
+            if do_render is True:
+                filepath = kwargs.get("filepath")
+                graph_format = kwargs.get("format")
+                render_func = getattr(self, f"_do_render_{graph_type}")
+                render_func(params=params,
+                            gateway_context=gateway_context,
+                            filepath=filepath,
+                            graph_format=graph_format,
+                            debug_level=debug_level)
diff --git a/keepercommander/commands/pam_debug/info.py b/keepercommander/commands/pam_debug/info.py
new file mode 100644
index 000000000..6ca241bb5
--- /dev/null
+++ b/keepercommander/commands/pam_debug/info.py
@@ -0,0 +1,386 @@
+from __future__ import annotations
+import argparse
+from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext
+from ...display import bcolors
+from ... import vault
+from discovery_common.infrastructure import Infrastructure
+from discovery_common.record_link import RecordLink
+from discovery_common.user_service import UserService
+from discovery_common.types import UserAcl, DiscoveryObject
+from discovery_common.constants import PAM_USER, PAM_MACHINE, PAM_DATABASE, PAM_DIRECTORY
+from keeper_dag import EdgeType
+import time
+import re
+from typing import Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...vault import TypedRecord
+    from ...params import KeeperParams
+
+
+class PAMDebugInfoCommand(PAMGatewayActionDiscoverCommandBase):
+    parser = argparse.ArgumentParser(prog='dr-pam-command-debug')
+
+    type_name_map = {
+        PAM_USER: "PAM User",
+        PAM_MACHINE: "PAM Machine",
+        PAM_DATABASE: "PAM Database",
+        PAM_DIRECTORY: "PAM Directory",
+    }
+
+    # The record to base everything on.
+    parser.add_argument('--record-uid', '-i', required=True, dest='record_uid', action='store',
+                        help='Keeper PAM record UID.')
+
+    def get_parser(self):
+        return PAMDebugInfoCommand.parser
+
+    def execute(self, params: KeeperParams, **kwargs):
+
+        record_uid = kwargs.get("record_uid")
+        record = vault.KeeperRecord.load(params, record_uid)  # type: Optional[TypedRecord]
+        if record is None:
+            print(f"{bcolors.FAIL}Record does not exist.{bcolors.ENDC}")
+            return
+
+        if record.record_type not in ["pamUser", "pamMachine", "pamDatabase", "pamDirectory"]:
+            if re.search(r'^pam.*Configuration$', record.record_type) is None:
+                print(f"{bcolors.FAIL}The record is a {record.record_type}. This is not a PAM record.{bcolors.ENDC}")
+                return
+
+        record_rotation = params.record_rotation_cache.get(record_uid)
+        if record_rotation is None:
+            print(f"{bcolors.FAIL}PAM record does not have rotation settings.{bcolors.ENDC}")
+            return
+
+        # TODO: Not sure if this is going away. If not, we are going to have to scan the graphs.
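+        # record_rotation comes from params.record_rotation_cache; the only key used here
+        # is "configuration_uid" (assumed shape: {"configuration_uid": "<uid>", ...}).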
+        controller_uid = record_rotation.get("configuration_uid")
+        if controller_uid is None:
+            print(f"{bcolors.FAIL}Record does not have the PAM Configuration set.{bcolors.ENDC}")
+            return
+
+        configuration_record = vault.KeeperRecord.load(params, controller_uid)  # type: Optional[TypedRecord]
+
+        gateway_context = GatewayContext.from_configuration_uid(params, controller_uid)
+
+        infra = Infrastructure(record=configuration_record, params=params)
+        record_link = RecordLink(record=configuration_record, params=params)
+        user_service = UserService(record=configuration_record, params=params)
+
+        print("")
+        print(self._h("Record Information"))
+        print(f"  {self._b('Record UID')}: {record_uid}")
+        print(f"  {self._b('Record Title')}: {record.title}")
+        print(f"  {self._b('Record Type')}: {record.record_type}")
+        print(f"  {self._b('Configuration UID')}: {configuration_record.record_uid}")
+        print(f"  {self._b('Configuration Key Bytes Hex')}: {configuration_record.record_key.hex()}")
+        if gateway_context is not None:
+            print(f"  {self._b('Gateway Name')}: {gateway_context.gateway_name}")
+            print(f"  {self._b('Gateway UID')}: {gateway_context.gateway_uid}")
+        else:
+            print(f"  {self._f('Cannot get gateway information. Gateway may not be up.')}")
+        print("")
+
+        discovery_vertices = infra.dag.search_content({"record_uid": record.record_uid})
+        record_vertex = record_link.dag.get_vertex(record.record_uid)
+
+        if record_vertex is not None:
+            print(self._h("Record Linking"))
+            record_parent_vertices = record_vertex.belongs_to_vertices()
+            print(self._b("  Parent Records"))
+            if len(record_parent_vertices) > 0:
+                for record_parent_vertex in record_parent_vertices:
+
+                    parent_record = vault.KeeperRecord.load(params,
+                                                            record_parent_vertex.uid)  # type: Optional[TypedRecord]
+                    if parent_record is None:
+                        print(f"{bcolors.FAIL}  * Parent record {record_parent_vertex.uid} "
+                              f"does not exist.{bcolors.ENDC}")
+                        continue
+
+                    acl_edge = record_vertex.get_edge(record_parent_vertex, EdgeType.ACL)
+                    if acl_edge is not None:
+                        acl_content = acl_edge.content_as_object(UserAcl)
+                        print(f"  * ACL to {self._n(parent_record.record_type)}; {parent_record.title}; "
+                              f"{record_parent_vertex.uid}")
+                        if acl_content.is_admin is True:
+                            print(f"    . Is {self._gr('Admin')}")
+                        if acl_content.belongs_to is True:
+                            print(f"    . Belongs")
+                        else:
+                            print(f"    . Is {self._bl('Remote user')}")
+
+                    link_edge = record_vertex.get_edge(record_parent_vertex, EdgeType.LINK)
+                    if link_edge is not None:
+                        print(f"  * LINK to {self._n(parent_record.record_type)}; {parent_record.title}; "
+                              f"{record_parent_vertex.uid}")
+            else:
+                # This really should not happen
+                print(f"{bcolors.FAIL}    Record does not have a parent record.{bcolors.ENDC}")
+            print("")
+
+            record_child_vertices = record_vertex.has_vertices()
+            print(self._b("  Child Records"))
+            if len(record_child_vertices) > 0:
+                for record_child_vertex in record_child_vertices:
+                    child_record = vault.KeeperRecord.load(params,
+                                                           record_child_vertex.uid)  # type: Optional[TypedRecord]
+
+                    if child_record is None:
+                        print(f"{bcolors.FAIL}  * Child record {record_child_vertex.uid} "
+                              f"does not exist.{bcolors.ENDC}")
+                        continue
+
+                    acl_edge = record_child_vertex.get_edge(record_vertex, EdgeType.ACL)
+                    link_edge = record_child_vertex.get_edge(record_vertex, EdgeType.LINK)
+                    if acl_edge is not None:
+                        acl_content = acl_edge.content_as_object(UserAcl)
+                        print(f"  * ACL from {self._n(child_record.record_type)}; {child_record.title}; "
+                              f"{record_child_vertex.uid}")
+                        if acl_content.is_admin is True:
+                            print(f"    . Is {self._gr('Admin')}")
+                        if acl_content.belongs_to is True:
+                            print(f"    . Belongs")
+                        else:
+                            print(f"    . Is {self._bl('Remote user')}")
+                    elif link_edge is not None:
+                        print(f"  * LINK from {self._n(child_record.record_type)}; {child_record.title}; "
+                              f"{record_child_vertex.uid}")
+                    else:
+                        for edge in record_vertex.edges:  # List[DAGEdge]
+                            print(f"  * {self._f(edge.edge_type)}?")
+
+            else:
+                # This is OK
+                print("    Record does not have any children.")
+            print("")
+
+        else:
+            print(f"{bcolors.FAIL}Cannot find record in record linking.{bcolors.ENDC}")
+
+        # Only PAM User and PAM Machine can have services and tasks.
+        # This is really only Windows machines.
+        if record.record_type == PAM_USER or record.record_type == PAM_MACHINE:
+            user_service_vertex = user_service.dag.get_vertex(record_uid)
+            if user_service_vertex is not None:
+
+                # Show which machines use this user for a service or task.
+                if record.record_type == PAM_USER:
+                    tasks = []
+                    services = []
+                    for resource_vertex in user_service.get_resource_uids(record_uid):
+                        resource_content = DiscoveryObject.get_discovery_object(resource_vertex)
+                        resource_record = vault.KeeperRecord.load(params,
+                                                                  resource_vertex.uid)  # type: Optional[TypedRecord]
+                        acl = user_service.get_acl(resource_vertex, user_service_vertex)
+                        if acl.is_task is True:
+                            if resource_record is None:
+                                tasks.append(f"  * Record {resource_vertex.uid}, {resource_content.title} "
+                                             "does not exist.")
+                            else:
+                                tasks.append(f"  * {resource_record.title}, {resource_vertex.uid}")
+                        if acl.is_service is True:
+                            if resource_record is None:
+                                services.append(f"  * Record {resource_vertex.uid}, {resource_content.title} "
+                                                "does not exist.")
+                            else:
+                                services.append(f"  * {resource_record.title}, {resource_vertex.uid}")
+
+                    print(f"{bcolors.HEADER}Service on Machines{bcolors.ENDC}")
+                    if len(services) > 0:
+                        for service in services:
+                            print(service)
+                    else:
+                        print("  PAM User is not used for any services.")
+                    print("")
+
+                    print(f"{bcolors.HEADER}Scheduled Tasks on Machines{bcolors.ENDC}")
+                    if len(tasks) > 0:
+                        for task in tasks:
+                            print(task)
+                    else:
+                        print("  PAM User is not used for any scheduled tasks.")
+                    print("")
+
+                # Show the users that are used for services and tasks.
+                else:
+                    tasks = []
+                    services = []
+                    for user_vertex in user_service.get_user_uids(record_uid):
+                        user_content = DiscoveryObject.get_discovery_object(user_vertex)
+                        user_record = vault.KeeperRecord.load(params, user_vertex.uid)  # type: Optional[TypedRecord]
+                        acl = user_service.get_acl(user_vertex, user_service_vertex)
+                        if acl.is_task is True:
+                            if user_record is None:
+                                tasks.append(f"  * Record {user_vertex.uid}, {user_content.title} "
+                                             "does not exist.")
+                            else:
+                                tasks.append(f"  * {user_record.title}, {user_content.user}, {user_vertex.uid}")
+                        if acl.is_service is True:
+                            if user_record is None:
+                                services.append(f"  * Record {user_vertex.uid}, {user_content.title} "
+                                                "does not exist.")
+                            else:
+                                services.append(f"  * {user_record.title}, {user_content.user}, {user_vertex.uid}")
+
+                    print(f"{bcolors.HEADER}Services Users{bcolors.ENDC}")
+                    if len(services) > 0:
+                        for service in services:
+                            print(service)
+                    else:
+                        print("  Machine does not use any non-builtin users for services.")
+                    print("")
+
+                    print(f"{bcolors.HEADER}Scheduled Tasks Users{bcolors.ENDC}")
+                    if len(tasks) > 0:
+                        for task in tasks:
+                            print(task)
+                    else:
+                        print("  Machine does not use any non-builtin users for scheduled tasks.")
+                    print("")
+            else:
+                print(self._f("There are no services or scheduled tasks associated with this record."))
+                print("")
+        try:
+            if len(discovery_vertices) == 0:
+                print(f"{bcolors.FAIL}Could not find any discovery infrastructure vertices for "
+                      f"{record.record_uid}{bcolors.ENDC}")
+            elif len(discovery_vertices) > 0:
+
+                if len(discovery_vertices) > 1:
+                    print(f"{bcolors.FAIL}Found multiple vertices with the record UID of "
+                          f"{record.record_uid}{bcolors.ENDC}")
+                    for vertex in discovery_vertices:
+                        print(f"  * Infrastructure Vertex UID: {vertex.uid}")
+                    print("")
+
+                discovery_vertex = discovery_vertices[0]
+                content = DiscoveryObject.get_discovery_object(discovery_vertex)
+
+                missing_since = "NA"
+                if content.missing_since_ts is not None:
+                    missing_since = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(content.missing_since_ts))
+
+                print(self._h("Discovery Object Information"))
+                print(f"  {self._b('Vertex UID')}: {content.uid}")
+                print(f"  {self._b('Object ID')}: {content.id}")
+                print(f"  {self._b('Record UID')}: {content.record_uid}")
+                print(f"  {self._b('Parent Record UID')}: {content.parent_record_uid}")
+                print(f"  {self._b('Shared Folder UID')}: {content.shared_folder_uid}")
+                print(f"  {self._b('Record Type')}: {content.record_type}")
+                print(f"  {self._b('Object Type')}: {content.object_type_value}")
+                print(f"  {self._b('Ignore Object')}: {content.ignore_object}")
+                print(f"  {self._b('Rule Engine Result')}: {content.action_rules_result}")
+                print(f"  {self._b('Name')}: {content.name}")
+                print(f"  {self._b('Generated Title')}: {content.title}")
+                print(f"  {self._b('Generated Description')}: {content.description}")
+                print(f"  {self._b('Missing Since')}: {missing_since}")
+                print(f"  {self._b('Discovery Notes')}:")
+                for note in content.notes:
+                    print(f" * {note}")
+                if content.error is not None:
+                    print(f"{bcolors.FAIL}  Error: {content.error}{bcolors.ENDC}")
+                if content.stacktrace is not None:
+                    print(f"{bcolors.FAIL}  Stack Trace:{bcolors.ENDC}")
+                    print(f"{bcolors.FAIL}{content.stacktrace}{bcolors.ENDC}")
+                print("")
+                print(f"{bcolors.HEADER}Record Type Specifics{bcolors.ENDC}")
+
+                if record.record_type == PAM_USER:
+                    print(f"  {self._b('User')}: {content.item.user}")
+                    print(f"  {self._b('DN')}: {content.item.dn}")
+                    print(f"  {self._b('Database')}: {content.item.database}")
+                    print(f"  {self._b('Active')}: {content.item.active}")
+                    print(f"  {self._b('Expired')}: {content.item.expired}")
+                    print(f"  {self._b('Source')}: {content.item.source}")
+                elif record.record_type == PAM_MACHINE:
+                    print(f"  {self._b('Host')}: {content.item.host}")
+                    print(f"  {self._b('IP')}: {content.item.ip}")
+                    print(f"  {self._b('Port')}: {content.item.port}")
+                    print(f"  {self._b('Operating System')}: {content.item.os}")
+                    print(f"  {self._b('Provider Region')}: {content.item.provider_region}")
+                    print(f"  {self._b('Provider Group')}: {content.item.provider_group}")
+                    print(f"  {self._b('Is the Gateway')}: {content.item.is_gateway}")
+                    print("")
+                    # If facts are not set, inside discovery may not have been performed for the machine.
+                    if content.item.facts.id is not None and content.item.facts.name is not None:
+                        print(f"  {self._b('Machine Name')}: {content.item.facts.name}")
+                        print(f"  {self._b('Machine ID')}: {content.item.facts.id.machine_id}")
+                        print(f"  {self._b('Product ID')}: {content.item.facts.id.product_id}")
+                        print(f"  {self._b('Board Serial')}: {content.item.facts.id.board_serial}")
+                        print(f"  {self._b('Directories')}:")
+                        if content.item.facts.directories is not None and len(content.item.facts.directories) > 0:
+                            for directory in content.item.facts.directories:
+                                print(f"  * Directory Domain: {directory.domain}")
+                                print(f"    Software: {directory.software}")
+                                print(f"    Login Format: {directory.login_format}")
+                        else:
+                            print("    Machine is not using any directories.")
+
+                        print("")
+                        print(f"  {self._b('Services')} (Non Builtin Users):")
+                        if len(content.item.facts.services) > 0:
+                            for service in content.item.facts.services:
+                                print(f"  * {service.name} = {service.user}")
+                        else:
+                            print("    Machine has no services that are using non-builtin users.")
+
+                        print(f"  {self._b('Scheduled Tasks')} (Non Builtin Users)")
+                        if len(content.item.facts.tasks) > 0:
+                            for task in content.item.facts.tasks:
+                                print(f"  * {task.name} = {task.user}")
+                        else:
+                            print("    Machine has no scheduled tasks that are using non-builtin users.")
+                    else:
+                        print(f"{bcolors.FAIL}    Machine facts are not set. 
Discover inside may not have been " + f"performed.{bcolors.ENDC}") + elif record.record_type == PAM_DATABASE: + print(f" {self._b('Host')}: {content.item.host}") + print(f" {self._b('IP')}: {content.item.ip}") + print(f" {self._b('Port')}: {content.item.port}") + print(f" {self._b('Database Type')}: {content.item.type}") + print(f" {self._b('Database')}: {content.item.database}") + print(f" {self._b('Use SSL')}: {content.item.use_ssl}") + print(f" {self._b('Provider Region')}: {content.item.provider_region}") + print(f" {self._b('Provider Group')}: {content.item.provider_group}") + elif record.record_type == PAM_DIRECTORY: + print(f" {self._b('Host')}: {content.item.host}") + print(f" {self._b('IP')}: {content.item.ip}") + print(f" {self._b('Port')}: {content.item.port}") + print(f" {self._b('Directory Type')}: {content.item.type}") + print(f" {self._b('Use SSL')}: {content.item.use_ssl}") + print(f" {self._b('Provider Region')}: {content.item.provider_region}") + print(f" {self._b('Provider Group')}: {content.item.provider_group}") + + print("") + print(self._h("Belongs To Vertices (Parents)")) + vertices = discovery_vertex.belongs_to_vertices() + for vertex in vertices: + content = DiscoveryObject.get_discovery_object(vertex) + print(f" * {content.description} ({vertex.uid})") + for edge_type in [EdgeType.LINK, EdgeType.ACL, EdgeType.KEY, EdgeType.DELETION]: + edge = discovery_vertex.get_edge(vertex, edge_type=edge_type) + if edge is not None: + print(f" . {edge_type}, active: {edge.active}") + + if len(vertices) == 0: + print(f"{bcolors.FAIL} Does not belong to anyone{bcolors.ENDC}") + + print("") + print(f"{bcolors.HEADER}Vertices Belonging To (Children){bcolors.ENDC}") + vertices = discovery_vertex.has_vertices() + for vertex in vertices: + content = DiscoveryObject.get_discovery_object(vertex) + print(f" * {content.description} ({vertex.uid})") + for edge_type in [EdgeType.LINK, EdgeType.ACL, EdgeType.KEY, EdgeType.DELETION]: + edge = vertex.get_edge(discovery_vertex, edge_type=edge_type) + if edge is not None: + print(f" . {edge_type}, active: {edge.active}") + if len(vertices) == 0: + print(f" Does not have any children.") + + print("") + else: + print(f"{bcolors.FAIL}Could not find infrastructure vertex.{bcolors.ENDC}") + except Exception as err: + print(f"{bcolors.FAIL}Could not get information on infrastructure: {err}{bcolors.ENDC}") diff --git a/keepercommander/commands/pam_debug/verify.py b/keepercommander/commands/pam_debug/verify.py new file mode 100644 index 000000000..c2d1e8af4 --- /dev/null +++ b/keepercommander/commands/pam_debug/verify.py @@ -0,0 +1,30 @@ +from __future__ import annotations +import argparse +import os +from ..discover import PAMGatewayActionDiscoverCommandBase +from ...display import bcolors +from ... import vault +from discovery_common.infrastructure import Infrastructure +from discovery_common.record_link import RecordLink +from discovery_common.types import UserAcl, DiscoveryObject +from keeper_dag import EdgeType +from importlib.metadata import version +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from ...vault import TypedRecord + from ...params import KeeperParams + + +class PAMDebugVerifyCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-pam-command-debug') + + # The record to base everything on. 
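+    # The gateway may be given as either the controller name or its UID; the debug
+    # commands resolve it with GatewayContext.from_gateway(params, gateway).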
+ parser.add_argument('--gateway', '-g', required=False, dest='gateway', action='store', + help='Gateway name or UID.') + + def get_parser(self): + return PAMDebugVerifyCommand.parser + + def execute(self, params: KeeperParams, **kwargs): + pass \ No newline at end of file diff --git a/keepercommander/commands/pam_debug/version.py b/keepercommander/commands/pam_debug/version.py new file mode 100644 index 000000000..cfbb879da --- /dev/null +++ b/keepercommander/commands/pam_debug/version.py @@ -0,0 +1,20 @@ +from __future__ import annotations +import argparse +from ..discover import PAMGatewayActionDiscoverCommandBase +from ...display import bcolors +from importlib.metadata import version +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from ...params import KeeperParams + + +class PAMDebugVersionCommand(PAMGatewayActionDiscoverCommandBase): + parser = argparse.ArgumentParser(prog='dr-pam-command-debug') + + def execute(self, params: KeeperParams, **kwargs): + + print("") + print(f"{bcolors.BOLD}keeper-dag version:{bcolors.ENDC} {version('keeper-dag')}") + print(f"{bcolors.BOLD}discovery-common version:{bcolors.ENDC} {version('discovery-common')}") + print("") \ No newline at end of file diff --git a/keepercommander/record_facades.py b/keepercommander/record_facades.py index 6f5b3036d..04b5ada0c 100644 --- a/keepercommander/record_facades.py +++ b/keepercommander/record_facades.py @@ -87,6 +87,40 @@ def setter(obj, value): field.value.clear() return setter +def boolean_getter(name): # type: (str) -> Callable[[TypedRecordFacade], bool] + def getter(obj): + field = getattr(obj, name) + if isinstance(field, TypedField): + value = field.value[0] if len(field.value) > 0 else None + if value is None: + return None + elif isinstance(value, bool) is True: + return value + + if str(value).lower() in ['true', 'yes', '1', 'on']: + return True + elif str(value).lower() in ['false', 'no', '0', 'off']: + return False + return None + return getter + +def boolean_setter(name): # type: (str) -> Callable[[Any, str], None] + def setter(obj, value): + field = getattr(obj, name) + if isinstance(field, TypedField): + if value is not None: + if isinstance(value, bool) is not True: + if str(value).lower() in ['true', 'yes', '1', 'on']: + value = True + elif str(value).lower() in ['false', 'no', '0', 'off']: + value = False + if len(field.value) > 0: + field.value[0] = value + else: + field.value.append(value) + else: + field.value.clear() + return setter def string_element_getter(name, element_name): # type: (str, str) -> Callable[[Any], str] def getter(obj): diff --git a/keepercommander/utils.py b/keepercommander/utils.py index 65fa62a6f..fe1b14ea0 100644 --- a/keepercommander/utils.py +++ b/keepercommander/utils.py @@ -321,7 +321,6 @@ def size_to_str(size): # type: (int) -> str size = size / 1024 return f'{size:,.2f} Gb' - def parse_totp_uri(uri): # type: (str) -> Dict[str, Union[str, int, None]] def parse_int(val): return val and int(val) @@ -359,3 +358,15 @@ def decode_uri_component(component): # type: (str) -> str } return result + +def value_to_boolean(value): + """ + Replacement for distutils.util.strtobool + """ + value = str(value) + if value.lower() in ['true', 'yes', 'on', '1']: + return True + elif value.lower() in ['false', 'no', 'off', '0']: + return False + else: + return None diff --git a/keepercommander/vault.py b/keepercommander/vault.py index bb3df936d..bed0c78e5 100644 --- a/keepercommander/vault.py +++ b/keepercommander/vault.py @@ -362,7 +362,7 @@ def 
export_host_field(value):  # type: (dict) -> Optional[str]
         port = value.get('port') or ''
         if host or port:
             if port:
-                host += ':' + port
+                host += ':' + str(port)
         return host
 
     @staticmethod
diff --git a/libs/discovery_common-1.0.10-py3-none-any.whl b/libs/discovery_common-1.0.10-py3-none-any.whl
new file mode 100644
index 000000000..4fe6de402
Binary files /dev/null and b/libs/discovery_common-1.0.10-py3-none-any.whl differ
diff --git a/libs/keeper_dag-1.0.10-py3-none-any.whl b/libs/keeper_dag-1.0.10-py3-none-any.whl
new file mode 100644
index 000000000..a9f6cce74
Binary files /dev/null and b/libs/keeper_dag-1.0.10-py3-none-any.whl differ
diff --git a/requirements.txt b/requirements.txt
index 975e787a5..16e85083e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,9 +9,15 @@ PySocks
 tabulate
 websockets
 fido2
-requests; python_version<'3.7'
 requests>=2.30.0; python_version>='3.7'
 cryptography>=39.0.1
 protobuf>=3.19.0
 keeper-secrets-manager-core>=16.6.0
-aiortc; python_version>='3.8'
\ No newline at end of file
+aiortc; python_version>='3.8'
+pydantic>=2.6.4
+
+# python3 setup.py wheel --whlsrc ~/src/keeper-dag --libdir $PWD/libs --reqfiles $PWD/requirements.txt
+./libs/keeper_dag-1.0.10-py3-none-any.whl
+
+# python3 setup.py wheel --whlsrc ~/src/discovery-common --libdir $PWD/libs --reqfiles $PWD/requirements.txt
+./libs/discovery_common-1.0.10-py3-none-any.whl
diff --git a/setup.py b/setup.py
index 6b40b52bf..6f8e94a0f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,102 @@
 from setuptools import setup
+from setuptools.command.install import install as install_command
+import os
+import subprocess
+import shutil
+import re
+
+
+class Wheel(install_command):
+
+    user_options = install_command.user_options + [
+        ('whlsrc=', None, "Build a wheel for the python code that is in this directory. Copy into 'libs' directory."),
+        ('libdir=', None, "The directory to put the whl files."),
+        ('reqfiles=', None, "List of requirement.txt to update."),
+    ]
+
+    def initialize_options(self):
+        install_command.initialize_options(self)
+        self.whlsrc = None
+        self.libdir = None
+        self.reqfiles = None
+
+    def finalize_options(self):
+        install_command.finalize_options(self)
+
+    def run(self):
+        global whlsrc
+        global libdir
+        global reqfiles
+        whlsrc = self.whlsrc
+        libdir = self.libdir
+        reqfiles = self.reqfiles
+
+        if isinstance(reqfiles, list) is False:
+            reqfiles = [reqfiles]
+
+        current_dir = os.getcwd()
+        try:
+            # Get existing files in the lib directory.
+            os.chdir(self.libdir)
+            sp = subprocess.run(["ls"], capture_output=True, text=True)
+            existing_whls = []
+            for file in sp.stdout.split("\n"):
+                if file.endswith("whl") is True:
+                    existing_whls.append(file)
+
+            # Install required modules and build a wheel.
+            os.chdir(whlsrc)
+            subprocess.run(["pip3", "install", "-r", "requirements.txt"])
+            subprocess.run(["python3", "setup.py", "bdist_wheel"])
+
+            # Find the whl file in the dist folder.
+            os.chdir(os.path.join(whlsrc, "dist"))
+            sp = subprocess.run(["ls"], capture_output=True, text=True)
+            wheel_file = None
+            for file in sp.stdout.split("\n"):
+                if file.endswith("whl") is True:
+                    wheel_file = file
+                    break
+            if wheel_file is None:
+                raise ValueError(f"Cannot find a whl file in the dist directory of the {whlsrc} project.")
+
+            # Copy the whl to the lib directory
+            subprocess.run(["cp", wheel_file, self.libdir])
+
+            project_name = wheel_file[:wheel_file.index("-")]
+
+            # Remove old versions of the wheel.
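+            # For example, if libs/ already holds keeper_dag-1.0.9-py3-none-any.whl
+            # (a hypothetical older build) and the new wheel is
+            # keeper_dag-1.0.10-py3-none-any.whl, the loop below deletes the 1.0.9
+            # file and leaves the new one in place.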
+ os.chdir(self.libdir) + for existing_whl in existing_whls: + if existing_whl.startswith(project_name) is False: + continue + if existing_whl == wheel_file: + continue + os.unlink(existing_whl) + + for req in reqfiles: + shutil.copy(req, f"{req}.bak") + requirement_data = [] + with open(req, "r") as fh: + requirement_data = fh.readlines() + fh.close() + + pattern = re.compile(re.escape(project_name) + "-.*?.whl" ) + with open(req, "w") as fh: + for line in requirement_data: + line = re.sub(pattern, wheel_file, line) + fh.write(line) + fh.close() + os.unlink(f"{req}.bak") + + finally: + os.chdir(current_dir) + + if __name__ == '__main__': - setup() + setup( + cmdclass={ + 'wheel': Wheel + } + )
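
A minimal usage sketch of the new PamUserRecordFacade together with the boolean field
helpers added in record_facades.py. It is illustrative only: it assumes a logged-in
KeeperParams instance named params, a hypothetical pamUser record UID, and that
assigning facade.record triggers load_typed_fields(), as the facades elsewhere in this
change rely on:

    from keepercommander import vault
    from keepercommander.commands.pam.user_facade import PamUserRecordFacade

    record = vault.KeeperRecord.load(params, 'EXAMPLE_PAM_USER_UID')  # hypothetical UID
    facade = PamUserRecordFacade()
    facade.record = record          # binds the record and loads/creates its typed fields
    facade.login = 'svc_discovery'  # string_setter writes into the 'login' field value
    facade.managed = 'yes'          # boolean_setter normalizes 'true'/'yes'/'1'/'on' to True
    assert facade.managed is True   # boolean_getter returns a real bool

The record could then be persisted with record_management.update_record(params, record),
the same call the discovery store code above uses.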