From ee079224528e8f01d49818e0d2cfd1d861d1b191 Mon Sep 17 00:00:00 2001 From: Jacob Levy <129657918+jlevypaloalto@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:07:37 +0300 Subject: [PATCH] [XDR - IR] revert #35932 (#36580) * init * init * CR changes * Update 6_1_78.md --- .../Integrations/CortexXDRIR/CortexXDRIR.py | 109 ++++++++-------- .../CortexXDRIR/CortexXDRIR_test.py | 120 +++--------------- .../test_data/get_incidents_list_dedup.json | 46 ------- Packs/CortexXDR/ReleaseNotes/6_1_78.md | 6 + Packs/CortexXDR/pack_metadata.json | 2 +- 5 files changed, 77 insertions(+), 206 deletions(-) create mode 100644 Packs/CortexXDR/ReleaseNotes/6_1_78.md diff --git a/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py b/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py index 589227d387a..a41e3e8596a 100644 --- a/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py +++ b/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py @@ -9,6 +9,8 @@ from CoreIRApiModule import * +# Disable insecure warnings +urllib3.disable_warnings() TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" NONCE_LENGTH = 64 @@ -385,8 +387,8 @@ def get_tenant_info(self): return reply.get('reply', {}) def get_multiple_incidents_extra_data(self, exclude_artifacts, incident_id_list=[], gte_creation_time_milliseconds=0, - statuses=[], starred=None, starred_incidents_fetch_window=None, - page_number=0, limit=100, offset=0): + status=None, starred=None, starred_incidents_fetch_window=None, + page_number=0, limit=100): """ Returns incident by id :param incident_id_list: The list ids of incidents @@ -394,23 +396,16 @@ def get_multiple_incidents_extra_data(self, exclude_artifacts, incident_id_list= Maximum number alerts to get in Maximum number alerts to get in "get_multiple_incidents_extra_data" is 50, not sorted """ global ALERTS_LIMIT_PER_INCIDENTS - request_data = { - 'search_from': offset, - 'search_to': offset + limit, - 'sort': { - 'field': 'creation_time', - 'keyword': 'asc', - } - } + request_data = {} filters: List[Any] = [] if incident_id_list: - incident_id_list = argToList(incident_id_list, transform=str) + incident_id_list = argToList(incident_id_list, transform=lambda x: str(x)) filters.append({"field": "incident_id_list", "operator": "in", "value": incident_id_list}) - if statuses: + if status: filters.append({ 'field': 'status', - 'operator': 'in', - 'value': statuses + 'operator': 'eq', + 'value': status }) if exclude_artifacts: request_data['fields_to_exclude'] = FIELDS_TO_EXCLUDE # type: ignore @@ -439,7 +434,7 @@ def get_multiple_incidents_extra_data(self, exclude_artifacts, incident_id_list= }) if len(filters) > 0: request_data['filters'] = filters - demisto.debug(f'Before sending get_multiple_incidents_extra_data request: {request_data=}') + reply = self._http_request( method='POST', url_suffix='/incidents/get_multiple_incidents_extra_data/', @@ -1073,16 +1068,14 @@ def update_related_alerts(client: Client, args: dict): return_results(update_alerts_in_xdr_command(client, args_for_command)) -def fetch_incidents(client: Client, first_fetch_time, integration_instance, exclude_artifacts: bool, last_run: dict, - max_fetch: int = 10, statuses: list = [], starred: Optional[bool] = None, +def fetch_incidents(client, first_fetch_time, integration_instance, exclude_artifacts: bool, last_run: dict = None, + max_fetch: int = 10, statuses: List = [], starred: Optional[bool] = None, starred_incidents_fetch_window: str = None): global ALERTS_LIMIT_PER_INCIDENTS # Get the last fetch time, if exists - last_fetch = 
last_run.get('time') - incidents_from_previous_run = last_run.get('incidents_from_previous_run', []) - - offset = int(last_run.get('offset', 0)) - + last_fetch = last_run.get('time') if isinstance(last_run, dict) else None + incidents_from_previous_run = last_run.get('incidents_from_previous_run', []) if isinstance(last_run, + dict) else [] # Handle first time fetch, fetch incidents retroactively if last_fetch is None: last_fetch, _ = parse_date_range(first_fetch_time, to_timestamp=True) @@ -1090,24 +1083,38 @@ def fetch_incidents(client: Client, first_fetch_time, integration_instance, excl if starred: starred_incidents_fetch_window, _ = parse_date_range(starred_incidents_fetch_window, to_timestamp=True) + incidents = [] if incidents_from_previous_run: raw_incidents = incidents_from_previous_run - ALERTS_LIMIT_PER_INCIDENTS = last_run.get('alerts_limit_per_incident', -1) + ALERTS_LIMIT_PER_INCIDENTS = last_run.get('alerts_limit_per_incident', -1) if isinstance(last_run, dict) else -1 else: - raw_incidents = client.get_multiple_incidents_extra_data( - gte_creation_time_milliseconds=last_fetch, - statuses=statuses, limit=max_fetch, starred=starred, - starred_incidents_fetch_window=starred_incidents_fetch_window, - exclude_artifacts=exclude_artifacts, offset=offset - ) + if statuses: + raw_incidents = [] + for status in statuses: + raw_incident_status = client.get_multiple_incidents_extra_data( + gte_creation_time_milliseconds=last_fetch, + status=status, + limit=max_fetch, starred=starred, + starred_incidents_fetch_window=starred_incidents_fetch_window, + exclude_artifacts=exclude_artifacts) + raw_incidents.extend(raw_incident_status) + raw_incidents = sorted(raw_incidents, key=lambda inc: inc.get('incident', {}).get('creation_time')) + else: + raw_incidents = client.get_multiple_incidents_extra_data( + gte_creation_time_milliseconds=last_fetch, limit=max_fetch, + starred=starred, + starred_incidents_fetch_window=starred_incidents_fetch_window, + exclude_artifacts=exclude_artifacts) # save the last 100 modified incidents to the integration context - for mirroring purposes client.save_modified_incidents_to_integration_context() # maintain a list of non created incidents in a case of a rate limit exception non_created_incidents: list = raw_incidents.copy() + next_run = {} try: - incidents = [] + count_incidents = 0 + for raw_incident in raw_incidents: incident_data: dict[str, Any] = sort_incident_data(raw_incident) if raw_incident.get('incident') else raw_incident incident_id = incident_data.get('incident_id') @@ -1118,49 +1125,43 @@ def fetch_incidents(client: Client, first_fetch_time, integration_instance, excl raw_incident_ = client.get_incident_extra_data(incident_id=incident_id) incident_data = sort_incident_data(raw_incident_) sort_all_list_incident_fields(incident_data) - incident_data |= { - 'mirror_direction': MIRROR_DIRECTION.get(demisto.params().get('mirror_direction', 'None')), - 'mirror_instance': integration_instance, - 'last_mirrored_in': int(datetime.now().timestamp() * 1000), - } + incident_data['mirror_direction'] = MIRROR_DIRECTION.get(demisto.params().get('mirror_direction', 'None'), + None) + incident_data['mirror_instance'] = integration_instance + incident_data['last_mirrored_in'] = int(datetime.now().timestamp() * 1000) description = incident_data.get('description') occurred = timestamp_to_datestring(incident_data['creation_time'], TIME_FORMAT + 'Z') - incident: dict[str, Any] = { + incident: Dict[str, Any] = { 'name': f'XDR Incident {incident_id} - {description}', 
'occurred': occurred, 'rawJSON': json.dumps(incident_data), } if demisto.params().get('sync_owners') and incident_data.get('assigned_user_mail'): - incident['owner'] = demisto.findUser(email=incident_data['assigned_user_mail']).get('username') + incident['owner'] = demisto.findUser(email=incident_data.get('assigned_user_mail')).get('username') # Update last run and add incident if the incident is newer than last fetch if incident_data.get('creation_time', 0) > last_fetch: - demisto.debug(f'updating last_fetch, setting offset = 1; {incident_id=}') last_fetch = incident_data['creation_time'] - offset = 1 - elif incident_data.get('creation_time') == last_fetch: - demisto.debug(f'updating offset += 1; {incident_id=}') - offset += 1 - else: - demisto.debug(f"{incident_data['creation_time']=} < last_fetch; {incident_id=}") - incidents.append(incident) non_created_incidents.remove(raw_incident) + count_incidents += 1 + if count_incidents == max_fetch: + break + except Exception as e: if "Rate limit exceeded" in str(e): demisto.info(f"Cortex XDR - rate limit exceeded, number of non created incidents is: " - f"{len(non_created_incidents)!r}.\n The incidents will be created in the next fetch") + f"'{len(non_created_incidents)}'.\n The incidents will be created in the next fetch") else: raise - next_run = { - 'incidents_from_previous_run': non_created_incidents, - 'time': last_fetch, - 'offset': str(offset), - } - if non_created_incidents: + next_run['incidents_from_previous_run'] = non_created_incidents next_run['alerts_limit_per_incident'] = ALERTS_LIMIT_PER_INCIDENTS # type: ignore[assignment] + else: + next_run['incidents_from_previous_run'] = [] + + next_run['time'] = last_fetch + 1 return next_run, incidents @@ -1333,17 +1334,17 @@ def main(): # pragma: no cover elif command == 'fetch-incidents': integration_instance = demisto.integrationInstance() - last_run_obj = demisto.getLastRun() next_run, incidents = fetch_incidents(client=client, first_fetch_time=first_fetch_time, integration_instance=integration_instance, exclude_artifacts=exclude_artifacts, - last_run=demisto.getLastRun().get('next_run', {}), + last_run=demisto.getLastRun().get('next_run'), max_fetch=max_fetch, statuses=statuses, starred=starred, starred_incidents_fetch_window=starred_incidents_fetch_window, ) + last_run_obj = demisto.getLastRun() last_run_obj['next_run'] = next_run demisto.setLastRun(last_run_obj) demisto.incidents(incidents) diff --git a/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR_test.py b/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR_test.py index 0347cac1b73..b2ce1a64c52 100644 --- a/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR_test.py +++ b/Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR_test.py @@ -22,7 +22,7 @@ def load_test_data(json_path): def get_incident_by_status(incident_id_list=None, lte_modification_time=None, gte_modification_time=None, lte_creation_time=None, gte_creation_time=None, starred=None, - starred_incidents_fetch_window=None, statuses=None, sort_by_modification_time=None, + starred_incidents_fetch_window=None, status=None, sort_by_modification_time=None, sort_by_creation_time=None, page_number=0, limit=100, gte_creation_time_milliseconds=0): """ The function simulate the client.get_incidents method for the test_fetch_incidents_filtered_by_status @@ -31,7 +31,7 @@ def get_incident_by_status(incident_id_list=None, lte_modification_time=None, gt that are in the given status. 
""" incidents_list = load_test_data('./test_data/get_incidents_list.json')['reply']['incidents'] - return [incident for incident in incidents_list if incident['status'] in statuses] + return [incident for incident in incidents_list if incident['status'] == status] def get_incident_extra_data_by_status(incident_id, alerts_limit): @@ -72,7 +72,7 @@ def test_fetch_incidents(requests_mock, mocker): modified_raw_incident.get('alerts')[0].get('host_ip').split(',') mocker.patch("CortexXDRIR.ALERTS_LIMIT_PER_INCIDENTS", new=50) mocker.patch.object(Client, 'save_modified_incidents_to_integration_context') - next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False, last_run={}) + next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False) sort_all_list_incident_fields(modified_raw_incident) assert len(incidents) == 1 assert incidents[0]['name'] == "XDR Incident 1 - desc1" @@ -95,17 +95,16 @@ def test_fetch_incidents_filtered_by_status(requests_mock, mocker): client = Client( base_url=f'{XDR_URL}/public_api/v1', verify=False, timeout=120, proxy=False) - incident_extra_data_under_investigation = load_test_data( - './test_data/get_incident_extra_data_host_id_array.json')['reply']['incidents'] - incident_extra_data_new = load_test_data('./test_data/get_incident_extra_data_new_status.json')['reply']['incidents'] - mocker.patch.object(Client, 'get_multiple_incidents_extra_data', return_value=( - incident_extra_data_under_investigation + incident_extra_data_new)) + incident_extra_data_under_investigation = load_test_data('./test_data/get_incident_extra_data_host_id_array.json') \ + .get('reply', {}).get('incidents') + incident_extra_data_new = load_test_data('./test_data/get_incident_extra_data_new_status.json').get('reply').get('incidents') + mocker.patch.object(Client, 'get_multiple_incidents_extra_data', side_effect=[incident_extra_data_under_investigation, + incident_extra_data_new]) mocker.patch("CortexXDRIR.ALERTS_LIMIT_PER_INCIDENTS", new=50) mocker.patch.object(Client, 'save_modified_incidents_to_integration_context') statuses_to_fetch = ['under_investigation', 'new'] - next_run, incidents = fetch_incidents( - client, '3 month', 'MyInstance', exclude_artifacts=False, statuses=statuses_to_fetch, last_run={}) + next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False, statuses=statuses_to_fetch) assert len(incidents) == 2 assert incidents[0]['name'] == "XDR Incident 1 - 'Local Analysis Malware' generated by XDR Agent detected on host AAAAAA " \ @@ -144,7 +143,7 @@ def test_fetch_incidents_with_rate_limit_error(requests_mock, mocker): client = Client( base_url=f'{XDR_URL}/public_api/v1', verify=False, timeout=120, proxy=False) with pytest.raises(Exception) as e: - next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False, last_run={}) + next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False) assert str(e.value) == 'Rate limit exceeded' @@ -214,7 +213,7 @@ def test_fetch_only_starred_incidents(self, mocker): client = Client( base_url=f'{XDR_URL}/public_api/v1', verify=False, timeout=120, proxy=False) next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False, - last_run=last_run_obj.get('next_run', {}), + last_run=last_run_obj.get('next_run'), starred=True, starred_incidents_fetch_window='3 days') assert len(incidents) == 2 @@ -428,6 +427,7 @@ def 
test_get_remote_data_command_with_rate_limit_exception(mocker): incident. """ from CortexXDRIR import get_remote_data_command, Client + import sys client = Client( base_url=f'{XDR_URL}/public_api/v1', verify=False, timeout=120, proxy=False) args = { @@ -435,13 +435,11 @@ def test_get_remote_data_command_with_rate_limit_exception(mocker): 'lastUpdate': 0 } - mocker.patch.object(demisto, 'results') + mocker.patch('CortexXDRIR.return_error', side_effect=sys.exit) mocker.patch('CortexXDRIR.get_incident_extra_data_command', side_effect=Exception("Rate limit exceeded")) with pytest.raises(SystemExit): _ = get_remote_data_command(client, args) - assert demisto.results.call_args[0][0].get('Contents') == "API rate limit" - def test_get_remote_data_command_should_not_update(requests_mock, mocker): """ @@ -743,7 +741,7 @@ def test_fetch_incidents_extra_data(requests_mock, mocker): mocker.patch.object(Client, 'save_modified_incidents_to_integration_context') mocker.patch.object(Client, 'save_modified_incidents_to_integration_context') mocker.patch("CortexXDRIR.ALERTS_LIMIT_PER_INCIDENTS", new=2) - next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False, last_run={}) + next_run, incidents = fetch_incidents(client, '3 month', 'MyInstance', exclude_artifacts=False) assert len(incidents) == 2 assert incidents[0]['name'] == 'XDR Incident 1 - desc1' assert json.loads(incidents[0]['rawJSON']).get('incident_id') == '1' @@ -1245,7 +1243,7 @@ def test_get_multiple_incidents_extra_data(self, requests_mock, mocker): client = Client( base_url=f'{XDR_URL}/public_api/v1', verify=False, timeout=10, proxy=False) outputs = Client.get_multiple_incidents_extra_data(client, - statuses=['new'], + status=['new'], starred=True, starred_incidents_fetch_window=1575806909185, incident_id_list=['1', '2'], @@ -1584,91 +1582,3 @@ def test_get_xsoar_close_reasons(mocker): } mocker.patch.object(demisto, 'internalHttpRequest', return_value=mock_response) assert get_xsoar_close_reasons() == list(XSOAR_RESOLVED_STATUS_TO_XDR.keys()) + ['CustomReason1', 'CustomReason 2', 'Foo'] - - -@freeze_time('1970-01-01 00:00:00.100') -def test_fetch_incidents_dedup(): - """ - Unit test to verify that incidents that occur in the same instant are not not missed or duplicated. - - Given: - - Two incidents occur in the same instant. - When: - - Fetching incidents. - Then: - - Assert no incidents are missed or duplicated. - """ - from CortexXDRIR import fetch_incidents - - last_run = {'time': 0} - - class MockClient: - - incidents = load_test_data('./test_data/get_incidents_list_dedup.json') - - def save_modified_incidents_to_integration_context(self): ... 
- - def get_multiple_incidents_extra_data(self, gte_creation_time_milliseconds=0, limit=100, offset=0, **_): - return [ - inc for inc in self.incidents - if inc['creation_time'] >= gte_creation_time_milliseconds - ][offset:offset + limit] - - mock_client = MockClient() - - last_run, result_1 = fetch_incidents( - client=mock_client, - first_fetch_time='3 days', - integration_instance={}, - exclude_artifacts=True, - last_run=last_run, - max_fetch=2, - ) - - assert len(result_1) == 2 - assert 'XDR Incident 1' in result_1[0]['name'] - assert 'XDR Incident 2' in result_1[1]['name'] - assert last_run['offset'] == '1' - - last_run, result_2 = fetch_incidents( - client=mock_client, - first_fetch_time='3 days', - integration_instance={}, - exclude_artifacts=True, - last_run=last_run, - max_fetch=2, - ) - - assert len(result_2) == 2 - assert 'XDR Incident 3' in result_2[0]['name'] - assert 'XDR Incident 4' in result_2[1]['name'] - assert last_run['offset'] == '3' - - last_run, result_3 = fetch_incidents( - client=mock_client, - first_fetch_time='3 days', - integration_instance={}, - exclude_artifacts=True, - last_run=last_run, - max_fetch=2, - ) - - assert len(result_3) == 2 - assert 'XDR Incident 5' in result_3[0]['name'] - assert 'XDR Incident 6' in result_3[1]['name'] - assert last_run['offset'] == '1' - - # run empty test and assert last_run['offset'] stays the same - last_run['offset'] = '10' - - last_run, empty_result = fetch_incidents( - client=mock_client, - first_fetch_time='3 days', - integration_instance={}, - exclude_artifacts=True, - last_run=last_run, - max_fetch=2, - ) - - assert not empty_result - assert last_run['offset'] == '10' diff --git a/Packs/CortexXDR/Integrations/CortexXDRIR/test_data/get_incidents_list_dedup.json b/Packs/CortexXDR/Integrations/CortexXDRIR/test_data/get_incidents_list_dedup.json index bcc8cf68ac6..1fb03efcf9c 100644 --- a/Packs/CortexXDR/Integrations/CortexXDRIR/test_data/get_incidents_list_dedup.json +++ b/Packs/CortexXDR/Integrations/CortexXDRIR/test_data/get_incidents_list_dedup.json @@ -70,52 +70,6 @@ }, { "incident_id": "4", - "creation_time": 100000001, - "modification_time": 1575813875168, - "detection_time": null, - "status": "new", - "severity": "high", - "description": "'Local Analysis Malware' generated by XDR Agent detected on host BBBBB involving user Administrator", - "assigned_user_mail": null, - "assigned_user_pretty_name": null, - "alert_count": 1, - "low_severity_alert_count": 0, - "med_severity_alert_count": 1, - "high_severity_alert_count": 0, - "user_count": 1, - "host_count": 1, - "notes": null, - "resolve_comment": null, - "manual_severity": null, - "manual_description": null, - "xdr_url": "https://demisto.hello.com/incident-view/2", - "starred": false - }, - { - "incident_id": "5", - "creation_time": 100000001, - "modification_time": 1575813875168, - "detection_time": null, - "status": "under_investigation", - "severity": "medium", - "description": "'Local Analysis Malware' generated by XDR Agent detected on host AAAAA involving user Administrator", - "assigned_user_mail": null, - "assigned_user_pretty_name": null, - "alert_count": 1, - "low_severity_alert_count": 0, - "med_severity_alert_count": 1, - "high_severity_alert_count": 0, - "user_count": 1, - "host_count": 1, - "notes": null, - "resolve_comment": null, - "manual_severity": null, - "manual_description": null, - "xdr_url": "https://demisto.hello.com/incident-view/1", - "starred": false - }, - { - "incident_id": "6", "creation_time": 100000002, "modification_time": 
1575813875168, "detection_time": null, diff --git a/Packs/CortexXDR/ReleaseNotes/6_1_78.md b/Packs/CortexXDR/ReleaseNotes/6_1_78.md new file mode 100644 index 00000000000..b3e228b1c48 --- /dev/null +++ b/Packs/CortexXDR/ReleaseNotes/6_1_78.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### Palo Alto Networks Cortex XDR - Investigation and Response + +- Fixed an issue in which XDR incidents were missed in the fetch. This reverts the de-duplication mechanism added in version `6.1.73`. diff --git a/Packs/CortexXDR/pack_metadata.json b/Packs/CortexXDR/pack_metadata.json index f9552eccdf6..c6e20605135 100644 --- a/Packs/CortexXDR/pack_metadata.json +++ b/Packs/CortexXDR/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Cortex XDR by Palo Alto Networks", "description": "Automates Cortex XDR incident response, and includes custom Cortex XDR incident views and layouts to aid analyst investigations.", "support": "xsoar", - "currentVersion": "6.1.77", + "currentVersion": "6.1.78", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "",
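
As a quick reference for reviewers, below is a minimal sketch of the per-status fetch flow this revert restores (one `get_multiple_incidents_extra_data` call per configured status, merged and sorted by creation time). The `StubClient` and sample data are illustrative stand-ins, not part of the patch; the actual logic lives in `fetch_incidents` in `CortexXDRIR.py` above.

```python
# Illustrative sketch only: mirrors the restored per-status fetch behavior.
from typing import Any, Dict, List


class StubClient:
    """Hypothetical stand-in for the XDR client, returning canned incidents."""

    _incidents = [
        {'incident': {'incident_id': '2', 'status': 'new', 'creation_time': 200}},
        {'incident': {'incident_id': '1', 'status': 'under_investigation', 'creation_time': 100}},
    ]

    def get_multiple_incidents_extra_data(self, status: str, **_: Any) -> List[Dict]:
        # The real client issues an API request filtered by a single status.
        return [inc for inc in self._incidents if inc['incident']['status'] == status]


def fetch_by_statuses(client: StubClient, statuses: List[str]) -> List[Dict]:
    # One request per status, then merge and sort by creation time,
    # matching the loop reintroduced in fetch_incidents by this patch.
    raw_incidents: List[Dict] = []
    for status in statuses:
        raw_incidents.extend(client.get_multiple_incidents_extra_data(status=status))
    return sorted(raw_incidents, key=lambda inc: inc.get('incident', {}).get('creation_time'))


if __name__ == '__main__':
    for inc in fetch_by_statuses(StubClient(), ['under_investigation', 'new']):
        print(inc['incident']['incident_id'], inc['incident']['status'])
```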