Merge pull request #5134 from wazuh/fix/5101-system-tests
Fix enrollment cluster system test
davidjiglesias authored Apr 4, 2024
2 parents 71cc66f + e293cb4 commit d784d5f
Showing 10 changed files with 40 additions and 24 deletions.
CHANGELOG.md: 1 addition, 0 deletions
@@ -49,6 +49,7 @@ All notable changes to this project will be documented in this file.

### Fixed

- Fix enrollment cluster system tests ([#5134](https://github.com/wazuh/wazuh-qa/pull/5134/)) \- (Tests)
- Fix `test_synchronization` system test ([#5089](https://github.com/wazuh/wazuh-qa/pull/5089)) \- (Framework + Tests)
- Fix number of files and their size for `test_zip_size_limit` ([#5133](https://github.com/wazuh/wazuh-qa/pull/5133)) \- (Tests)
- Fix test_shutdown_message system test ([#5087](https://github.com/wazuh/wazuh-qa/pull/5087)) \- (Tests)
@@ -26,11 +26,13 @@
# Remove the agent once the test has finished
@pytest.fixture(scope='module')
def clean_environment():

yield

host_manager.control_service(host='wazuh-agent1', service='wazuh', state="stopped")
agent_id = host_manager.run_command('wazuh-master', f'cut -c 1-3 {WAZUH_PATH}/etc/client.keys')
host_manager.get_host('wazuh-master').ansible("command", f'{WAZUH_PATH}/bin/manage_agents -r {agent_id}',
check=False)
host_manager.control_service(host='wazuh-agent1', service='wazuh', state="stopped")
host_manager.clear_file(host='wazuh-agent1', file_path=os.path.join(WAZUH_PATH, 'etc', 'client.keys'))
host_manager.clear_file(host='wazuh-agent1', file_path=os.path.join(WAZUH_LOGS_PATH, 'ossec.log'))

@@ -47,7 +49,7 @@ def test_agent_enrollment(clean_environment):
# Start the agent enrollment process by restarting the wazuh-agent
host_manager.control_service(host='wazuh-master', service='wazuh', state="restarted")
host_manager.control_service(host='wazuh-worker1', service='wazuh', state="restarted")
host_manager.get_host('wazuh-agent1').ansible('command', f'service wazuh-agent restart', check=False)
host_manager.control_service(host='wazuh-agent1', service='wazuh', state="restarted")

# Run the callback checks for the ossec.log and the cluster.log
HostMonitor(inventory_path=inventory_path,
@@ -1,10 +1,10 @@
wazuh-master:
- regex: .*Agent 'AGENT_ID' with file 'merged.mg' MD5 .*
path: var/ossec/logs/ossec.log
timeout: 30
timeout: 60
- regex: ".*Group assigned: 'GROUP_ID'"
path: /var/ossec/logs/ossec.log
timeout: 10
timeout: 60
- regex: .*Agent 'AGENT_ID' group is 'GROUP_ID'
path: /var/ossec/logs/ossec.log
timeout: 10
timeout: 60
@@ -2,17 +2,17 @@ wazuh-worker1:
- regex: ".*Sending message to master node: '{\"daemon_name\":\"remoted\",\"message\":{\"command\":\"assigngroup\",\
\"parameters\":{\"agent\":\"AGENT_ID\",\"md5\":.*"
path: /var/ossec/logs/ossec.log
timeout: 30
timeout: 60
- regex: ".*Message received from master node: '{\"error\":0,\"message\":\"ok\",\"data\":{\"group\":\"GROUP_ID\"}}'"
path: /var/ossec/logs/ossec.log
timeout: 10
timeout: 60
- regex: .*Agent 'AGENT_ID' group is 'GROUP_ID'
path: /var/ossec/logs/ossec.log
timeout: 10
timeout: 60
wazuh-master:
- regex: .*Agent 'AGENT_ID' with file 'merged.mg' MD5 .*
path: /var/ossec/logs/ossec.log
timeout: 30
timeout: 60
- regex: ".*Group assigned: 'GROUP_ID'"
path: /var/ossec/logs/ossec.log
timeout: 10
timeout: 60
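
For reference, the regex/path/timeout entries above are the expectations that the system test replays with HostMonitor against each node's log. A minimal sketch of that usage, reusing the HostMonitor call that appears later in this diff; the import path, file names and variable values below are illustrative assumptions, not part of this change:

# Sketch only: feed one of the expected-messages YAML files above to HostMonitor.
from wazuh_testing.tools.monitoring import HostMonitor  # import path assumed

inventory_path = 'inventory.yaml'     # illustrative; normally the module-level variable
messages_path = 'data/messages.yaml'  # one of the YAML files shown above
tmp_path = 'tmp'                      # illustrative temporary directory

# Same call pattern the enrollment and group tests use: wait, per host, for every
# regex to appear in the given log path within its timeout.
HostMonitor(inventory_path=inventory_path,
            messages_path=messages_path,
            tmp_path=tmp_path).run(update_position=True)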
@@ -66,7 +66,7 @@

# Variables
test_group = 'group_test'
timeout = 25
timeout = 40


# Tests
@@ -107,12 +107,15 @@ def test_assign_agent_to_a_group(agent_target, initial_status, clean_environment
restart_cluster(test_infra_agents, host_manager)

time.sleep(timeout)

# Check that agent status is active in cluster
check_agent_status(agent_id, agent_name, agent_ip, AGENT_STATUS_ACTIVE, host_manager, test_infra_managers)

if (initial_status == AGENT_STATUS_DISCONNECTED):
host_manager.control_service(host='wazuh-agent1', service=WAZUH_SERVICE_PREFIX, state=WAZUH_SERVICES_STOPPED)

time.sleep(timeout)

check_agent_status(agent_id, agent_name, agent_ip, AGENT_STATUS_DISCONNECTED, host_manager, test_infra_managers)

try:
@@ -123,6 +126,7 @@ def test_assign_agent_to_a_group(agent_target, initial_status, clean_environment
assign_agent_to_new_group('wazuh-master', test_group, agent_id, host_manager)

time.sleep(timeout)

# Check that agent has group set to group_test on Managers
check_agent_groups(agent_id, test_group, test_infra_managers, host_manager)

@@ -63,7 +63,7 @@
tmp_path = os.path.join(local_path, 'tmp')

# Variables
timeout = 10
timeout = 30
test_group = 'group_test'


@@ -276,6 +276,7 @@ def test_guess_multigroups(n_agents, target_node, status_guess_agent_group, clea
# Run the callback checks for the ossec.log
messages_path = master_messages_path if target_node == 'wazuh-master' else worker_messages_path
replace_regex_in_file(['AGENT_ID', 'GROUP_ID'], [agent1_id, expected_group], messages_path)

HostMonitor(inventory_path=inventory_path,
messages_path=messages_path,
tmp_path=tmp_path).run(update_position=True)
@@ -107,13 +107,13 @@ def test_group_hash(target_node, group, n_agents, configure_groups, clean_enviro

# Restart agent
restart_cluster(test_infra_agents, host_manager)
time.sleep(fw.T_10)
time.sleep(fw.T_20)

# Assign group for multigroups case
for agent in range(n_agents):
if group != 'default':
assign_agent_to_new_group(test_infra_managers[0], group, agents_data[agent][1], host_manager)
time.sleep(fw.T_10)
time.sleep(fw.T_20)

# Calculate global hash
expected_global_hash = calculate_global_hash(test_infra_managers[0], host_manager)
@@ -129,7 +129,7 @@ def test_group_hash(target_node, group, n_agents, configure_groups, clean_enviro
# Unassign one agent from group

unassign_agent_from_group(test_infra_managers[0], group, agents_data[0][1], host_manager)
time.sleep(fw.T_10)
time.sleep(fw.T_20)

# Calculate global hash
expected_global_hash = calculate_global_hash(test_infra_managers[0], host_manager)
@@ -59,7 +59,7 @@
'..', '..', 'provisioning', 'enrollment_cluster', 'roles', 'agent-role',
'files', 'ossec.conf')
t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(test_cases_yaml)

TIMEOUT_SECOND_CHECK = 10

@pytest.fixture()
def group_creation_and_assignation(metadata, target_node):
@@ -141,34 +141,40 @@ def test_group_sync_status(metadata, target_node, clean_environment, group_creat
cluster recreates groups without syncreq status.
'''
# Delete group folder

delete_agent_group(metadata['delete_target'], metadata['group_folder_deleted'], host_manager, 'folder')

# Set values
first_time_check = 'synced'
second_time_check = ''

# Check each 0.25 seconds/10 seconds sync_status
# Check each 0.10 seconds/10 seconds sync_status
for _ in range(T_10):
time.sleep(T_025)
agent1_status = json.loads(execute_wdb_query(query, test_infra_hosts[0], host_manager))[1]['group_sync_status']
agent2_status = json.loads(execute_wdb_query(query, test_infra_hosts[0], host_manager))[2]['group_sync_status']
status_info = json.loads(execute_wdb_query(query, test_infra_hosts[0], host_manager))[1:3]
agent1_status = status_info[0]['group_sync_status']
agent2_status = status_info[1]['group_sync_status']

if metadata['agent_in_group'] == 'agent1':
if 'syncreq' == agent1_status and 'synced' == agent2_status:
if agent1_status == 'syncreq' and agent2_status == 'synced':
first_time_check = "syncreq"
break

elif metadata['agent_in_group'] == 'agent2':
if 'synced' == agent1_status and 'syncreq' == agent2_status:
if agent1_status == 'synced' and agent2_status == 'syncreq':
first_time_check = "syncreq"
break

else:
if agent1_status == 'syncreq' and agent2_status == 'syncreq':
first_time_check = 'syncreq'
break

time.sleep(T_5)
time.sleep(0.10)

assert metadata['expected_first_check'] == first_time_check

time.sleep(TIMEOUT_SECOND_CHECK)

# Check sync_status again after TIMEOUT_SECOND_CHECK seconds
if 'syncreq' in execute_wdb_query(query, test_infra_hosts[0], host_manager):
second_time_check = 'syncreq'
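
The loop above polls wazuh-db every 0.10 seconds, for up to T_10 attempts, until both agents report the expected group_sync_status pair. A minimal sketch of that polling pattern as a standalone helper; the helper name and parameters are illustrative and not part of this change, while execute_wdb_query, query, test_infra_hosts and host_manager are assumed to be the same objects the test already uses:

import json
import time

def wait_for_sync_status(expected_agent1, expected_agent2, retries=10, delay=0.10):
    # Sketch only: poll wazuh-db until both agents report the expected
    # group_sync_status values, or give up after `retries` attempts.
    for _ in range(retries):
        status_info = json.loads(execute_wdb_query(query, test_infra_hosts[0], host_manager))[1:3]
        if (status_info[0]['group_sync_status'] == expected_agent1 and
                status_info[1]['group_sync_status'] == expected_agent2):
            return True
        time.sleep(delay)
    return False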
@@ -29,7 +29,7 @@
# Variables
t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path)
queries = ['sql select `group` from agent;', 'sql select name from `group`;', 'sql select id_group from belongs;']

TIMEOUT_GET_GROUPS_ID = 3

# Fixtures
@pytest.fixture()
@@ -112,6 +112,8 @@ def test_remove_group(metadata, group, target_node, pre_configured_groups, clean
messages_path=messages_path,
tmp_path=tmp_path).run(update_position=True)

sleep(TIMEOUT_GET_GROUPS_ID)

for manager in test_infra_managers:
group_ids[manager] = str(get_group_id(group, manager, host_manager))
