diff --git a/test/libsinsp_e2e/CMakeLists.txt b/test/libsinsp_e2e/CMakeLists.txt index ac4b15c8804..67f3cd67880 100755 --- a/test/libsinsp_e2e/CMakeLists.txt +++ b/test/libsinsp_e2e/CMakeLists.txt @@ -36,14 +36,19 @@ add_executable(libsinsp_e2e_tests container/container_cgroup.cpp container/docker_utils.cpp event_capture.cpp + forking.cpp fs.cpp + ipv6.cpp main.cpp paths.cpp process.cpp subprocess.cpp sys_call_test.cpp + tcp_client_server.cpp + tcp_client_server_ipv4_mapped.cpp threadinfo.cpp thread_state.cpp + udp_client_server.cpp ) if(BUILD_BPF) diff --git a/test/libsinsp_e2e/forking.cpp b/test/libsinsp_e2e/forking.cpp new file mode 100644 index 00000000000..a5286fc0747 --- /dev/null +++ b/test/libsinsp_e2e/forking.cpp @@ -0,0 +1,1113 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include "event_capture.h" +#include "sys_call_test.h" + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#define FILENAME "test_tmpfile" + +TEST_F(sys_call_test, forking) +{ + // int callnum = 0; + + int ptid; // parent tid + int ctid; // child tid + int gptid; // grandparent tid + int xstatus = 33; // child exit value + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == ptid || evt->get_tid() == ctid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + pid_t childtid; + int status; + childtid = fork(); + + int fd = creat(FILENAME, S_IRWXU); + + if (childtid >= 0) // fork succeeded + { + if (childtid == 0) // fork() returns 0 to the child process + { + ctid = getpid(); + usleep(100); // sleep for 0.1 seconds + close(fd); + _exit(xstatus); // child exits with specific return code + } + else // fork() returns new pid to the parent process + { + ptid = getpid(); + gptid = getppid(); + + close(fd); + + wait(&status); // wait for child to exit, and store its status + // Use WEXITSTATUS to validate status. + } + } + else + { + FAIL(); + } + }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) {}; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); +} + +TEST_F(sys_call_test, forking_while_scap_stopped) +{ + int ptid; // parent tid + int ctid; // child tid + int xstatus = 33; // child exit value + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == ptid || evt->get_tid() == ctid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + int status; + + // + // Stop the capture just before the fork so we lose the event. 
+ // + { + std::scoped_lock inspector_handle_lock(inspector_handle); + inspector_handle->stop_capture(); + } + + ctid = fork(); + + int fd = creat(FILENAME, S_IRWXU); + + if (ctid >= 0) // fork succeeded + { + if (ctid == 0) // fork() returns 0 to the child process + { + // + // Restart the capture. + // This is a bit messy because we are in the child + // but it works because the underlying scap's fds + // are duplicated so the ioctl will make its way to + // the parent process as well. + // It's a simple way to make sure the capture is started + // after the child's clone returned. + // + inspector_handle.unsafe_ptr()->start_capture(); + + // + // Wait for 5 seconds to make sure the process will still + // exist when the sinsp will do the lookup to /proc + // + usleep(5000000); + close(fd); + _exit(xstatus); // child exits with specific return code + } + else // fork() returns new pid to the parent process + { + ptid = getpid(); + + close(fd); + + wait(&status); // wait for child to exit, and store its status + // Use WEXITSTATUS to validate status. + } + } + else + { + FAIL(); + } + }; + + // + // OUTPUT VALDATION + // + bool child_exists = false; + bool parent_exists = false; + + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + + if (e->get_type() == PPME_SCHEDSWITCH_1_E || e->get_type() == PPME_SCHEDSWITCH_6_E || + e->get_type() == PPME_PROCINFO_E) + { + return; + } + + // + // In both cases, the process should exist + // + if (e->get_tid() == ptid && !parent_exists) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + if (ti) + { + parent_exists = true; + } + + EXPECT_NE((sinsp_threadinfo*)NULL, ti); + } + + if (e->get_tid() == ctid && !child_exists) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + if (ti) + { + child_exists = true; + } + + EXPECT_NE((sinsp_threadinfo*)NULL, ti); + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + + EXPECT_TRUE(child_exists); + EXPECT_TRUE(parent_exists); +} + +TEST_F(sys_call_test, forking_process_expired) +{ + int ptid; // parent tid + int ctid; // child tid + int status; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return evt->get_tid() == ptid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + ctid = fork(); + + if (ctid >= 0) // fork succeeded + { + if (ctid == 0) // fork() returns 0 to the child process + { + pause(); + FAIL(); + } + else // fork() returns new pid to the parent process + { + ptid = getpid(); + + // + // Wait 10 seconds. 
During this time, the process should NOT be removed + // + struct timespec req, rem; + req.tv_sec = 10; + req.tv_nsec = 0; + + syscall(__NR_nanosleep, &req, &rem); + + kill(ctid, SIGUSR1); + wait(&status); + } + } + else + { + FAIL(); + } + }; + + bool sleep_caught = false; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + + if (e->get_tid() == ptid) + { + if (e->get_type() == PPME_SYSCALL_NANOSLEEP_E && !sleep_caught) + { + // + // The child should exist + // + sinsp_threadinfo* ti = param.m_inspector->get_thread_ref(ctid, false, true).get(); + EXPECT_NE((sinsp_threadinfo*)NULL, ti); + } + else if (e->get_type() == PPME_SYSCALL_NANOSLEEP_X && !sleep_caught) + { + // + // The child should exist + // + sinsp_threadinfo* ti = param.m_inspector->get_thread_ref(ctid, false, true).get(); + EXPECT_NE((sinsp_threadinfo*)NULL, ti); + sleep_caught = true; + } + } + else + { + FAIL(); + } + }; + + ASSERT_NO_FATAL_FAILURE({ + event_capture::run(test, + callback, + filter, + event_capture::do_nothing, + event_capture::do_nothing, + event_capture::always_continue, + 131072, + 5 * ONE_SECOND_IN_NS, + ONE_SECOND_IN_NS); + }); + + EXPECT_TRUE(sleep_caught); +} + +/////////////////////////////////////////////////////////////////////////////// +// CLONE VARIANTS +/////////////////////////////////////////////////////////////////////////////// +int ctid; // child tid + +typedef struct +{ + int fd; + int signal; +} clone_params; + +static int clone_callback_1(void* arg) +{ + clone_params* cp; + + cp = (clone_params*)arg; /* Cast arg to true form */ + // getpid() is cached by glibc, usually is invalidated + // correctly in case of fork() or clone() but since we are + // using a weird clone() here something goes wrong with + // recent version of glibc + ctid = syscall(SYS_getpid); + close(cp->fd); + return 0; +} + +TEST_F(sys_call_test, forking_clone_fs) +{ + int callnum = 0; + char bcwd[1024]; + int prfd; + int ptid; // parent tid + int flags = CLONE_FILES | CLONE_FS | CLONE_VM; + int drflags = PPM_CL_CLONE_FILES | PPM_CL_CLONE_FS | PPM_CL_CLONE_VM; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == ptid || evt->get_tid() == ctid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + const int STACK_SIZE = 65536; /* Stack size for cloned child */ + char* stack; /* Start of stack buffer area */ + char* stackTop; /* End of stack buffer area */ + clone_params cp; /* Passed to child function */ + int status; + pid_t pid; + + ptid = getpid(); + + /* Set up an argument structure to be passed to cloned child, and + set some process attributes that will be modified by child */ + + cp.fd = open(FILENAME, O_CREAT | O_WRONLY, S_IRWXU); /* Child will close this fd */ + if (cp.fd == -1) + FAIL(); + prfd = cp.fd; + + cp.signal = SIGTERM; /* Child will change disposition */ + if (signal(cp.signal, SIG_IGN) == SIG_ERR) + FAIL(); + + /* Initialize clone flags using command-line argument (if supplied) */ + + /* Allocate stack for child */ + + stack = (char*)malloc(STACK_SIZE); + if (stack == NULL) + FAIL(); + stackTop = stack + STACK_SIZE; /* Assume stack grows downward */ + + /* Create child; child commences execution in childFunc() */ + + if (clone(clone_callback_1, stackTop, flags, &cp) == -1) + FAIL(); + + /* Parent falls through to here. Wait for child; __WCLONE option is + required for child notifying with signal other than SIGCHLD. 
*/ + + pid = waitpid(-1, &status, __WCLONE); + if (pid == -1) + FAIL(); + + close(cp.fd); + + sleep(1); + free(stack); + }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + if (e->get_type() == PPME_SYSCALL_CLONE_20_X) + { + uint64_t res = std::stoll(e->get_param_value_str("res", false)); + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (ti->get_comm() != "libsinsp_e2e_te") + { + return; + } + + if (res == 0) + { + EXPECT_EQ(ctid, ti->m_tid); + } + else + { + EXPECT_EQ(ptid, ti->m_tid); + } + + EXPECT_NE(std::string::npos, e->get_param_value_str("exe").find("libsinsp_e2e_tests")); + EXPECT_EQ("libsinsp_e2e_te", ti->get_comm()); + std::string tmps = getcwd(bcwd, 1024); + EXPECT_EQ(tmps + "/", ti->get_cwd()); + EXPECT_EQ("", e->get_param_value_str("cwd")); + EXPECT_EQ(drflags, std::stol(e->get_param_value_str("flags", false))); + callnum++; + } + else if (e->get_type() == PPME_SYSCALL_CLOSE_E) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (ti->m_tid == ptid || ti->m_tid == ctid) + { + int64_t clfd = std::stoll(e->get_param_value_str("fd", false)); + + if (clfd == prfd) + { + callnum++; + } + } + } + else if (e->get_type() == PPME_SYSCALL_CLOSE_X) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (callnum < 3) + { + return; + } + + int64_t res = std::stoll(e->get_param_value_str("res", false)); + + if (ti->m_tid == ptid) + { + EXPECT_GT(0, res); + } + else if (ti->m_tid == ctid) + { + EXPECT_EQ(0, res); + } + + callnum++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + + if (callnum != 6 && callnum != 7) + { + FAIL() << "callnum=" << callnum; + } +} + +TEST_F(sys_call_test, forking_clone_nofs) +{ + int callnum = 0; + char bcwd[1024]; + int prfd; + int ptid; // parent tid + int flags = CLONE_FS | CLONE_VM; + int drflags = PPM_CL_CLONE_FS | PPM_CL_CLONE_VM; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == ptid || evt->get_tid() == ctid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + const int STACK_SIZE = 65536; /* Stack size for cloned child */ + char* stack; /* Start of stack buffer area */ + char* stackTop; /* End of stack buffer area */ + clone_params cp; /* Passed to child function */ + int status; + pid_t pid; + + ptid = getpid(); + + /* Set up an argument structure to be passed to cloned child, and + set some process attributes that will be modified by child */ + + cp.fd = open(FILENAME, O_CREAT | O_WRONLY, S_IRWXU); /* Child will close this fd */ + if (cp.fd == -1) + FAIL(); + prfd = cp.fd; + + cp.signal = SIGTERM; /* Child will change disposition */ + if (signal(cp.signal, SIG_IGN) == SIG_ERR) + FAIL(); + + /* Initialize clone flags using command-line argument (if supplied) */ + + /* Allocate stack for child */ + + stack = (char*)malloc(STACK_SIZE); + if (stack == NULL) + FAIL(); + stackTop = stack + STACK_SIZE; /* Assume stack grows downward */ + + /* Create child; child commences execution in childFunc() */ + + if (clone(clone_callback_1, stackTop, flags, &cp) == -1) + FAIL(); + + /* Parent falls through to here. Wait for child; __WCLONE option is + required for child notifying with signal other than SIGCHLD. 
*/ + + pid = waitpid(-1, &status, __WCLONE); + if (pid == -1) + FAIL(); + + close(cp.fd); + + sleep(1); + free(stack); + }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + if (e->get_type() == PPME_SYSCALL_CLONE_20_X) + { + uint64_t res = std::stoull(e->get_param_value_str("res", false)); + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (ti->get_comm() != "libsinsp_e2e_te") + { + return; + } + + if (res == 0) + { + EXPECT_EQ(ctid, ti->m_tid); + } + else + { + EXPECT_EQ(ptid, ti->m_tid); + } + + EXPECT_NE(std::string::npos, e->get_param_value_str("exe").find("libsinsp_e2e_te")); + EXPECT_EQ("libsinsp_e2e_te", ti->get_comm()); + std::string tmps = getcwd(bcwd, 1024); + EXPECT_EQ(tmps + "/", ti->get_cwd()); + EXPECT_EQ("", e->get_param_value_str("cwd")); + EXPECT_EQ(drflags, std::stol(e->get_param_value_str("flags", false))); + callnum++; + } + else if (e->get_type() == PPME_SYSCALL_CLOSE_E) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (ti->m_tid == ptid || ti->m_tid == ctid) + { + int64_t clfd = std::stoll(e->get_param_value_str("fd", false)); + + if (clfd == prfd) + { + callnum++; + } + } + } + else if (e->get_type() == PPME_SYSCALL_CLOSE_X) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (callnum < 3) + { + return; + } + + int64_t res = std::stoll(e->get_param_value_str("res", false)); + + if (ti->m_tid == ptid) + { + EXPECT_EQ(0, res); + } + else if (ti->m_tid == ctid) + { + EXPECT_EQ(0, res); + } + + callnum++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + + if (callnum != 6 && callnum != 7) + { + FAIL(); + } +} + +static int clone_callback_2(void* arg) +{ + char bcwd[256]; + + if (chdir("/") != 0) + { + return -1; + } + std::string tmps = getcwd(bcwd, 256); + syscall(SYS_exit); + return -1; +} + +TEST_F(sys_call_test, forking_clone_cwd) +{ + int callnum = 0; + char oriwd[1024]; + char bcwd[256]; + int ptid; // parent tid + int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD; + int drflags = PPM_CL_CLONE_VM | PPM_CL_CLONE_FS | PPM_CL_CLONE_FILES | PPM_CL_CLONE_SIGHAND | + PPM_CL_CLONE_THREAD; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return evt->get_tid() == ptid; }; + + // + // TEST CODE + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + const int STACK_SIZE = 65536; /* Stack size for cloned child */ + char* stack; /* Start of stack buffer area */ + char* stackTop; /* End of stack buffer area */ + clone_params cp; /* Passed to child function */ + + ptid = getpid(); + + ASSERT_TRUE(getcwd(oriwd, 1024) != NULL); + + /* Allocate stack for child */ + + stack = (char*)malloc(STACK_SIZE); + if (stack == NULL) + FAIL(); + stackTop = stack + STACK_SIZE; /* Assume stack grows downward */ + + /* Create child; child commences execution in childFunc() */ + + if (clone(clone_callback_2, stackTop, flags, &cp) == -1) + { + FAIL(); + } + + sleep(1); + + std::string tmps = getcwd(bcwd, 256); + + ASSERT_TRUE(chdir(oriwd) == 0); + + sleep(1); + free(stack); + }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + if (e->get_type() == PPME_SYSCALL_CLONE_20_X) + { + uint64_t res = std::stoull(e->get_param_value_str("res", false)); + sinsp_threadinfo* ti = e->get_thread_info(false); + if (ti->get_comm() != "libsinsp_e2e_te") + { + return; + } + + if (res == 0) + { + 
EXPECT_EQ(ctid, ti->m_tid); + } + else + { + EXPECT_EQ(ptid, ti->m_tid); + } + + EXPECT_NE(std::string::npos, e->get_param_value_str("exe").find("libsinsp_e2e_tests")); + EXPECT_EQ("libsinsp_e2e_te", ti->get_comm()); + EXPECT_EQ(drflags, std::stol(e->get_param_value_str("flags", false))); + callnum++; + } + else if (e->get_type() == PPME_SYSCALL_GETCWD_E) + { + sinsp_threadinfo* ti = e->get_thread_info(false); + + if (ti->m_tid == ptid) + { + if (callnum > 1) + { + EXPECT_EQ(bcwd, ti->get_cwd()); + } + } + else if (ti->m_tid == ctid) + { + EXPECT_EQ("/", ti->get_cwd()); + } + + callnum++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + + EXPECT_EQ(3, callnum); +} + +TEST_F(sys_call_test, forking_main_thread_exit) +{ + int evtnum = 0; + int callnum = 0; + int fd; + pid_t cpid; // parent tid + + event_filter_t filter = [&](sinsp_evt* evt) + { + sinsp_threadinfo* ti = evt->get_thread_info(); + if (ti) + { + return ti->m_pid == cpid; + } + else + { + return false; + } + }; + + run_callback_t test = [&](concurrent_object_handle inspector) + { + int status; + + // ptid = getpid(); + + cpid = fork(); + EXPECT_NE(-1, cpid); + if (cpid == 0) + { + execlp(LIBSINSP_TEST_RESOURCES_PATH "/forking_main_thread_exit", + LIBSINSP_TEST_RESOURCES_PATH "/forking_main_thread_exit", + NULL); + perror("execlp"); + FAIL(); + } + else + { + // + // Father, just wait for termination + // + wait(&status); + } + }; + + captured_event_callback_t callback = [&](const callback_param& param) + { + evtnum++; + if (param.m_evt->get_type() == PPME_SYSCALL_OPEN_X) + { + if (param.m_evt->get_param_value_str("name") == "/etc/passwd") + { + EXPECT_EQ("/etc/passwd", param.m_evt->get_param_value_str("fd")); + fd = *(int64_t*)param.m_evt->get_param(0)->m_val; + ++callnum; + } + } + else if (param.m_evt->get_type() == PPME_SYSCALL_OPENAT_2_X) + { + if (param.m_evt->get_param_value_str("name") == "/etc/passwd") + { + EXPECT_EQ("/etc/passwd", param.m_evt->get_param_value_str("fd")); + memcpy(&fd, (int64_t*)param.m_evt->get_param(0)->m_val, sizeof(fd)); + ++callnum; + } + } + else if (param.m_evt->get_type() == PPME_PROCEXIT_1_E && param.m_evt->get_tid() == cpid) + { + ++callnum; + } + else if (param.m_evt->get_type() == PPME_SYSCALL_READ_E) + { + if (memcmp(&fd, param.m_evt->get_param(0)->m_val, sizeof(fd)) == 0) + { + EXPECT_EQ("/etc/passwd", param.m_evt->get_param_value_str("fd")); + ++callnum; + } + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + EXPECT_EQ(3, callnum); +} + +// This test generally does the following: +// - Ensures that a stale process exists +// - Starts another process with the same pid as the stale process, in a pid +// namespace (which counts as "in a container"). +// - Checks to see if the stale process information is used. +// +// To distinguish between the stale process and up-to-date process, use the +// working directory of the process. The stale process sets its working +// directory to "/dev". +// +// Prior to the fix for 664, the stale process would be used and the +// working directory of the second process would (mistakenly) be +// /dev. With the fix, the stale process information is detected and +// removed. +// + +// Create the initial stale process. It chdir()s to "/dev", stops the +// inspector, and returns. 
+static int stop_sinsp_and_exit(void* arg) +{ + // Get our own, unlocked concurrent inspector handle + concurrent_object_handle inspector_handle = *(concurrent_object_handle*)arg; + + if (chdir("/dev") != 0) + { + return 1; + } + + { + std::scoped_lock inspector_handle_lock(inspector_handle); + inspector_handle->stop_capture(); + } + + // Wait 5 seconds. This ensures that the state for this + // process will be considered stale when the second process + // with the same pid runs. + sleep(5); + + return 0; +} + +// Immediately return. Started by launcher. +static int do_nothing(void* arg) +{ + return 0; +} + +struct stale_clone_ctx +{ + std::mutex m_perform_clone_mtx; + std::condition_variable m_perform_clone; + bool m_clone_ready; + bool m_clone_complete; +}; + +static pid_t clone_helper(int (*func)(void*), + void* arg, + int addl_clone_args = 0, + bool wait_for_complete = true, + char** stackp = NULL); + +// Wait until signaled by the main test thread, start a single +// do_nothing(), signal the main test thread, and exit. +static int launcher(void* arg) +{ + stale_clone_ctx* ctx = (stale_clone_ctx*)arg; + std::unique_lock lk(ctx->m_perform_clone_mtx); + ctx->m_perform_clone.wait(lk, [&] { return ctx->m_clone_ready; }); + + pid_t child = clone_helper(do_nothing, NULL); + EXPECT_NE(child, 0); + + ctx->m_clone_complete = true; + lk.unlock(); + ctx->m_perform_clone.notify_one(); + + if (child == 0) + { + return 1; + } + + return 0; +} + +// Start a new thread using clone(), passing the provided arg. On +// success, returns the process id of the thread that was created. +// On failure, returns 0. Used to start all the other actions. + +static pid_t clone_helper(int (*func)(void*), + void* arg, + int addl_clone_args, + bool wait_for_complete, + char** stackp) +{ + const int STACK_SIZE = 65536; /* Stack size for cloned child */ + char* stack; /* Start of stack buffer area */ + char* stackTop; /* End of stack buffer area */ + int flags = CLONE_VM | CLONE_FILES | SIGCHLD | addl_clone_args; + pid_t pid = 0; + + /* Allocate stack for child */ + stack = (char*)malloc(STACK_SIZE); + if (stack == NULL) + { + return 0; + } + + stackTop = stack + STACK_SIZE; /* Assume stack grows downward */ + + if ((pid = clone(func, stackTop, flags, arg)) == -1) + { + free(stack); + return 0; + } + + if (wait_for_complete) + { + int status; + + if (waitpid(pid, &status, 0) == -1 || status != 0) + { + pid = 0; + } + free(stack); + } + else + { + *stackp = stack; + } + + return pid; +} + +TEST_F(sys_call_test, remove_stale_thread_clone_exit) +{ + std::atomic clones_seen(0); + stale_clone_ctx ctx; + std::atomic recycle_pid(0); + const char* last_pid_filename = "/proc/sys/kernel/ns_last_pid"; + struct stat info; + + ctx.m_clone_ready = false; + ctx.m_clone_complete = false; + + // On some operating systems, + // /proc/sys/kernel/ns_last_pid does not exist. In + // those cases, we print a message and trivially pass + // the test. + + if (stat(last_pid_filename, &info) == -1 && errno == ENOENT) + { + fprintf(stderr, "Doing nothing as %s does not exist\n", last_pid_filename); + return; + } + + // All events matching recycle_pid are selected. Since + // recycle_pid is only set once the first thread exits, this + // effectively captures the actions of the second thread that + // uses the recycled pid. 
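For reference only (not part of this patch): the pid-recycling trick that the test below builds on is writing to /proc/sys/kernel/ns_last_pid, which holds the last pid handed out in the current pid namespace; writing N-1 makes the next fork()/clone() in that namespace receive pid N, unless another process creation races in between. A minimal sketch, with set_next_pid as a hypothetical helper name:

#include <cstdio>
#include <sys/types.h>

// Illustrative sketch only: ask the kernel to hand out `desired` as the next
// pid in this pid namespace (best effort; writing ns_last_pid needs privileges
// and can still be raced by other process creation).
static bool set_next_pid(pid_t desired)
{
	FILE* f = fopen("/proc/sys/kernel/ns_last_pid", "w");
	if (f == NULL)
	{
		return false;
	}
	bool ok = fprintf(f, "%d", (int)desired - 1) > 0;
	fclose(f);
	return ok;
}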
+ event_filter_t filter = [&](sinsp_evt* evt) + { + sinsp_threadinfo* tinfo = evt->get_thread_info(); + pid_t rp = recycle_pid.load(); + return (rp != 0 && tinfo && tinfo->m_tid == rp); + }; + + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + pid_t launcher_pid; + char* launcher_stack = NULL; + + // Start a thread that simply waits until signaled, + // and then creates a second do-nothing thread. We'll + // arrange that the host-facing pid is set to a known + // value before this thread creates the second thread. + launcher_pid = clone_helper(launcher, &ctx, CLONE_NEWPID, false, &launcher_stack); + ASSERT_GE(launcher_pid, 0); + + // This is asynchronous so wait to make sure the thread has started. + sleep(1); + + // Start a thread that runs and stops the inspector_handle right + // before exiting. This gives us a pid we can use for the + // second thread. + recycle_pid.store(clone_helper(stop_sinsp_and_exit, &inspector_handle)); + ASSERT_GE(recycle_pid.load(), 0); + + // The first thread has started, turned off the capturing, and + // exited, so start capturing again. + { + std::scoped_lock inspector_handle_lock(inspector_handle); + inspector_handle->start_capture(); + } + + // Arrange that the next thread/process created has + // pid ctx.m_desired pid by writing to + // ns_last_pid. Unfortunately, this has a race + // condition--it's possible that after writing to + // ns_last_pid another different process is started, + // stealing the pid. However, as long as the process + // doesn't have a working directory of "/dev", that + // will be enough to distinguish it from the stale + // process. + + FILE* last_pid_file; + + { + std::lock_guard lk(ctx.m_perform_clone_mtx); + + last_pid_file = fopen(last_pid_filename, "w"); + + ASSERT_NE(last_pid_file, (FILE*)NULL); + + ASSERT_EQ(flock(fileno(last_pid_file), LOCK_EX), 0); + + ASSERT_GT(fprintf(last_pid_file, "%d", recycle_pid.load() - 1), 0); + + fclose(last_pid_file); + + ctx.m_clone_ready = true; + } + + // Signal the launcher thread telling it to start the do_nothing thread. + ctx.m_perform_clone.notify_one(); + + // Wait to be signaled back from the launcher thread that it's done. + { + std::unique_lock lk(ctx.m_perform_clone_mtx); + + ctx.m_perform_clone.wait(lk, [&] { return ctx.m_clone_complete; }); + } + + // The launcher thread should have exited, but just to + // make sure explicitly kill it. + ASSERT_EQ(kill(launcher_pid, SIGTERM), 0); + + free(launcher_stack); + + return; + }; + + // To verify the actions, the filter selects all events + // related to pid recycled_pid. It should see: + // - a clone() representing the second thread using the recycled pid. + // - events with pid=recycled_pid (the do_nothing started by + // create_do_nothings) and cwd= + // + // If any event with pid=recycled_pid has a cwd of + // /dev/, the test fails. + + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + uint16_t etype = e->get_type(); + sinsp_threadinfo* tinfo = e->get_thread_info(); + ASSERT_TRUE((tinfo != NULL)); + + if ((etype == PPME_SYSCALL_CLONE_11_X || etype == PPME_SYSCALL_CLONE_16_X || + etype == PPME_SYSCALL_CLONE_17_X || etype == PPME_SYSCALL_CLONE_20_X) && + e->get_direction() == SCAP_ED_OUT) + { + ++clones_seen; + } + + EXPECT_STRNE(tinfo->get_cwd().c_str(), "/dev/"); + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + + // We must have seen one clone related to the recycled + // pid. 
Otherwise it never actually checked the cwd at all. + EXPECT_EQ(clones_seen.load(), 1u); +} diff --git a/test/libsinsp_e2e/ipv6.cpp b/test/libsinsp_e2e/ipv6.cpp new file mode 100644 index 00000000000..49c03ec8fcf --- /dev/null +++ b/test/libsinsp_e2e/ipv6.cpp @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include "scap_file_reader.h" +#include "libsinsp_test_var.h" + +#include + +#include + +#include +#include +#include +#include + +typedef std::function validate_func_t; + +class ipv6_filtercheck_test : public testing::Test +{ +protected: + struct cstring_comp + { + bool operator()(const char* s1, const char* s2) const { return strcmp(s1, s2) < 0; } + }; + + typedef std::set cstringset_t; + + virtual void SetUp() {} + + virtual void TearDown() {} + + virtual void read_file(const char* filename, + const char* extra_filter, + std::function evtcb, + bool generate_ip_net_filters = true) + { + m_inspector = file_reader.setup_read_file(); + + m_socket_connected = false; + m_check_local_remote = false; + m_check_is_server = false; + + if (generate_ip_net_filters) + { + gen_ip_net_filters(); + } + + std::string filter = + "evt.type in (socket, connect, recvfrom, sendto, close, accept, connect, bind, read, " + "write, poll) and evt.dir=< and fd.type!=file and fd.type!=unix and fd.type!=file and " + "fd.type!=pipe"; + if (extra_filter) + { + filter += " and "; + filter += extra_filter; + } + + file_reader.run_inspector(filename, filter, evtcb); + } + + void check_ipv6_filterchecks(sinsp_evt* evt) + { + std::string full_output; + std::string full = + "*%evt.num %evt.outputtime %evt.cpu %proc.name (%thread.tid) %evt.dir %evt.type " + "%evt.info"; + sinsp_evt_formatter(m_inspector.get(), full, m_filterlist).tostring(evt, &full_output); + + verify_filtercheck(evt, "*%fd.type", "ipv6", full_output); + verify_filtercheck(evt, "*%fd.typechar", "6", full_output); + verify_filtercheck(evt, "*%fd.sockfamily", "ip", full_output); + + if (m_socket_connected) + { + verify_filtercheck(evt, "*%fd.name", m_conn_names, full_output); + + verify_filtercheck(evt, "*%fd.cip", m_client_ip, full_output); + verify_filtercheck(evt, "*%fd.sip", m_server_ip, full_output); + + verify_filtercheck(evt, "*%fd.cport", m_client_port, full_output); + verify_filtercheck(evt, "*%fd.sport", m_server_ports, full_output); + + ASSERT_TRUE(m_ip_client_filter->run(evt)) + << "fd.ip=" << m_client_ip + << " did not match event. Full event output: " << full_output; + ASSERT_TRUE(m_ip_server_filter->run(evt)) + << "fd.ip=" << m_server_ip + << " did not match event. Full event output: " << full_output; + + ASSERT_TRUE(m_net_client_filter->run(evt)) + << "fd.net=" << m_client_net + << " did not match event. Full event output: " << full_output; + ASSERT_TRUE(m_net_server_filter->run(evt)) + << "fd.net=" << m_server_net + << " did not match event. 
Full event output: " << full_output; + + ASSERT_TRUE(m_cnet_filter->run(evt)) + << "fd.cnet=" << m_client_net + << " did not match event. Full event output: " << full_output; + ASSERT_TRUE(m_snet_filter->run(evt)) + << "fd.snet=" << m_server_net + << " did not match event. Full event output: " << full_output; + + verify_filtercheck(evt, "*%fd.cproto", m_client_proto, full_output); + verify_filtercheck(evt, "*%fd.sproto", m_server_protos, full_output); + + verify_filtercheck(evt, "*%fd.l4proto", m_l4proto, full_output); + + if (m_check_is_server) + { + verify_filtercheck(evt, "*%fd.is_server", m_is_server, full_output); + } + } + + if (m_check_local_remote) + { + verify_filtercheck(evt, "*%fd.lip", m_client_ip, full_output); + verify_filtercheck(evt, "*%fd.rip", m_server_ip, full_output); + + verify_filtercheck(evt, "*%fd.lport", m_client_port, full_output); + verify_filtercheck(evt, "*%fd.rport", m_server_ports, full_output); + + ASSERT_TRUE(m_lnet_filter->run(evt)) + << "fd.lnet=" << m_client_net + << " did not match event. Full event output: " << full_output; + ASSERT_TRUE(m_rnet_filter->run(evt)) + << "fd.rnet=" << m_server_net + << " did not match event. Full event output: " << full_output; + + verify_filtercheck(evt, "*%fd.lproto", m_client_proto, full_output); + verify_filtercheck(evt, "*%fd.rproto", m_server_protos, full_output); + } + } + + void verify_filtercheck(sinsp_evt* evt, + const char* format, + const char* expectedc, + std::string full_output) + { + cstringset_t expected; + expected.insert(expectedc); + + verify_filtercheck(evt, format, expected, full_output); + } + + void verify_filtercheck(sinsp_evt* evt, + const char* format, + std::string& expecteds, + std::string full_output) + { + cstringset_t expected; + expected.insert(expecteds.c_str()); + + verify_filtercheck(evt, format, expected, full_output); + } + + void verify_filtercheck(sinsp_evt* evt, + const char* cformat, + cstringset_t& expected, + std::string full_output) + { + std::string output; + std::string format = cformat; + + sinsp_evt_formatter(m_inspector.get(), format, m_filterlist).tostring(evt, &output); + + auto it = expected.find(output.c_str()); + + ASSERT_TRUE(it != expected.end()) + << " Result of format " << cformat + << " did not match any expected value. 
Full event output: " << full_output; + } + + void gen_ip_net_filters() + { + auto inspector = file_reader.setup_read_file(); + sinsp_filter_compiler ip_client(inspector.get(), "fd.ip=" + m_client_ip); + m_ip_client_filter = std::move(ip_client.compile()); + + sinsp_filter_compiler ip_server(inspector.get(), "fd.ip=" + m_server_ip); + m_ip_server_filter = std::move(ip_server.compile()); + + sinsp_filter_compiler net_client(inspector.get(), "fd.net=" + m_client_net); + m_net_client_filter = std::move(net_client.compile()); + + sinsp_filter_compiler net_server(inspector.get(), "fd.net=" + m_server_net); + m_net_server_filter = std::move(net_server.compile()); + + sinsp_filter_compiler cnet(inspector.get(), "fd.cnet=" + m_client_net); + m_cnet_filter = std::move(cnet.compile()); + + sinsp_filter_compiler snet(inspector.get(), "fd.snet=" + m_server_net); + m_snet_filter = std::move(snet.compile()); + + sinsp_filter_compiler lnet(inspector.get(), "fd.lnet=" + m_client_net); + m_lnet_filter = std::move(lnet.compile()); + + sinsp_filter_compiler rnet(inspector.get(), "fd.rnet=" + m_server_net); + m_rnet_filter = std::move(rnet.compile()); + } + + std::string m_client_ip; + std::string m_server_ip; + std::string m_client_port; + cstringset_t m_server_ports; + std::string m_client_net; + std::string m_server_net; + std::string m_client_proto; + cstringset_t m_server_protos; + cstringset_t m_conn_names; + std::string m_l4proto; + std::string m_is_server; + + sinsp_filter_check_list m_filterlist; + std::shared_ptr m_ip_client_filter; + std::shared_ptr m_ip_server_filter; + std::shared_ptr m_net_client_filter; + std::shared_ptr m_net_server_filter; + std::shared_ptr m_cnet_filter; + std::shared_ptr m_snet_filter; + std::shared_ptr m_lnet_filter; + std::shared_ptr m_rnet_filter; + std::shared_ptr m_inspector; + scap_file_reader file_reader; + bool m_socket_connected; + bool m_check_local_remote; + bool m_check_is_server; +}; + +TEST_F(ipv6_filtercheck_test, curl_google_dnsreq) +{ + m_client_ip = "2600:1f18:262c:6542:9aa6:df7a:9a47:d29e"; + m_server_ip = "2001:4860:4860::8888"; + m_client_port = "40251"; + m_server_ports = {"53"}; + m_client_net = "2600:1f18:262c:6542::/64"; + m_server_net = "2001:4860:4860::/64"; + m_client_proto = "40251"; + m_server_protos = {"domain"}; + m_conn_names = {"2600:1f18:262c:6542:9aa6:df7a:9a47:d29e:40251->2001:4860:4860::8888:domain"}; + m_l4proto = "udp"; + m_is_server = "false"; + + read_file(LIBSINSP_TEST_CAPTURES_PATH "/curl_google.scap", + "thread.tid=17498", + [this](sinsp_evt* evt) + { + std::string evname = std::string(evt->get_name()); + + // Once we see a connect or bind, we can assume the + // socket is connected and it's possible to get + // client/server and local/remote information. 
+ if (evname == "connect" || evname == "bind") + { + m_socket_connected = true; + m_check_local_remote = true; + m_check_is_server = true; + } + + check_ipv6_filterchecks(evt); + }); +} + +TEST_F(ipv6_filtercheck_test, curl_google_www) +{ + m_client_ip = "2600:1f18:262c:6542:9aa6:df7a:9a47:d29e"; + m_server_ip = "2607:f8b0:4004:802::2004"; + m_client_port = "37140"; + m_server_ports = {"80"}; + m_client_net = "2600:1f18:262c:6542::/64"; + m_server_net = "2607:f8b0:4004:802::/64"; + m_client_proto = "37140"; + m_server_protos = {"http"}; + m_conn_names = {"2600:1f18:262c:6542:9aa6:df7a:9a47:d29e:37140->2607:f8b0:4004:802::2004:http"}; + m_l4proto = "tcp"; + m_is_server = "false"; + + read_file(LIBSINSP_TEST_CAPTURES_PATH "/curl_google.scap", + "thread.tid=17497", + [this](sinsp_evt* evt) + { + std::string evname = std::string(evt->get_name()); + + // Once we see a connect or bind, we can assume the + // socket is connected and it's possible to get + // client/server and local/remote information. + if (evname == "connect" || evname == "bind") + { + m_socket_connected = true; + m_check_local_remote = true; + m_check_is_server = true; + } + + check_ipv6_filterchecks(evt); + }); +} + +TEST_F(ipv6_filtercheck_test, single_ipv6_conn_client) +{ + m_client_ip = "2001:db8::4"; + m_server_ip = "2001:db8::3"; + m_client_port = "54405"; + + // Some /etc/services map port 1234 to search-agent, so we + // allow both. + m_server_ports = {"1234", "search-agent"}; + + m_client_net = "2001:db8::/64"; + m_server_net = "2001:db8::/64"; + m_client_proto = "54405"; + m_server_protos = {"1234", "search-agent"}; + m_conn_names = {"2001:db8::4:54405->2001:db8::3:1234", + "2001:db8::4:54405->2001:db8::3:search-agent"}; + m_l4proto = "tcp"; + m_is_server = "false"; + + read_file(LIBSINSP_TEST_CAPTURES_PATH "/single_ipv6_conn.scap", + "proc.pid=25888", + [this](sinsp_evt* evt) + { + std::string evname = std::string(evt->get_name()); + + // Once we see a connect, we can assume the + // socket is connected and it's possible to get + // client/server information. However, we can *not* + // get local/remote information as this connection was + // done between two ips on the same local interface. + if (evname == "connect") + { + m_socket_connected = true; + } + + check_ipv6_filterchecks(evt); + }); +} + +TEST_F(ipv6_filtercheck_test, single_ipv6_conn_server) +{ + m_client_ip = "2001:db8::4"; + m_server_ip = "2001:db8::3"; + m_client_port = "54405"; + m_server_ports = {"1234", "search-agent"}; + m_client_net = "2001:db8::/64"; + m_server_net = "2001:db8::/64"; + m_client_proto = "54405"; + m_server_protos = {"1234", "search-agent"}; + m_conn_names = {"2001:db8::4:54405->2001:db8::3:1234", + "2001:db8::4:54405->2001:db8::3:search-agent"}; + m_l4proto = "tcp"; + m_is_server = "server"; + + read_file(LIBSINSP_TEST_CAPTURES_PATH "/single_ipv6_conn.scap", + "proc.pid=25886", + [this](sinsp_evt* evt) + { + std::string evname = std::string(evt->get_name()); + + // Once we see a connect, we can assume the + // socket is connected and it's possible to get + // client/server information. However, we can *not* + // get local/remote information as this connection was + // done between two ips on the same local interface. + if (evname == "connect") + { + m_socket_connected = true; + } + + check_ipv6_filterchecks(evt); + }); +} + +TEST_F(ipv6_filtercheck_test, test_ipv6_client) +{ + // test_ipv6_client.cpp does the following: + // 1. sendto() on an unconnected socket to ::1 + // 2. connect to ::1, port 2345 + // 3. 
send() on the connected socket (to ::1) + // 4. connect to google dns server, port 53 + // 5. send() on the connected socket (to google dns server) + // 6. sendto() back to ::1, port 2345 + // + // Some /etc/services map port 2345 to dbm, so we allow both. + + // The test verifies that the addresses/ports on the socket + // change properly for the connects/sendtos. + + enum state_t + { + sendto_unconnected, + send_connected, + send_reconnected, + sendto_reconnected, + done + }; + + state_t state = sendto_unconnected; + + read_file( + LIBSINSP_TEST_CAPTURES_PATH "/test_ipv6_client.scap", + "proc.name=test_ipv6_clien", + [&](sinsp_evt* evt) + { + std::string evname = std::string(evt->get_name()); + + std::string full_output; + std::string full = + "*%evt.num %evt.outputtime %evt.cpu %proc.name (%thread.tid) %evt.dir %evt.type " + "%evt.info"; + sinsp_evt_formatter(m_inspector.get(), full, m_filterlist).tostring(evt, &full_output); + + cstringset_t unconnected_names = {"::1:0->::1:2345", "::1:0->::1:dbm"}; + cstringset_t connected_names = {"::1:38255->::1:2345", "::1:38255->::1:dbm"}; + cstringset_t reconnected_names = {"::1:38255->::1:2345", "::1:38255->::1:dbm"}; + + if (evname == "send" || evname == "sendto") + { + switch (state) + { + case sendto_unconnected: + verify_filtercheck(evt, "*%fd.name", unconnected_names, full_output); + state = send_connected; + break; + case send_connected: + verify_filtercheck(evt, "*%fd.name", connected_names, full_output); + state = send_reconnected; + break; + case send_reconnected: + verify_filtercheck(evt, + "*%fd.name", + "::1:38255->2001:4860:4860::8888:domain", + full_output); + state = sendto_reconnected; + break; + case sendto_reconnected: + verify_filtercheck(evt, "*%fd.name", reconnected_names, full_output); + state = done; + break; + case done: + break; + } + } + }, + false); + + ASSERT_TRUE(state == done); +} diff --git a/test/libsinsp_e2e/libsinsp_test_var.h.in b/test/libsinsp_e2e/libsinsp_test_var.h.in index 2f9a83e65ba..c08f7e8df0e 100644 --- a/test/libsinsp_e2e/libsinsp_test_var.h.in +++ b/test/libsinsp_e2e/libsinsp_test_var.h.in @@ -27,7 +27,7 @@ limitations under the License. 
// Absolute path to the bpf probe .o file #define LIBSINSP_TEST_BPF_PROBE_PATH "${CMAKE_BINARY_DIR}/driver/bpf/probe.o" -#define LIBSINSP_TEST_CAPTURES_PATH "${CMAKE_BINARY_DIR}/test/libsinsp_e2e/captures/" +#define LIBSINSP_TEST_CAPTURES_PATH "${CMAKE_BINARY_DIR}/test/libsinsp_e2e/resources/captures/" #define LIBSINSP_TEST_RESOURCES_PATH "${CMAKE_BINARY_DIR}/test/libsinsp_e2e/resources/" diff --git a/test/libsinsp_e2e/resources/CMakeLists.txt b/test/libsinsp_e2e/resources/CMakeLists.txt index 10cbb57a140..fccdf9eae22 100644 --- a/test/libsinsp_e2e/resources/CMakeLists.txt +++ b/test/libsinsp_e2e/resources/CMakeLists.txt @@ -15,6 +15,19 @@ execute_process( OUTPUT_STRIP_TRAILING_WHITESPACE ) +add_executable(forking_main_thread_exit forking_main_thread_exit.c) +target_link_libraries(forking_main_thread_exit pthread) +add_dependencies(libsinsp_e2e_tests forking_main_thread_exit) + +add_executable(forking_nested forking_nested.c) +target_link_libraries(forking_nested pthread) +add_dependencies(libsinsp_e2e_tests forking_nested) + +add_executable(chname chname.cpp) +target_link_libraries(chname pthread) +add_dependencies(libsinsp_e2e_tests chname) + + if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") add_executable(execve execve.c) add_dependencies(libsinsp_e2e_tests execve) diff --git a/test/libsinsp_e2e/resources/captures/curl_google.scap b/test/libsinsp_e2e/resources/captures/curl_google.scap new file mode 100644 index 00000000000..26c88ba9172 Binary files /dev/null and b/test/libsinsp_e2e/resources/captures/curl_google.scap differ diff --git a/test/libsinsp_e2e/resources/captures/single_ipv6_conn.scap b/test/libsinsp_e2e/resources/captures/single_ipv6_conn.scap new file mode 100644 index 00000000000..10cce5b5e87 Binary files /dev/null and b/test/libsinsp_e2e/resources/captures/single_ipv6_conn.scap differ diff --git a/test/libsinsp_e2e/resources/captures/test_ipv6_client.scap b/test/libsinsp_e2e/resources/captures/test_ipv6_client.scap new file mode 100644 index 00000000000..2029ea26802 Binary files /dev/null and b/test/libsinsp_e2e/resources/captures/test_ipv6_client.scap differ diff --git a/test/libsinsp_e2e/resources/chname.cpp b/test/libsinsp_e2e/resources/chname.cpp new file mode 100644 index 00000000000..34e4ff526fe --- /dev/null +++ b/test/libsinsp_e2e/resources/chname.cpp @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +void run() +{ + while (true) + { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } +} + +void changer(char** argv) +{ + char pname[] = "sysdig"; + memcpy((void*)argv[0], pname, sizeof(pname)); + while (true) + { + std::this_thread::sleep_for(std::chrono::seconds(2)); + } +} + +int main(int argc, char** argv) +{ + char pname[] = "savonarola"; + prctl(PR_SET_NAME, (unsigned long)&pname, 0, 0, 0); + std::vector> threads; + for (int j = 0; j < 20; ++j) + { + threads.push_back(std::make_shared(run)); + } + + auto binded_changer = std::bind(changer, argv); + std::thread changer(binded_changer); + run(); +} diff --git a/test/libsinsp_e2e/resources/forking_main_thread_exit.c b/test/libsinsp_e2e/resources/forking_main_thread_exit.c new file mode 100644 index 00000000000..2193844b5f8 --- /dev/null +++ b/test/libsinsp_e2e/resources/forking_main_thread_exit.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include +#include +#include +#include + +#include +#include + +static int fd; + +void* callback(void* arg) +{ + char buf[1024]; + sleep(1); + if (read(fd, buf, sizeof(buf)) < 0) + { + perror("read"); + } + sleep(10); + return NULL; +} + +// +// This is outside the test files because gtest doesn't like +// pthread_exit() since it triggers an exception to unwind the stack +// +int main() +{ + pthread_t thread; + + fd = open("/etc/passwd", O_RDONLY); + if (fd == -1) + { + perror("open"); + } + + pthread_create(&thread, NULL, callback, NULL); + pthread_exit(NULL); +} diff --git a/test/libsinsp_e2e/resources/forking_nested.c b/test/libsinsp_e2e/resources/forking_nested.c new file mode 100644 index 00000000000..9e18320095c --- /dev/null +++ b/test/libsinsp_e2e/resources/forking_nested.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include +#include +#include +#include + +#include +#include + +void* callback(void* arg) +{ + return NULL; +} + +int main() +{ + int ctid; + int cctid, cctid1, cctid2, cctid3, cctid4, cctid5; + + ctid = fork(); + + if (ctid == 0) + { + // + // CHILD PROCESS + // + printf("*1\n"); + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid = fork(); + + if (cctid == 0) + { + // + // CHILD PROCESS + // + printf("*2\n"); + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid1 = fork(); + + if (cctid1 == 0) + { + // + // CHILD PROCESS + // + printf("*3\n"); + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid2 = fork(); + + if (cctid2 == 0) + { + // + // CHILD PROCESS + // + printf("*4\n"); + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid3 = fork(); + + if (cctid3 == 0) + { + printf("*5\n"); + // + // CHILD PROCESS + // + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid4 = fork(); + + if (cctid4 == 0) + { + printf("*6\n"); + // + // CHILD PROCESS + // + pthread_t thread; + pthread_create(&thread, NULL, callback, NULL); + + usleep(100000); + cctid5 = fork(); + + if (cctid5 == 0) + { + printf("*7\n"); + return 0; + } + else + { + return 0; + } + } + else + { + return 0; + } + } + else + { + return 0; + } + } + else + { + return 0; + } + } + else + { + return 0; + } + } + else + { + return 0; + } + } + else + { + return 0; + } +} diff --git a/test/libsinsp_e2e/scap_file_reader.h b/test/libsinsp_e2e/scap_file_reader.h new file mode 100644 index 00000000000..94bbcd9cf21 --- /dev/null +++ b/test/libsinsp_e2e/scap_file_reader.h @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#pragma once + +#include + +#include +#include +#include + +class scap_file_reader +{ +public: + virtual ~scap_file_reader() { m_inspector = nullptr; } + + virtual std::shared_ptr setup_read_file() + { + if (!m_inspector) + { + m_inspector = std::make_shared(); + m_inspector->set_hostname_and_port_resolution_mode(true); + } + return m_inspector; + } + + virtual void run_inspector(const char* filename, + const std::string filter, + std::function evtcb) + { + m_inspector->open_savefile(filename); + m_inspector->set_filter(filter.c_str()); + + while (true) + { + int32_t res; + sinsp_evt* evt; + + res = m_inspector->next(&evt); + + if (res == SCAP_TIMEOUT) + { + continue; + } + else if (res == SCAP_FILTERED_EVENT) + { + continue; + } + else if (res == SCAP_EOF) + { + break; + } + else if (res != SCAP_SUCCESS) + { + break; + } + + evtcb(evt); + } + + m_inspector->close(); + } + + virtual void read_file_filtered(const char* filename, + const std::string filter, + std::function evtcb) + { + setup_read_file(); + run_inspector(filename, filter, evtcb); + } + +private: + std::shared_ptr m_inspector; +}; diff --git a/test/libsinsp_e2e/tcp_client_server.cpp b/test/libsinsp_e2e/tcp_client_server.cpp new file mode 100644 index 00000000000..914709e56c6 --- /dev/null +++ b/test/libsinsp_e2e/tcp_client_server.cpp @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#include "utils.h" + +#include "event_capture.h" +#include "subprocess.h" +#include "sys_call_test.h" +#include "tcp_client_server.h" + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +static const std::string default_payload = "0123456789QWERTYUIOPASDFGHJKLZXCVBNM"; +static const std::string http_payload = + "GET / " + "0123456789QWERTYUIOPASDFGHJKLZXCVBNM0123456789QWERTYUIOPASDFGHJKLZXCVBNM0123456789QWERTYUIOPAS" + "DFGHJKLZXCVBNM0123456789QWERTYUIOPASDFGHJKLZXCVBNM0123456789QWERTYUIOPASDFGHJKLZXCVBNM01234567" + "89QWERTYUIOPASDFGHJKLZXCVBNO"; + +void runtest(iotype iot, + const std::string& payload = default_payload, + bool use_shutdown = false, + bool use_accept4 = false, + uint32_t ntransactions = 1, + bool exit_no_close = false, + bool ia32_mode = false) +{ + proc_started_filter client_started_filter; + proc_started_filter server_started_filter; + auto stringify_bool = [](bool v) + { + return v ? 
"true" : "false"; + }; + unsigned callnum = 0; + std::string helper_exe = LIBSINSP_TEST_PATH "/test_helper"; + if (ia32_mode) + { + helper_exe += "_32"; + } + auto iot_s = std::to_string(iot); + auto ntransactions_s = std::to_string(ntransactions); + subprocess server_proc(helper_exe, + {"tcp_server", + iot_s.c_str(), + "false", + stringify_bool(use_shutdown), + stringify_bool(use_accept4), + ntransactions_s.c_str(), + stringify_bool(exit_no_close)}, false); + int64_t server_pid; + int64_t client_pid; + struct in_addr server_in_addr; + server_in_addr.s_addr = get_server_address(); + char* server_address = inet_ntoa(server_in_addr); + std::string sport; + subprocess test_proc(helper_exe, + {"tcp_client", + server_address, + iot_s.c_str(), + payload, + stringify_bool(false), + ntransactions_s, + stringify_bool(exit_no_close)}, false); + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { + auto tinfo = evt->get_thread_info(false); + if (tinfo && tinfo->m_exe == helper_exe) + { + if (tinfo->m_pid == server_pid) + { + return server_started_filter(evt); + } + else if (tinfo->m_pid == client_pid) + { + return client_started_filter(evt); + } + } + return false; + }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + { + std::scoped_lock inspector_handle_lock(inspector_handle); + inspector_handle->dynamic_snaplen(true); + } + server_proc.start(); + server_proc.wait_for_start(); + server_pid = server_proc.get_pid(); + + test_proc.start(); + test_proc.wait_for_start(); + client_pid = test_proc.get_pid(); + + // We use a random call to tee to signal that we're done + tee(-1, -1, 0, 0); + }; + + std::function log_param = [](const callback_param& param) + { + // cerr << param.m_evt->get_name() << endl; + }; + + // + // OUTPUT VALIDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + + sinsp_evt* evt = param.m_evt; + if (evt->get_type() == PPME_SOCKET_CONNECT_X) + { + std::string tuple = evt->get_param_value_str("tuple"); + + EXPECT_NE((sinsp_fdinfo*)NULL, evt->get_fd_info()); + + if (evt->get_fd_info()->m_type != SCAP_FD_IPV4_SOCK) + { + // + // Skip non-tcp sockets. Python opens unix sockets + // to god knows what. 
+ // + return; + } + + parse_tuple(tuple, src_addr, src_port, dst_addr, dst_port); + + EXPECT_EQ(server_address, src_addr); + if (sport == "") + { + EXPECT_NE("0", src_port); + sport = src_port; + } + else + { + EXPECT_EQ(sport, src_port); + } + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_LISTEN_E) + { + EXPECT_EQ("1", evt->get_param_value_str("backlog")); + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_LISTEN_X) + { + EXPECT_EQ("0", evt->get_param_value_str("res")); + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_ACCEPT4_6_E) + { + EXPECT_EQ("0", evt->get_param_value_str("flags")); + } + else if (evt->get_type() == PPME_SOCKET_ACCEPT_5_X || + evt->get_type() == PPME_SOCKET_ACCEPT4_6_X) + { + parse_tuple(evt->get_param_value_str("tuple"), src_addr, src_port, dst_addr, dst_port); + + EXPECT_EQ(server_address, src_addr); + if (sport == "") + { + EXPECT_NE("0", src_port); + sport = src_port; + } + else + { + EXPECT_EQ(sport, src_port); + } + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + + log_param(param); + callnum++; + } + + if (callnum < 1) + { + return; + } + + // + // 32bit uses send() and recv(), while 64bit always uses sendto() and + // recvfrom() and sets the address to NULL + // + if ((evt->get_type() == PPME_SOCKET_SEND_E || evt->get_type() == PPME_SOCKET_RECV_E || + evt->get_type() == PPME_SOCKET_SENDTO_E || evt->get_type() == PPME_SOCKET_RECVFROM_E || + evt->get_type() == PPME_SYSCALL_READ_E || evt->get_type() == PPME_SYSCALL_WRITE_E || + evt->get_type() == PPME_SYSCALL_READV_E || evt->get_type() == PPME_SYSCALL_WRITEV_E) && + evt->get_fd_info()->m_type == SCAP_FD_IPV4_SOCK) + { + if (evt->get_type() == PPME_SOCKET_RECVFROM_E) + { + if (evt->get_param_value_str("tuple") != "") + { + EXPECT_EQ("NULL", evt->get_param_value_str("tuple")); + } + } + + std::string tuple = evt->get_param_value_str("fd"); + tuple = tuple.substr(tuple.find(">")+1); + parse_tuple(tuple, src_addr, src_port, dst_addr, dst_port); + EXPECT_EQ(server_address, src_addr); + EXPECT_EQ(sport, src_port); + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + + log_param(param); + callnum++; + } + else if ((evt->get_type() == PPME_SOCKET_RECV_X || + evt->get_type() == PPME_SOCKET_RECVFROM_X || + evt->get_type() == PPME_SYSCALL_READ_X || + evt->get_type() == PPME_SYSCALL_READV_X || + evt->get_type() == PPME_SYSCALL_WRITEV_X || + evt->get_type() == PPME_SYSCALL_WRITE_X || + evt->get_type() == PPME_SOCKET_SENDTO_X || + evt->get_type() == PPME_SOCKET_SEND_X) && + evt->get_fd_info()->m_type == SCAP_FD_IPV4_SOCK) + { + if (evt->get_type() == PPME_SOCKET_RECVFROM_X) + { + if(event_capture::get_engine() != MODERN_BPF_ENGINE) + { + EXPECT_EQ("NULL", evt->get_param_value_str("tuple")); + } + else + { + if(!parse_tuple(evt->get_param_value_str("tuple"), src_addr, src_port, dst_addr, dst_port)) + { + return; + } + EXPECT_EQ(server_address, src_addr); + EXPECT_EQ(server_address, dst_addr); + + if(callnum == 7) + { + EXPECT_EQ(sport, src_port); + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + else if(callnum == 9) + { + EXPECT_EQ(sport, dst_port); + EXPECT_EQ(SERVER_PORT_STR, src_port); + } + } + } + + EXPECT_EQ(payload, evt->get_param_value_str("data")); + + log_param(param); + callnum++; + } + + }; + + // + 
// OUTPUT VALDATION + // + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + ASSERT_GT(callnum,0); +} + +TEST_F(sys_call_test, tcp_client_server) +{ + runtest(SENDRECEIVE); +} + +TEST_F(sys_call_test, tcp_client_server_read_write) +{ + runtest(READWRITE); +} + +TEST_F(sys_call_test, tcp_client_server_readv_writev) +{ + runtest(READVWRITEV); +} + +TEST_F(sys_call_test, tcp_client_server_shutdown) +{ + runtest(SENDRECEIVE, default_payload, true); +} + +TEST_F(sys_call_test, tcp_client_server_accept4) +{ + runtest(SENDRECEIVE, default_payload, false, true); +} + +TEST_F(sys_call_test, tcp_client_server_multiple) +{ + runtest(SENDRECEIVE, default_payload, false, false, 10); +} + +TEST_F(sys_call_test, tcp_client_server_noclose) +{ + runtest(SENDRECEIVE, default_payload, false, false, 1, true); +} + +TEST_F(sys_call_test, tcp_client_server_http_snaplen) +{ + runtest(SENDRECEIVE, http_payload); +} + +TEST_F(sys_call_test, tcp_client_server_read_write_http_snaplen) +{ + runtest(READWRITE, http_payload); +} + +TEST_F(sys_call_test, tcp_client_server_readv_writev_http_snaplen) +{ + runtest(READVWRITEV, http_payload); +} + +TEST_F(sys_call_test, tcp_client_server_with_connection_before_capturing_starts) +{ + std::thread server_thread; + std::thread client_thread; + tcp_server server(SENDRECEIVE, true); + uint32_t server_ip_address = get_server_address(); + tcp_client client(server_ip_address, SENDRECEIVE, default_payload, true); + + int state = 0; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == server.get_tid() || evt->get_tid() == client.get_tid(); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + server.signal_continue(); + client.signal_continue(); + server_thread.join(); + client_thread.join(); + }; + + // + // OUTPUT VALIDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* evt = param.m_evt; + if (PPME_SYSCALL_CLOSE_X == evt->get_type() && evt->get_tid() == server.get_tid()) + { + state = 1; + } + }; + + server_thread = std::thread(&tcp_server::run, &server); + client_thread = std::thread(&tcp_client::run, &client); + server.wait_till_ready(); + client.wait_till_ready(); + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + ASSERT_EQ(1, state); +} + + +#ifdef __x86_64__ +TEST_F(sys_call_test32, tcp_client_server) +{ + runtest(SENDRECEIVE, default_payload, false, false, 1, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_read_write) +{ + runtest(READWRITE, default_payload, false, false, 1, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_readv_writev) +{ + runtest(READVWRITEV, default_payload, false, false, 1, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_shutdown) +{ + runtest(SENDRECEIVE, default_payload, true, false, 1, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_accept4) +{ + runtest(SENDRECEIVE, default_payload, false, true, 1, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_multiple) +{ + runtest(SENDRECEIVE, default_payload, false, false, 10, false, true); +} + +TEST_F(sys_call_test32, tcp_client_server_noclose) +{ + runtest(SENDRECEIVE, default_payload, false, false, 1, true, true); +} +#endif diff --git a/test/libsinsp_e2e/tcp_client_server.h b/test/libsinsp_e2e/tcp_client_server.h new file mode 100644 index 00000000000..801b126dd48 --- /dev/null +++ b/test/libsinsp_e2e/tcp_client_server.h @@ -0,0 +1,476 
@@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifndef HELPER_32 +#include +#endif + +#define SERVER_PORT 3555 +#define SERVER_PORT_STR "3555" +#define FALSE 0 + +typedef enum iotype +{ + READWRITE, + SENDRECEIVE, + READVWRITEV +} iotype; + +class std_event +{ +public: + void set() + { + std::lock_guard lock(m_mutex); + m_is_set = true; + m_cond.notify_one(); + } + void wait() + { + std::unique_lock lock(m_mutex); + if (m_is_set) + { + return; + } + else + { + m_cond.wait(lock, [this]() { return m_is_set; }); + } + } + +private: + std::mutex m_mutex; + std::condition_variable m_cond; + bool m_is_set{false}; +}; + +class tcp_server +{ +public: + tcp_server(iotype iot, + bool wait_for_signal_to_continue = false, + bool use_shutdown = false, + bool use_accept4 = false, + uint32_t ntransactions = 1, + bool exit_no_close = false) + { + m_iot = iot; + m_wait_for_signal_to_continue = wait_for_signal_to_continue; + m_use_shutdown = use_shutdown; + m_use_accept4 = use_accept4; + m_ntransactions = ntransactions; + m_exit_no_close = exit_no_close; + } + + void run() + { + int servSock; + int clntSock; + struct sockaddr_in server_address; + struct sockaddr_in client_address; + unsigned int client_len; + uint32_t j; + int port = (m_exit_no_close) ? 
SERVER_PORT + 1 : SERVER_PORT; + + m_tid = syscall(SYS_gettid); + + /* Create socket for incoming connections */ + if ((servSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) + { + perror("socket() failed"); + return; + } + + /* Construct local address structure */ + memset(&server_address, 0, sizeof(server_address)); /* Zero out structure */ + server_address.sin_family = AF_INET; /* Internet address family */ + server_address.sin_addr.s_addr = htonl(INADDR_ANY); /* Any incoming interface */ + server_address.sin_port = htons(port); /* Local port */ + + int yes = 1; + if (setsockopt(servSock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) + { +#ifdef FAIL + FAIL() << "setsockopt() failed"; +#endif + } + + /* Bind to the local address */ + if (::bind(servSock, (struct sockaddr*)&server_address, sizeof(server_address)) < 0) + { +#ifdef FAIL + FAIL() << "bind() failed"; +#endif + return; + } + /* Mark the socket so it will listen for incoming connections */ + if (listen(servSock, 1) < 0) + { + close(servSock); +#ifdef FAIL + FAIL() << "listen() failed"; +#endif + return; + } + std::cout << "SERVER UP" << std::endl; + do + { + /* Set the size of the in-out parameter */ + client_len = sizeof(client_address); + signal_ready(); + + /* Wait for a client to connect */ + if (m_use_accept4) + { + if ((clntSock = + accept4(servSock, (struct sockaddr*)&client_address, &client_len, 0)) < 0) + { + close(servSock); +#ifdef FAIL + FAIL() << "accept() failed"; +#endif + break; + } + } + else + { + if ((clntSock = accept(servSock, (struct sockaddr*)&client_address, &client_len)) < + 0) + { + close(servSock); +#ifdef FAIL + FAIL() << "accept() failed"; +#endif + break; + } + } + + /* clntSock is connected to a client! */ + wait_for_continue(); + char echoBuffer[1024]; /* Buffer for echo string */ + int recvMsgSize; /* Size of received message */ + for (j = 0; j < m_ntransactions; j++) + { + if (m_iot == SENDRECEIVE) + { + if ((recvMsgSize = recv(clntSock, echoBuffer, sizeof(echoBuffer), 0)) < 0) + { +#ifdef FAIL + FAIL() << "recv() failed"; +#endif + break; + } + + if (send(clntSock, echoBuffer, recvMsgSize, 0) != recvMsgSize) + { +#ifdef FAIL + FAIL() << "send() failed"; +#endif + break; + } + } + else if (m_iot == READWRITE || m_iot == READVWRITEV) + { + if ((recvMsgSize = read(clntSock, echoBuffer, sizeof(echoBuffer))) < 0) + { +#ifdef FAIL + FAIL() << "recv() failed"; +#endif + break; + } + + if (write(clntSock, echoBuffer, recvMsgSize) != recvMsgSize) + { +#ifdef FAIL + FAIL() << "send() failed"; +#endif + break; + } + } + } + + if (m_exit_no_close) + { + return; + } + + if (m_use_shutdown) + { +#ifdef ASSERT_EQ + ASSERT_EQ(0, shutdown(clntSock, SHUT_WR)); +#endif + } + else + { + close(clntSock); /* Close client socket */ + } + break; + } while (0); + + if (m_use_shutdown) + { +#ifdef ASSERT_EQ + ASSERT_EQ(0, shutdown(servSock, SHUT_RDWR)); +#endif + } + else + { + close(servSock); + } + } + + void wait_till_ready() { m_ready.wait(); } + + void signal_continue() { m_continue.set(); } + + int64_t get_tid() { return m_tid; } + +private: + void signal_ready() { m_ready.set(); } + + void wait_for_continue() + { + if (m_wait_for_signal_to_continue) + { + m_continue.wait(); + } + } + + std_event m_ready; + std_event m_continue; + bool m_wait_for_signal_to_continue; + int64_t m_tid; + iotype m_iot; + bool m_use_shutdown; + bool m_use_accept4; + uint32_t m_ntransactions; + bool m_exit_no_close; +}; + +class tcp_client +{ +public: + tcp_client(uint32_t server_ip_address, + iotype iot, + const 
std::string& payload = "0123456789QWERTYUIOPASDFGHJKLZXCVBNM", + bool on_thread = false, + uint32_t ntransactions = 1, + bool exit_no_close = false) + { + m_server_ip_address = server_ip_address; + m_iot = iot; + m_payload = payload; + m_on_thread = on_thread; + m_ntransactions = ntransactions; + m_exit_no_close = exit_no_close; + } + + void run() + { + int sock; + struct sockaddr_in server_address; + char buffer[m_payload.size() + 1]; + int bytes_received; + uint32_t j; + int port = (m_exit_no_close) ? SERVER_PORT + 1 : SERVER_PORT; + + m_tid = syscall(SYS_gettid); + + /* Create a reliable, stream socket using TCP */ + if ((sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) + { +#ifdef FAIL + FAIL() << "socket() failed"; +#endif + return; + } + + /* Construct the server address structure */ + memset(&server_address, 0, sizeof(server_address)); /* Zero out structure */ + server_address.sin_family = AF_INET; /* Internet address family */ + server_address.sin_addr.s_addr = m_server_ip_address; /* Server IP address */ + server_address.sin_port = htons(port); /* Server port */ + + /* Establish the connection to the server */ + if (connect(sock, (struct sockaddr*)&server_address, sizeof(server_address)) < 0) + { +#ifdef FAIL + FAIL() << "connect() failed"; +#endif + return; + } + signal_ready(); + wait_for_continue(); + + for (j = 0; j < m_ntransactions; j++) + { + /* Send the string to the server */ + if (m_iot == SENDRECEIVE) + { + if (send(sock, m_payload.c_str(), m_payload.length(), 0) != + (ssize_t)m_payload.length()) + { + close(sock); +#ifdef FAIL + FAIL() << "send() sent a different number of bytes than expected"; +#endif + return; + } + + if ((bytes_received = recv(sock, buffer, m_payload.length(), 0)) <= 0) + { + close(sock); +#ifdef FAIL + FAIL() << "recv() failed or connection closed prematurely"; +#endif + return; + } + + buffer[bytes_received] = '\0'; /* Terminate the string! */ +#ifdef ASSERT_STREQ + ASSERT_STREQ(m_payload.c_str(), buffer); +#endif + } + else if (m_iot == READWRITE) + { + if (write(sock, m_payload.c_str(), m_payload.length()) != + (ssize_t)m_payload.length()) + { + close(sock); +#ifdef FAIL + FAIL() << "send() sent a different number of bytes than expected"; +#endif + return; + } + + if ((bytes_received = read(sock, buffer, m_payload.length())) <= 0) + { + close(sock); +#ifdef FAIL + FAIL() << "recv() failed or connection closed prematurely"; +#endif + return; + } + + buffer[bytes_received] = '\0'; /* Terminate the string! 
*/ +#ifdef ASSERT_STREQ + ASSERT_STREQ(m_payload.c_str(), buffer); +#endif + } + else if (m_iot == READVWRITEV) + { + int wv_count; + char msg1[m_payload.length() / 3 + 1]; + char msg2[m_payload.length() / 3 + 1]; + char msg3[m_payload.length() / 3 + 1]; + struct iovec wv[3]; + + memcpy(msg1, + m_payload.substr(0, m_payload.length() / 3).c_str(), + m_payload.length() / 3); + memcpy(msg2, + m_payload.substr(m_payload.length() / 3, m_payload.length() * 2 / 3).c_str(), + m_payload.length() / 3); + memcpy(msg3, + m_payload.substr(m_payload.length() * 2 / 3, m_payload.length()).c_str(), + m_payload.length() / 3); + + wv[0].iov_base = msg1; + wv[1].iov_base = msg2; + wv[2].iov_base = msg3; + wv[0].iov_len = m_payload.length() / 3; + wv[1].iov_len = m_payload.length() / 3; + wv[2].iov_len = m_payload.length() / 3; + wv_count = 3; + + if (writev(sock, wv, wv_count) != (ssize_t)m_payload.length()) + { + close(sock); +#ifdef FAIL + FAIL() << "send() sent a different number of bytes than expected"; +#endif + return; + } + + if ((bytes_received = readv(sock, wv, wv_count)) <= 0) + { + close(sock); +#ifdef FAIL + FAIL() << "recv() failed or connection closed prematurely"; +#endif + return; + } + } + } + + if (m_exit_no_close) + { + return; + } + + close(sock); + } + + void wait_till_ready() { m_ready.wait(); } + + void signal_continue() { m_continue.set(); } + + int64_t get_tid() { return m_tid; } + +private: + void signal_ready() { m_ready.set(); } + + void wait_for_continue() + { + if (m_on_thread) + { + m_continue.wait(); + } + } + + uint32_t m_server_ip_address; + iotype m_iot; + std_event m_ready; + std_event m_continue; + int64_t m_tid; + bool m_on_thread; + uint32_t m_ntransactions; + bool m_exit_no_close; + std::string m_payload; +}; diff --git a/test/libsinsp_e2e/tcp_client_server_ipv4_mapped.cpp b/test/libsinsp_e2e/tcp_client_server_ipv4_mapped.cpp new file mode 100644 index 00000000000..4b6aa1f504c --- /dev/null +++ b/test/libsinsp_e2e/tcp_client_server_ipv4_mapped.cpp @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include "event_capture.h" +#include "sys_call_test.h" +#include "tcp_client_server.h" +#include "utils.h" + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define SERVER_PORT 3555 +#define SERVER_PORT_STR "3555" +#define SERVER_PORT_NOCLOSE_STR "3557" +#define PAYLOAD "0123456789QWERTYUIOPASDFGHJKLZXCVBNM" +#define BUFFER_LENGTH sizeof(PAYLOAD) +#define FALSE 0 + +class tcp_server_ipv4m +{ +public: + tcp_server_ipv4m(iotype iot, + bool wait_for_signal_to_continue = false, + bool use_shutdown = false, + bool use_accept4 = false, + uint32_t ntransactions = 1, + bool exit_no_close = false) + { + m_iot = iot; + m_wait_for_signal_to_continue = wait_for_signal_to_continue; + m_use_shutdown = use_shutdown; + m_use_accept4 = use_accept4; + m_ntransactions = ntransactions; + m_exit_no_close = exit_no_close; + } + + void run() + { + int servSock; + int clntSock; + struct sockaddr_in6 server_address; + struct sockaddr_in6 client_address; + unsigned int client_len; + uint32_t j; + + int port = (m_exit_no_close) ? SERVER_PORT + 2 : SERVER_PORT; + + m_tid = syscall(SYS_gettid); + + /* Create socket for incoming connections */ + if ((servSock = socket(AF_INET6, SOCK_STREAM, 0)) < 0) + { + perror("socket() failed"); + return; + } + + /* Construct local address structure */ + memset(&server_address, 0, sizeof(server_address)); + server_address.sin6_family = AF_INET6; + server_address.sin6_port = htons(port); + server_address.sin6_addr = in6addr_any; + + int yes = 1; + if (setsockopt(servSock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) + { + FAIL() << "setsockopt() failed"; + } + + /* Bind to the local address */ + if (::bind(servSock, (struct sockaddr*)&server_address, sizeof(server_address)) < 0) + { + perror("bind() failed"); + FAIL(); + return; + } + /* Mark the socket so it will listen for incoming connections */ + if (listen(servSock, 1) < 0) + { + close(servSock); + FAIL() << "listen() failed"; + return; + } + do + { + /* Set the size of the in-out parameter */ + client_len = sizeof(client_address); + signal_ready(); + + /* Wait for a client to connect */ + if (m_use_accept4) + { + if ((clntSock = + accept4(servSock, (struct sockaddr*)&client_address, &client_len, 0)) < 0) + { + close(servSock); + FAIL() << "accept() failed"; + break; + } + } + else + { + if ((clntSock = accept(servSock, (struct sockaddr*)&client_address, &client_len)) < + 0) + { + close(servSock); + FAIL() << "accept() failed"; + break; + } + } + + /* clntSock is connected to a client! 
*/ + wait_for_continue(); + char echoBuffer[BUFFER_LENGTH]; /* Buffer for echo string */ + int recvMsgSize; /* Size of received message */ + for (j = 0; j < m_ntransactions; j++) + { + if (m_iot == SENDRECEIVE) + { + if ((recvMsgSize = recv(clntSock, echoBuffer, BUFFER_LENGTH, 0)) < 0) + { + FAIL() << "recv() failed"; + break; + } + + if (send(clntSock, echoBuffer, recvMsgSize, 0) != recvMsgSize) + { + FAIL() << "send() failed"; + break; + } + } + else if (m_iot == READWRITE || m_iot == READVWRITEV) + { + if ((recvMsgSize = read(clntSock, echoBuffer, BUFFER_LENGTH)) < 0) + { + FAIL() << "recv() failed"; + break; + } + + if (write(clntSock, echoBuffer, recvMsgSize) != recvMsgSize) + { + FAIL() << "send() failed"; + break; + } + } + } + + if (m_exit_no_close) + { + return; + } + + if (m_use_shutdown) + { + ASSERT_EQ(0, shutdown(clntSock, SHUT_WR)); + } + else + { + close(clntSock); /* Close client socket */ + } + break; + } while (0); + + if (m_use_shutdown) + { + ASSERT_EQ(0, shutdown(servSock, SHUT_RDWR)); + } + else + { + close(servSock); + } + } + + void wait_till_ready() { m_ready.wait(); } + + void signal_continue() { m_continue.set(); } + + int64_t get_tid() { return m_tid; } + +private: + void signal_ready() { m_ready.set(); } + + void wait_for_continue() + { + if (m_wait_for_signal_to_continue) + { + m_continue.wait(); + } + } + + std_event m_ready; + std_event m_continue; + bool m_wait_for_signal_to_continue; + int64_t m_tid; + iotype m_iot; + bool m_use_shutdown; + bool m_use_accept4; + uint32_t m_ntransactions; + bool m_exit_no_close; +}; + +class tcp_client_ipv4m +{ +public: + tcp_client_ipv4m(uint32_t server_ip_address, + iotype iot, + bool on_thread = false, + uint32_t ntransactions = 1, + bool exit_no_close = false) + { + m_server_ip_address = server_ip_address; + m_iot = iot; + m_on_thread = on_thread; + m_ntransactions = ntransactions; + m_exit_no_close = exit_no_close; + } + + void run() + { + int sock; + struct sockaddr_in server_address; + char buffer[BUFFER_LENGTH]; + int payload_length; + int bytes_received; + uint32_t j; + int port = (m_exit_no_close) ? SERVER_PORT + 2 : SERVER_PORT; + + m_tid = syscall(SYS_gettid); + + /* Create a reliable, stream socket using TCP */ + if ((sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) + { + FAIL() << "socket() failed"; + return; + } + + /* Construct the server address structure */ + memset(&server_address, 0, sizeof(server_address)); /* Zero out structure */ + server_address.sin_family = AF_INET; /* Internet address family */ + server_address.sin_addr.s_addr = m_server_ip_address; /* Server IP address */ + server_address.sin_port = htons(port); /* Server port */ + + /* Establish the connection to the server */ + if (connect(sock, (struct sockaddr*)&server_address, sizeof(server_address)) < 0) + { + perror("connect() failed"); + FAIL(); + return; + } + signal_ready(); + wait_for_continue(); + payload_length = strlen(PAYLOAD); /* Determine input length */ + + for (j = 0; j < m_ntransactions; j++) + { + /* Send the string to the server */ + if (m_iot == SENDRECEIVE) + { + if (send(sock, PAYLOAD, payload_length, 0) != payload_length) + { + close(sock); + FAIL() << "send() sent a different number of bytes than expected"; + return; + } + + if ((bytes_received = recv(sock, buffer, BUFFER_LENGTH - 1, 0)) <= 0) + { + close(sock); + FAIL() << "recv() failed or connection closed prematurely"; + return; + } + + buffer[bytes_received] = '\0'; /* Terminate the string! 
*/ + ASSERT_STREQ(PAYLOAD, buffer); + } + else if (m_iot == READWRITE) + { + if (write(sock, PAYLOAD, payload_length) != payload_length) + { + close(sock); + FAIL() << "send() sent a different number of bytes than expected"; + return; + } + + if ((bytes_received = read(sock, buffer, BUFFER_LENGTH - 1)) <= 0) + { + close(sock); + FAIL() << "recv() failed or connection closed prematurely"; + return; + } + + buffer[bytes_received] = '\0'; /* Terminate the string! */ + ASSERT_STREQ(PAYLOAD, buffer); + } + else if (m_iot == READVWRITEV) + { + std::string ps(PAYLOAD); + int wv_count; + char msg1[BUFFER_LENGTH / 3 + 1]; + char msg2[BUFFER_LENGTH / 3 + 1]; + char msg3[BUFFER_LENGTH / 3 + 1]; + struct iovec wv[3]; + + memcpy(msg1, ps.substr(0, BUFFER_LENGTH / 3).c_str(), BUFFER_LENGTH / 3); + memcpy(msg2, + ps.substr(BUFFER_LENGTH / 3, BUFFER_LENGTH * 2 / 3).c_str(), + BUFFER_LENGTH / 3); + memcpy(msg3, + ps.substr(BUFFER_LENGTH * 2 / 3, BUFFER_LENGTH).c_str(), + BUFFER_LENGTH / 3); + + wv[0].iov_base = msg1; + wv[1].iov_base = msg2; + wv[2].iov_base = msg3; + wv[0].iov_len = BUFFER_LENGTH / 3; + wv[1].iov_len = BUFFER_LENGTH / 3; + wv[2].iov_len = BUFFER_LENGTH / 3; + wv_count = 3; + + if (writev(sock, wv, wv_count) != payload_length) + { + close(sock); + FAIL() << "send() sent a different number of bytes than expected"; + return; + } + + if ((bytes_received = readv(sock, wv, wv_count)) <= 0) + { + close(sock); + FAIL() << "recv() failed or connection closed prematurely"; + return; + } + } + } + + if (m_exit_no_close) + { + return; + } + + close(sock); + } + + void wait_till_ready() { m_ready.wait(); } + + void signal_continue() { m_continue.set(); } + + int64_t get_tid() { return m_tid; } + +private: + void signal_ready() { m_ready.set(); } + + void wait_for_continue() + { + if (m_on_thread) + { + m_continue.wait(); + } + } + + uint32_t m_server_ip_address; + iotype m_iot; + std_event m_ready; + std_event m_continue; + int64_t m_tid; + bool m_on_thread; + uint32_t m_ntransactions; + bool m_exit_no_close; +}; + +void runtest_ipv4m(iotype iot, + bool use_shutdown = false, + bool use_accept4 = false, + uint32_t ntransactions = 1, + bool exit_no_close = false) +{ + int callnum = 0; + std::thread server_thread; + std::shared_ptr server = std::make_shared(iot, false, use_shutdown, use_accept4, ntransactions, exit_no_close); + + uint32_t server_ip_address = get_server_address(); + + struct in_addr server_in_addr; + server_in_addr.s_addr = get_server_address(); + + char* server_address = inet_ntoa(server_in_addr); + std::string sport; + int state = 0; + int ctid; + int tid = getpid(); + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { + return evt->get_tid() == server->get_tid() || evt->get_tid() == tid; + }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + server_thread = std::thread(&tcp_server_ipv4m::run, server); + server->wait_till_ready(); + + tcp_client_ipv4m client(server_ip_address, iot, false, ntransactions, exit_no_close); + + client.run(); + + ctid = client.get_tid(); + sleep(1); + server_thread.join(); + + // We use a random call to tee to signal that we're done + tee(-1, -1, 0, 0); + }; + + std::function log_param = [](const callback_param& param) + { + //std::cerr << param.m_evt->get_name() << std::endl; + }; + + // + // OUTPUT VALIDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + 
+ sinsp_evt* evt = param.m_evt; + if (evt->get_type() == PPME_SOCKET_CONNECT_X) + { + std::string tuple = evt->get_param_value_str("tuple"); + if(!parse_tuple(tuple, src_addr, src_port, dst_addr, dst_port)) + { + return; + } + + EXPECT_NE((sinsp_fdinfo*)NULL, evt->get_fd_info()); + + if (evt->get_fd_info()->m_type != SCAP_FD_IPV4_SOCK) + { + // + // Skip non-tcp sockets. Python opens unix sockets + // to god knows what. + // + return; + } + + EXPECT_EQ(server_address, src_addr); + if (sport == "") + { + EXPECT_NE("0", src_port); + sport = src_port; + } + else + { + EXPECT_EQ(sport, src_port); + } + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_LISTEN_E) + { + EXPECT_EQ("1", evt->get_param_value_str("backlog")); + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_LISTEN_X) + { + EXPECT_EQ("0", evt->get_param_value_str("res")); + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_ACCEPT4_6_E) + { + EXPECT_EQ("0", evt->get_param_value_str("flags")); + } + else if (evt->get_type() == PPME_SOCKET_ACCEPT_5_X || + evt->get_type() == PPME_SOCKET_ACCEPT4_6_X) + { + if(!parse_tuple(evt->get_param_value_str("tuple"), src_addr, src_port, dst_addr, dst_port)) + { + return; + } + + EXPECT_EQ(server_address, src_addr); + if (sport == "") + { + EXPECT_NE("0", src_port); + sport = src_port; + } + else + { + EXPECT_EQ(sport, src_port); + } + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + + log_param(param); + callnum++; + } + + if (callnum < 1) + { + return; + } + + // + // 32bit uses send() and recv(), while 64bit always uses sendto() and + // recvfrom() and sets the address to NULL + // + if (evt->get_type() == PPME_SOCKET_SEND_E || evt->get_type() == PPME_SOCKET_RECV_E || + evt->get_type() == PPME_SOCKET_SENDTO_E || evt->get_type() == PPME_SOCKET_RECVFROM_E || + evt->get_type() == PPME_SYSCALL_READ_E || evt->get_type() == PPME_SYSCALL_WRITE_E || + evt->get_type() == PPME_SYSCALL_READV_E || evt->get_type() == PPME_SYSCALL_WRITEV_E) + { + if (evt->get_type() == PPME_SOCKET_RECVFROM_E) + { + if (evt->get_param_value_str("tuple") != "") + { + EXPECT_EQ("NULL", evt->get_param_value_str("tuple")); + } + } + + std::string tuple = evt->get_param_value_str("fd"); + tuple = tuple.substr(tuple.find(">")+1); + if(!parse_tuple(tuple, src_addr, src_port, dst_addr, dst_port)) + { + return; + } + + EXPECT_EQ(server_address, src_addr); + EXPECT_EQ(sport, src_port); + + EXPECT_EQ(server_address, dst_addr); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SOCKET_RECV_X || + evt->get_type() == PPME_SOCKET_RECVFROM_X || + evt->get_type() == PPME_SYSCALL_READ_X) + { + if (evt->get_type() == PPME_SOCKET_RECVFROM_X) + { + if(event_capture::get_engine() != MODERN_BPF_ENGINE) + { + EXPECT_EQ("NULL", evt->get_param_value_str("tuple")); + } + else + { + if(!parse_tuple(evt->get_param_value_str("tuple"), src_addr, src_port, dst_addr, dst_port)) + { + return; + } + EXPECT_EQ(server_address, src_addr); + EXPECT_EQ(server_address, dst_addr); + + if(callnum == 7) + { + EXPECT_EQ(sport, src_port); + if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, dst_port); + } + else + { + EXPECT_EQ(SERVER_PORT_NOCLOSE_STR, dst_port); + } + } + else if(callnum == 9) + { + EXPECT_EQ(sport, dst_port); + 
if (!exit_no_close) + { + EXPECT_EQ(SERVER_PORT_STR, src_port); + } + else + { + EXPECT_EQ(SERVER_PORT_NOCLOSE_STR, src_port); + } + } + + } + } + + EXPECT_EQ(PAYLOAD, evt->get_param_value_str("data")); + + log_param(param); + callnum++; + } + else if (evt->get_type() == PPME_SYSCALL_READV_X) + { + std::string ds = evt->get_param_value_str("data"); + + EXPECT_EQ(ds, evt->get_param_value_str("data")); + + log_param(param); + callnum++; + } + + if ((PPME_SYSCALL_CLOSE_X == evt->get_type() || + PPME_SOCKET_SHUTDOWN_X == evt->get_type()) && + 0 == state && evt->get_tid() == server->get_tid()) + { + if (exit_no_close) + { + FAIL(); + } + + state = 1; + } + + if (!(use_shutdown || exit_no_close)) + { + if (evt->get_type() == PPME_GENERIC_E) + { + if (std::stoll(evt->get_param_value_str("ID", false)) == PPM_SC_TEE) + { + sinsp_threadinfo* ti = param.m_inspector->get_thread_ref(server->get_tid(), false, true).get(); + ASSERT_NE(ti, nullptr); + ti = param.m_inspector->get_thread_ref(ctid, false, true).get(); + ASSERT_NE(ti, nullptr); + } + } + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); +} + +TEST_F(sys_call_test, tcp_client_server_ipv4m) +{ + runtest_ipv4m(SENDRECEIVE); +} + +TEST_F(sys_call_test, tcp_client_server_read_write_ipv4m) +{ + runtest_ipv4m(READWRITE); +} + +TEST_F(sys_call_test, tcp_client_server_readv_writev_ipv4m) +{ + runtest_ipv4m(READVWRITEV); +} + +TEST_F(sys_call_test, tcp_client_server_shutdown_ipv4m) +{ + runtest_ipv4m(SENDRECEIVE, true); +} + +TEST_F(sys_call_test, tcp_client_server_accept4_ipv4m) +{ + runtest_ipv4m(SENDRECEIVE, false, true); +} + +TEST_F(sys_call_test, tcp_client_server_multiple_ipv4m) +{ + runtest_ipv4m(SENDRECEIVE, false, false, 10); +} + +TEST_F(sys_call_test, tcp_client_server_noclose_ipv4m) +{ + runtest_ipv4m(SENDRECEIVE, false, false, 1, true); +} + +TEST_F(sys_call_test, tcp_client_server_with_connection_before_capturing_starts_ipv4m) +{ + std::thread server_thread; + std::thread client_thread; + tcp_server_ipv4m server(SENDRECEIVE, true); + uint32_t server_ip_address = get_server_address(); + tcp_client_ipv4m client(server_ip_address, SENDRECEIVE, true); + + int state = 0; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { return evt->get_tid() == server.get_tid() || evt->get_tid() == client.get_tid(); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector) + { + server.signal_continue(); + client.signal_continue(); + server_thread.join(); + client_thread.join(); + }; + + // + // OUTPUT VALIDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* evt = param.m_evt; + if (PPME_SYSCALL_CLOSE_X == evt->get_type() && evt->get_tid() == server.get_tid()) + { + state = 1; + } + }; + + server_thread = std::thread(&tcp_server_ipv4m::run, &server); + client_thread = std::thread(&tcp_client_ipv4m::run, &client); + server.wait_till_ready(); + client.wait_till_ready(); + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + ASSERT_EQ(1, state); +} diff --git a/test/libsinsp_e2e/test_helper.cpp b/test/libsinsp_e2e/test_helper.cpp index 76d982da11f..ec41c445abd 100644 --- a/test/libsinsp_e2e/test_helper.cpp +++ b/test/libsinsp_e2e/test_helper.cpp @@ -1,3 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + #include #include #include @@ -20,6 +38,9 @@ #include #include +#define HELPER_32 +#include "tcp_client_server.h" + #include #include @@ -660,6 +681,31 @@ void cri_container_sleep_lzcat(const vector& args) const unordered_map&)>> func_map = { {"proc_mgmt", proc_mgmt}, {"mmap_test", mmap_test}, + {"tcp_client", + [](const vector& args) + { + auto iot = static_cast(stoi(args.at(1))); + tcp_client client(inet_addr(args.at(0).c_str()), + iot, + args.at(2), + str_to_bool(args.at(3)), + stoi(args.at(4)), + str_to_bool(args.at(5))); + client.run(); + }}, + {"tcp_server", + [](const vector& args) + { + auto iot = static_cast(stoi(args.at(0))); + + tcp_server server(iot, + str_to_bool(args.at(1)), + str_to_bool(args.at(2)), + str_to_bool(args.at(3)), + stoi(args.at(4)), + str_to_bool(args.at(5))); + server.run(); + }}, {"pread_pwrite", pread_pwrite}, {"preadv_pwritev", preadv_pwritev}, {"quotactl_ko", quotactl_ko}, diff --git a/test/libsinsp_e2e/udp_client_server.cpp b/test/libsinsp_e2e/udp_client_server.cpp new file mode 100644 index 00000000000..93b52ec0dff --- /dev/null +++ b/test/libsinsp_e2e/udp_client_server.cpp @@ -0,0 +1,1099 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +#include "sys_call_test.h" + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +using namespace std; + +#include + +#define SERVER_PORT 3555 +#define SERVER_PORT_STR "3555" +#define PAYLOAD "0123456789QWERTYUIOPASDFGHJKLZXCVBNM" +#define BUFFER_LENGTH (sizeof(PAYLOAD) - 1) +#define FALSE 0 +#define NTRANSACTIONS 2 + +class udp_server +{ + public: + udp_server(bool use_unix, bool use_sendmsg, bool recvmsg_twobufs, uint32_t port_offset = 0) + { + m_use_unix = use_unix; + m_use_sendmsg = use_sendmsg; + m_recvmsg_twobufs = recvmsg_twobufs; + m_port = SERVER_PORT + port_offset; + m_server_ready = false; + } + + void run() + { + int sd = -1, rc; + char buffer[BUFFER_LENGTH + 10]; + char buffer1[BUFFER_LENGTH - 10]; + struct sockaddr_in serveraddr; + struct sockaddr_in clientaddr; + socklen_t clientaddrlen = sizeof(clientaddr); + int j; + int domain; + + m_tid = syscall(SYS_gettid); + + if (m_use_unix) + { + domain = AF_UNIX; + } + else + { + domain = AF_INET; + } + + do + { + sd = socket(domain, SOCK_DGRAM, 0); + if (sd < 0) + { + perror("socket() failed"); + break; + } + + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sin_family = domain; + serveraddr.sin_port = htons(m_port); + serveraddr.sin_addr.s_addr = htonl(INADDR_ANY); + + rc = ::bind(sd, (struct sockaddr*)&serveraddr, sizeof(serveraddr)); + if (rc < 0) + { + perror("bind() failed"); + break; + } + + { + std::unique_lock lock(m_mutex); + m_server_ready = true; + m_condition_server_ready.notify_one(); + } + + for (j = 0; j < NTRANSACTIONS; j++) + { + if (m_use_sendmsg) + { + struct msghdr msg; + struct iovec iov[2]; + + if (m_recvmsg_twobufs) + { + iov[0].iov_base = buffer1; + iov[0].iov_len = BUFFER_LENGTH - 10; + iov[1].iov_base = buffer; + iov[1].iov_len = BUFFER_LENGTH - 10; + + msg.msg_name = &clientaddr; + msg.msg_namelen = clientaddrlen; + msg.msg_iov = iov; + msg.msg_iovlen = 2; + msg.msg_control = 0; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + // + // Receive the data + // + int res = recvmsg(sd, &msg, 0); + EXPECT_EQ(res, (int)BUFFER_LENGTH); + + // + // Set the send buffer + // + iov[0].iov_len = BUFFER_LENGTH - 10; + iov[1].iov_len = 10; + } + else + { + iov[0].iov_base = buffer; + iov[0].iov_len = BUFFER_LENGTH + 10; + + msg.msg_name = &clientaddr; + msg.msg_namelen = clientaddrlen; + msg.msg_iov = iov; + msg.msg_iovlen = 1; + msg.msg_control = 0; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + // + // Receive the data + // + int res = recvmsg(sd, &msg, 0); + EXPECT_EQ(res, (int)BUFFER_LENGTH); + + // + // Set the send buffer + // + iov[0].iov_len = BUFFER_LENGTH; + } + + // + // Echo the data back to the client + // + if (sendmsg(sd, &msg, 0) == -1) + { + perror("sendmsg() failed"); + break; + } + } + else + { + // + // Receive the data + // + rc = recvfrom(sd, + buffer, + sizeof(buffer), + 0, + (struct sockaddr*)&clientaddr, + &clientaddrlen); + if (rc < 0) + { + perror("recvfrom() failed"); + break; + } + + // + // Echo the data back to the client + // + rc = sendto(sd, + buffer, + sizeof(buffer), + 0, + (struct sockaddr*)&clientaddr, + sizeof(clientaddr)); + if (rc < 0) + { + FAIL(); + perror("sendto() failed"); + break; + } + } + } + } while (FALSE); + + if (sd != -1) + close(sd); + } + + void wait_for_server_ready() + { + { + std::unique_lock lock(m_mutex); + m_condition_server_ready.wait(lock, [this]() { + return m_server_ready; + }); + m_server_ready = false; + } + } + + int64_t get_tid() { return m_tid; } + + private: + std::mutex 
m_mutex; + std::condition_variable m_condition_server_ready; + bool m_server_ready; + int64_t m_tid; + bool m_use_unix; + bool m_use_sendmsg; + bool m_recvmsg_twobufs; + uint16_t m_port; +}; + +class udp_client +{ + public: + udp_client(uint32_t server_ip_address, + bool use_connect, + uint16_t base_port = SERVER_PORT, + uint32_t num_servers = 1) + : m_use_sendmsg(false), + m_recv(true), + m_payload(PAYLOAD), + m_ignore_errors(false), + m_n_transactions(NTRANSACTIONS) + { + m_use_unix = false; + m_server_ip_address = server_ip_address; + m_use_connect = use_connect; + for (uint32_t idx = 0; idx < num_servers; idx++) + { + m_server_ports.push_back(base_port + idx); + } + } + + void run() + { + int sd; + int domain; + + if (m_use_unix) + { + domain = AF_UNIX; + } + else + { + domain = AF_INET; + } + + sd = socket(domain, SOCK_DGRAM, 0); + if (sd < 0) + { + FAIL(); + } + + for (auto port : m_server_ports) + { + run_using_port(sd, domain, port); + } + + if (sd != -1) + { + close(sd); + } + } + + void run_using_port(int sd, int domain, uint16_t port) + { + int rc; + int j; + struct sockaddr_in serveraddr; + socklen_t serveraddrlen = sizeof(serveraddr); + + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sin_family = domain; + serveraddr.sin_port = htons(port); + serveraddr.sin_addr.s_addr = m_server_ip_address; + + if (m_use_connect) + { + if (connect(sd, (struct sockaddr*)&serveraddr, sizeof(serveraddr)) < 0 && + !m_ignore_errors) + { + close(sd); + FAIL() << "connect() failed"; + } + } + + for (j = 0; j < m_n_transactions; j++) + { + if (!m_use_sendmsg) + { + if (m_use_connect) + { + rc = sendto(sd, m_payload.data(), m_payload.size(), 0, NULL, 0); + } + else + { + rc = sendto(sd, + m_payload.data(), + m_payload.size(), + 0, + (struct sockaddr*)&serveraddr, + sizeof(serveraddr)); + } + } + else + { + struct msghdr msg = {0}; + if (m_use_connect) + { + msg.msg_name = NULL; + } + else + { + msg.msg_name = (void*)&serveraddr; + msg.msg_namelen = sizeof(serveraddr); + } + struct iovec iov; + iov.iov_base = (void*)m_payload.data(); + iov.iov_len = m_payload.size(); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + rc = sendmsg(sd, &msg, MSG_DONTWAIT); + } + if (rc < 0 && !m_ignore_errors) + { + close(sd); + FAIL(); + } + + // + // Use the recvfrom() function to receive the data back from the + // server. 
+ // + if (m_recv) + { + char* buffer = (char*)malloc(m_payload.size()); + rc = recvfrom(sd, + buffer, + m_payload.size(), + 0, + (struct sockaddr*)&serveraddr, + &serveraddrlen); + free(buffer); + if (rc < 0 && !m_ignore_errors) + { + close(sd); + FAIL(); + } + } + } + } + + bool m_use_sendmsg; + bool m_recv; + std::string m_payload; + bool m_use_connect; + bool m_ignore_errors; + int m_n_transactions; + + private: + bool m_use_unix; + uint32_t m_server_ip_address; + std::vector m_server_ports; +}; + +class udp_servers_and_client +{ + public: + udp_servers_and_client(bool use_unix, + bool use_sendmsg, + bool recvmsg_twobufs, + bool use_connect, + uint32_t num_servers) + { + m_server_ip_address = get_server_address(); + struct in_addr server_in_addr; + server_in_addr.s_addr = m_server_ip_address; + m_server_address = inet_ntoa(server_in_addr); + m_use_connect = use_connect; + + for (uint32_t idx = 0; idx < num_servers; idx++) + { + m_server_ports.insert(SERVER_PORT + idx); + m_servers.emplace_back( + std::make_shared(use_unix, use_sendmsg, recvmsg_twobufs, idx)); + } + } + + uint32_t server_ip_address() { return m_server_ip_address; } + + std::string& server_address() { return m_server_address; } + + bool is_server_tid(int64_t tid) + { + for (auto& srv : m_servers) + { + if (tid == srv->get_tid()) + { + return true; + } + } + + return false; + } + + std::vector>& get_servers() { return m_servers; } + + bool is_server_port(std::string& portstr) + { + uint16_t port = std::stoi(portstr); + + return (port >= SERVER_PORT && port < SERVER_PORT + m_servers.size()); + } + + bool filter(sinsp_evt* evt) { return is_server_tid(evt->get_tid()); } + + std::string server_port_yaml() + { + std::stringstream out; + for (auto port : m_server_ports) + { + out << " - " << port << "\n"; + } + return out.str(); + } + + void start() + { + for (uint32_t idx = 0; idx < m_servers.size(); idx++) + { + m_threads.emplace_back(std::thread(&udp_server::run, m_servers[idx])); + m_servers[idx]->wait_for_server_ready(); + } + + udp_client client(m_server_ip_address, m_use_connect, SERVER_PORT, m_servers.size()); + client.run(); + + for (auto& thread : m_threads) + { + thread.join(); + } + } + + private: + uint32_t m_server_ip_address; + std::string m_server_address; + std::vector m_threads; + std::vector> m_servers; + std::set m_server_ports; + bool m_use_connect; +}; + +inline void parse_tuple(const std::string& tuple, + std::string& src_addr, + std::string& src_port, + std::string& dst_addr, + std::string& dst_port) +{ + std::string token; + std::stringstream ss(tuple); + std::vector tst; + while (std::getline(ss, token, '>')) { + tst.push_back(token); + } + + std::string srcstr = tst[0].substr(0, tst[0].size() - 1); + std::string dststr = tst[1]; + + ss.clear(); + ss.str(srcstr); + std::vector sst; + while (std::getline(ss, token, ':')) { + sst.push_back(token); + } + + EXPECT_EQ(2, (int)sst.size()); + src_addr = sst[0]; + src_port = sst[1]; + + ss.clear(); + ss.str(dststr); + std::vector dst; + while (std::getline(ss, token, ':')) { + dst.push_back(token); + } + EXPECT_EQ(2, (int)dst.size()); + dst_addr = dst[0]; + dst_port = dst[1]; + +} + +TEST_F(sys_call_test, udp_client_server) +{ + int32_t state = 0; + bool use_unix = false, use_sendmsg = false, recvmsg_twobufs = false, use_connect = false; + uint32_t num_servers = 1; + udp_servers_and_client udps(use_unix, use_sendmsg, recvmsg_twobufs, use_connect, num_servers); + int64_t fd_server_socket = 0; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) 
{ return udps.filter(evt) || m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) { udps.start(); }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + uint16_t type = e->get_type(); + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + + if (type == PPME_SOCKET_RECVFROM_E) + { + memcpy(&fd_server_socket, e->get_param(0)->m_val, sizeof(fd_server_socket)); + } + switch (state) + { + case 0: + EXPECT_NE(PPME_SOCKET_SENDTO_X, type); + EXPECT_NE(PPME_SOCKET_RECVFROM_X, type); + + if (type == PPME_SOCKET_SENDTO_E) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, + src_port, dst_addr, dst_port); + EXPECT_EQ("0.0.0.0", src_addr); + + EXPECT_EQ(udps.server_address(), dst_addr); + EXPECT_TRUE(udps.is_server_port(dst_port)); + + state++; + } + break; + case 1: + if (type == PPME_SOCKET_RECVFROM_X) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, + src_port, dst_addr, dst_port); + + EXPECT_EQ(udps.server_address(), src_addr); + EXPECT_NE("0", src_port); + EXPECT_EQ("0.0.0.0", dst_addr); + EXPECT_TRUE(udps.is_server_port(dst_port)); + + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + sinsp_fdinfo* fdinfo = e->get_thread_info(false)->get_fd(fd_server_socket); + ASSERT_TRUE(fdinfo); + EXPECT_EQ(udps.server_ip_address(), fdinfo->m_sockinfo.m_ipv4info.m_fields.m_sip); + + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + + state++; + } + break; + case 2: + EXPECT_NE(PPME_SOCKET_SENDTO_X, type); + EXPECT_NE(PPME_SOCKET_RECVFROM_X, type); + + if (type == PPME_SOCKET_SENDTO_E) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, + src_port, dst_addr, dst_port); + + EXPECT_EQ("0.0.0.0", src_addr); + EXPECT_TRUE(udps.is_server_port(src_port)); + EXPECT_EQ(udps.server_address(), dst_addr); + EXPECT_NE("0", dst_port); + + state++; + } + break; + case 3: + if (type == PPME_SOCKET_RECVFROM_X) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, + src_port, dst_addr, dst_port); + + EXPECT_EQ(udps.server_address(), src_addr); + EXPECT_TRUE(udps.is_server_port(src_port)); + + EXPECT_EQ("0.0.0.0", dst_addr); + EXPECT_NE("0", dst_port); + + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + sinsp_fdinfo* fdinfo = e->get_thread_info(false)->get_fd(fd_server_socket); + ASSERT_TRUE(fdinfo); + EXPECT_EQ(udps.server_ip_address(), fdinfo->m_sockinfo.m_ipv4info.m_fields.m_sip); + + state = 4; + } + break; + case 4: + break; + default: + FAIL(); + break; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); +} + +TEST_F(sys_call_test, udp_client_server_with_connect_by_client) +{ + bool use_unix = false, use_sendmsg = false, recvmsg_twobufs = false, use_connect = true; + uint32_t num_servers = 1; + udp_servers_and_client udps(use_unix, use_sendmsg, recvmsg_twobufs, use_connect, num_servers); + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + + int callnum = 0; + std::string client_port; + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return udps.filter(evt) || m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) { udps.start(); }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + uint16_t type = e->get_type(); + 
if (PPME_SOCKET_CONNECT_X == type) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, + src_port, dst_addr, dst_port); + + EXPECT_EQ(udps.server_address(), src_addr); + + EXPECT_EQ(udps.server_address(), dst_addr); + EXPECT_TRUE(udps.is_server_port(dst_port)); + + callnum++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + ASSERT_EQ(1, callnum); +} + +TEST_F(sys_call_test, udp_client_server_sendmsg) +{ + bool use_unix = false, use_sendmsg = true, recvmsg_twobufs = false, use_connect = false; + uint32_t num_servers = 1; + udp_servers_and_client udps(use_unix, use_sendmsg, recvmsg_twobufs, use_connect, num_servers); + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return udps.filter(evt) || m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) { udps.start(); }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + uint16_t type = e->get_type(); + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + + if (type == PPME_SOCKET_RECVMSG_X) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, src_port, + dst_addr, dst_port); + + EXPECT_EQ(udps.server_address(), src_addr); + EXPECT_NE("0", src_port); + EXPECT_EQ("0.0.0.0", dst_addr); + EXPECT_TRUE(udps.is_server_port(dst_port)); + + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + + EXPECT_EQ(udps.server_ip_address(), + e->get_fd_info()->m_sockinfo.m_ipv4info.m_fields.m_sip); + } + else if (type == PPME_SOCKET_SENDMSG_E) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, src_port, + dst_addr, dst_port); + + EXPECT_EQ("0.0.0.0", src_addr); + EXPECT_TRUE(udps.is_server_port(src_port)); + EXPECT_EQ(udps.server_address(), dst_addr); + EXPECT_NE("0", dst_port); + + EXPECT_EQ((int)BUFFER_LENGTH, std::stoi(e->get_param_value_str("size"))); + } + else if (type == PPME_SOCKET_SENDMSG_X) + { + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); +} + +TEST_F(sys_call_test, udp_client_server_sendmsg_2buf) +{ + bool use_unix = false, use_sendmsg = true, recvmsg_twobufs = true, use_connect = false; + uint32_t num_servers = 1; + udp_servers_and_client udps(use_unix, use_sendmsg, recvmsg_twobufs, use_connect, num_servers); + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return udps.filter(evt) || m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) { udps.start(); }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + uint16_t type = e->get_type(); + std::string src_addr; + std::string src_port; + std::string dst_addr; + std::string dst_port; + + if (type == PPME_SOCKET_RECVMSG_X) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, src_port, + dst_addr, dst_port); + + EXPECT_EQ(udps.server_address(), src_addr); + EXPECT_NE("0", src_port); + EXPECT_EQ("0.0.0.0", dst_addr); + EXPECT_TRUE(udps.is_server_port(dst_port)); + + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + + EXPECT_EQ(udps.server_ip_address(), + e->get_fd_info()->m_sockinfo.m_ipv4info.m_fields.m_sip); + } + else if (type == PPME_SOCKET_SENDMSG_E) + { + parse_tuple(e->get_param_value_str("tuple"), src_addr, src_port, + 
dst_addr, dst_port); + + EXPECT_EQ("0.0.0.0", src_addr); + EXPECT_TRUE(udps.is_server_port(src_port)); + + EXPECT_EQ(udps.server_address(), dst_addr); + EXPECT_NE("0", dst_port); + EXPECT_EQ((int)BUFFER_LENGTH, std::stoi(e->get_param_value_str("size"))); + } + else if (type == PPME_SOCKET_SENDMSG_X) + { + EXPECT_EQ(PAYLOAD, e->get_param_value_str("data")); + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); +} + +static void run_fd_name_changed_test(bool use_sendmsg, + bool recvmsg_twobufs, + bool use_connect, + event_filter_t m_tid_filter, + uint32_t expected_name_changed_evts) +{ + bool use_unix = false; + uint32_t num_servers = 2; + udp_servers_and_client udps(use_unix, use_sendmsg, recvmsg_twobufs, use_connect, num_servers); + + unique_ptr fd_name_changed; + + uint32_t num_name_changed_evts = 0; + + // INIT FILTER + before_open_t before_open = [&](sinsp* inspector) + { + sinsp_filter_compiler compiler(inspector, "fd.name_changed=true"); + fd_name_changed = std::move(compiler.compile()); + }; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return udps.filter(evt) || m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) { udps.start(); }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + if (fd_name_changed->run(e)) + { + num_name_changed_evts++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter, before_open); }); + + ASSERT_EQ(num_name_changed_evts, expected_name_changed_evts); +} + +TEST_F(sys_call_test, udp_client_server_fd_name_changed) +{ + bool use_sendmsg = false, recvmsg_twobufs = false, use_connect = false; + + // This test only needs to count events. We want to + // see 7 events, representing the following: + // - The servers bind()ing their sockets to their server ports. + // - the udp client sending to the first server. + // - the first udp server receiving from the udp client + // - the udp client receiving the echoed response from the first udp server. + // This results in an event, even though this fd has already + // been used between the server and client, because this + // recvfrom sets the client side port as a result of + // the recvfrom(). + // - the udp client sending to the second server + // - the second udp server receiving from the udp client + // + // Events that do *not* trigger name_changed are: + // - the first/second udp server sending the echoed response to the udp client. This is because + // it's using + // the same client/server address + port as when it received the packet from the udp client. + // - the udp client receiving the second echo back from the second server. This is because + // the client side port was already set from the communication with the first server. + + run_fd_name_changed_test(use_sendmsg, recvmsg_twobufs, use_connect, m_tid_filter, 7); +} + +TEST_F(sys_call_test, udp_client_server_connect_fd_name_changed) +{ + bool use_sendmsg = false, recvmsg_twobufs = false, use_connect = true; + + // When the client uses connect, there is one fewer name + // changed event, as there is no name change when the client + // receives the echoed response from the server. 
+ + run_fd_name_changed_test(use_sendmsg, recvmsg_twobufs, use_connect, m_tid_filter, 6); +} + +TEST_F(sys_call_test, udp_client_server_sendmsg_fd_name_changed) +{ + bool use_sendmsg = true, recvmsg_twobufs = false, use_connect = false; + + run_fd_name_changed_test(use_sendmsg, recvmsg_twobufs, use_connect, m_tid_filter, 7); +} + +TEST_F(sys_call_test, udp_client_server_multiple_connect_name_changed) +{ + unique_ptr fd_name_changed; + uint32_t num_name_changed_evts = 0; + + // INIT FILTER + before_open_t before_open = [&](sinsp* inspector) + { + sinsp_filter_compiler compiler(inspector, "fd.name_changed=true"); + fd_name_changed = std::move(compiler.compile()); + }; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) { return m_tid_filter(evt); }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + int sd; + + sd = socket(AF_INET, SOCK_DGRAM, 0); + if (sd < 0) + { + FAIL(); + } + + std::list ports = {8172, 8193, 8193, 8172, 8171}; + + for (auto& port : ports) + { + struct sockaddr_in serveraddr; + + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sin_family = AF_INET; + serveraddr.sin_port = htons(port); + serveraddr.sin_addr.s_addr = get_server_address(); + + if (connect(sd, (struct sockaddr*)&serveraddr, sizeof(serveraddr)) < 0) + { + close(sd); + FAIL() << "connect() failed"; + } + } + }; + + // + // OUTPUT VALDATION + // + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + if (fd_name_changed->run(e)) + { + num_name_changed_evts++; + } + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter, before_open); }); + + // Every connect should result in a name changed event other than the duplicate port. 
+ ASSERT_EQ(num_name_changed_evts, 4u); +} + +TEST_F(sys_call_test, udp_client_server_sendmsg_2buf_fd_name_changed) +{ + bool use_sendmsg = true, recvmsg_twobufs = true, use_connect = false; + + run_fd_name_changed_test(use_sendmsg, recvmsg_twobufs, use_connect, m_tid_filter, 7); +} + +TEST_F(sys_call_test, statsd_client_snaplen) +{ + // Test if the driver correctly increase snaplen for statsd traffic + std::string payload = + "soluta.necessitatibus.voluptatem.consequuntur.dignissimos.repudiandae.nostrum.lorem.ipsum:" + "18|c"; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { + return m_tid_filter(evt) && (evt->get_type() == PPME_SOCKET_SENDMSG_X || + evt->get_type() == PPME_SOCKET_SENDTO_X); + }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + // sendto with addr + udp_client client(0x0100007F, false, 8125); + client.m_payload = payload; + client.m_ignore_errors = true; + client.m_recv = false; + client.m_n_transactions = 1; + client.run(); + + // sendto without addr (connect) + client.m_use_connect = true; + client.run(); + + // sendmsg with addr + client.m_use_connect = false; + client.m_use_sendmsg = true; + client.run(); + + // sendmsg without addr + client.m_use_connect = true; + client.run(); + }; + + // + // OUTPUT VALDATION + // + int n = 0; + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + ++n; + EXPECT_EQ(payload, e->get_param_value_str("data")) + << "Failure on " << e->get_name() << " n=" << n; + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + EXPECT_EQ(4, n); +} + +TEST_F(sys_call_test, statsd_client_no_snaplen) +{ + // Test if the driver correctly increase snaplen for statsd traffic + std::string payload = + "soluta.necessitatibus.voluptatem.consequuntur.dignissimos.repudiandae.nostrum.lorem.ipsum:" + "18|c"; + + // + // FILTER + // + event_filter_t filter = [&](sinsp_evt* evt) + { + return m_tid_filter(evt) && (evt->get_type() == PPME_SOCKET_SENDMSG_X || + evt->get_type() == PPME_SOCKET_SENDTO_X); + }; + + // + // INITIALIZATION + // + run_callback_t test = [&](concurrent_object_handle inspector_handle) + { + // sendto with addr + // Different port + udp_client client(0x0100007F, false, 8126); + client.m_payload = payload; + client.m_ignore_errors = true; + client.m_recv = false; + client.m_n_transactions = 1; + client.run(); + + // sendto without addr (connect) + client.m_use_connect = true; + client.run(); + + // sendmsg with addr + client.m_use_connect = false; + client.m_use_sendmsg = true; + client.run(); + + // sendmsg without addr + client.m_use_connect = true; + client.run(); + }; + + // + // OUTPUT VALDATION + // + int n = 0; + captured_event_callback_t callback = [&](const callback_param& param) + { + sinsp_evt* e = param.m_evt; + ++n; + EXPECT_EQ(payload.substr(0, 80), e->get_param_value_str("data")) + << "Failure on " << e->get_name() << " n=" << n; + }; + + ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); }); + EXPECT_EQ(4, n); +} diff --git a/test/libsinsp_e2e/utils.h b/test/libsinsp_e2e/utils.h new file mode 100644 index 00000000000..0514a0eab1a --- /dev/null +++ b/test/libsinsp_e2e/utils.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 +/* +Copyright (C) 2024 The Falco Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +#pragma once + +#include +#include +#include + +#include + +inline bool parse_tuple(const std::string& tuple, + std::string& src_addr, + std::string& src_port, + std::string& dst_addr, + std::string& dst_port) +{ + std::string token; + std::stringstream ss(tuple); + std::vector tst; + std::string srcstr; + std::string dststr; + + if(tuple.find("->") == std::string::npos) + { + return false; + } + + while (std::getline(ss, token, '>')) { + tst.push_back(token); + } + + srcstr = tst[0].substr(0, tst[0].size() - 1); + dststr = tst[1]; + + ss.clear(); + ss.str(srcstr); + std::vector sst; + while (std::getline(ss, token, ':')) { + sst.push_back(token); + } + + EXPECT_EQ(2, (int)sst.size()); + src_addr = sst[0]; + src_port = sst[1]; + + ss.clear(); + ss.str(dststr); + std::vector dst; + while (std::getline(ss, token, ':')) { + dst.push_back(token); + } + EXPECT_EQ(2, (int)dst.size()); + dst_addr = dst[0]; + dst_port = dst[1]; + + return true; +} + +class nsenter +{ +public: + nsenter(int pid, const std::string& type); + virtual ~nsenter(); + +private: + int open_ns_fd(int pid, const std::string& type); + static std::unordered_map m_home_ns; + std::string m_type; +};
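
The tests in this file set all follow the same capture-driven skeleton: an event_filter_t that narrows the event stream, a run_callback_t that generates the workload while the capture is live, and a captured_event_callback_t that validates what was recorded. A minimal sketch of that pattern, with a hypothetical test name, an illustrative close-event check, and the concurrent_object_handle<sinsp> parameter assumed from the rest of the suite:

#include "event_capture.h"
#include "sys_call_test.h"

#include <fcntl.h>
#include <unistd.h>

// Hypothetical example test, not part of the suite: counts close exit events
// produced by the workload below, purely to illustrate the pattern.
TEST_F(sys_call_test, example_capture_skeleton)
{
	int callnum = 0;

	// FILTER: only consider events generated by this test's thread.
	event_filter_t filter = [&](sinsp_evt* evt) { return m_tid_filter(evt); };

	// TEST CODE: the workload whose events we want to observe.
	run_callback_t test = [&](concurrent_object_handle<sinsp> inspector_handle)
	{
		int fd = open("/dev/null", O_RDONLY);
		if (fd >= 0)
		{
			close(fd);
		}
	};

	// OUTPUT VALIDATION: invoked once per captured event matching the filter.
	captured_event_callback_t callback = [&](const callback_param& param)
	{
		if (param.m_evt->get_type() == PPME_SYSCALL_CLOSE_X)
		{
			callnum++;
		}
	};

	ASSERT_NO_FATAL_FAILURE({ event_capture::run(test, callback, filter); });
	ASSERT_GT(callnum, 0);
}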
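
The parse_tuple() helper declared above (and duplicated locally in udp_client_server.cpp) consumes the tuple strings that sinsp renders for connect/accept and fd parameters, in the form src_ip:src_port->dst_ip:dst_port. A small usage sketch; the addresses and the client-side port are made up, and the destination port is the suite's SERVER_PORT:

#include <gtest/gtest.h>

#include <string>

#include "utils.h"

// Illustrative only: shows the input format parse_tuple() expects and the
// four output fields it fills in.
TEST(parse_tuple_example, ipv4_tuple)
{
	std::string src_addr, src_port, dst_addr, dst_port;

	ASSERT_TRUE(parse_tuple("172.17.0.1:40706->172.17.0.1:3555",
	                        src_addr, src_port, dst_addr, dst_port));

	EXPECT_EQ("172.17.0.1", src_addr);
	EXPECT_EQ("40706", src_port);
	EXPECT_EQ("172.17.0.1", dst_addr);
	EXPECT_EQ("3555", dst_port);

	// Strings without the "->" separator (e.g. unix socket names) are rejected.
	EXPECT_FALSE(parse_tuple("/dev/null", src_addr, src_port, dst_addr, dst_port));
}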
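
Several run callbacks above finish with tee(-1, -1, 0, 0): the call fails immediately, but it still shows up in the capture as a PPME_GENERIC_E event whose ID parameter is PPM_SC_TEE, so the event callback can use it as an end-of-workload marker (see the PPM_SC_TEE check in runtest_ipv4m). A sketch of the two halves of that convention, assuming sys_call_test.h pulls in the sinsp and driver event definitions as it does for the tests above:

#include "sys_call_test.h"

#include <fcntl.h>  // tee(); _GNU_SOURCE is defined by default when building with g++
#include <string>

// Producer side: issue a tee() that is expected to fail with EBADF.
// Only the fact that the syscall is traced matters, not its result.
static void signal_workload_done()
{
	tee(-1, -1, 0, 0);
}

// Consumer side: inside a captured_event_callback_t, detect the marker.
static bool is_workload_done_marker(sinsp_evt* evt)
{
	if (evt->get_type() != PPME_GENERIC_E)
	{
		return false;
	}
	// For generic events the "ID" parameter carries the ppm syscall code.
	return std::stoll(evt->get_param_value_str("ID", false)) == PPM_SC_TEE;
}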
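
The tcp_server and tcp_client helpers above synchronize with the test body through the std_event ready/continue handshake: each peer signals ready once its socket is connected (or listening), then blocks until it is told to continue, which is how tcp_client_server_with_connection_before_capturing_starts gets the TCP connection established before the capture opens. A sketch of that ordering outside the capture harness; the function name and payload are illustrative, and the server address would normally come from get_server_address():

#include "tcp_client_server.h"

#include <cstdint>
#include <thread>

// Illustrative helper: run one echo transaction, holding both peers at the
// handshake until told to proceed.
static void run_one_echo_transaction(uint32_t server_ip)
{
	tcp_server server(SENDRECEIVE, /*wait_for_signal_to_continue=*/true);
	tcp_client client(server_ip, SENDRECEIVE, "example-payload", /*on_thread=*/true);

	std::thread server_thread(&tcp_server::run, &server);
	std::thread client_thread(&tcp_client::run, &client);

	// After both ready events fire, the TCP connection is established and
	// both peers block without exchanging any data.
	server.wait_till_ready();
	client.wait_till_ready();

	// A test would typically start its capture here, then release the peers.
	server.signal_continue();
	client.signal_continue();

	server_thread.join();
	client_thread.join();
}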