From 5773a7f6b61011465a34b0f93e4cb27b9416c852 Mon Sep 17 00:00:00 2001 From: Riven Zheng Date: Mon, 29 Apr 2024 20:28:50 +0800 Subject: [PATCH] Reconstructing kernel file structure and remove useless file and code. --- .github/remote_build/native_gcc/main.c | 10 +- README.md | 32 +- build_version.h | 6 +- clock/clock_native_gcc.c | 14 +- clock/clock_systick.c | 14 +- include/CMakeLists.txt | 7 - include/clock_tick.h | 14 +- include/kernel/at_rtos.h | 216 +++++++---- include/kernel/event.h | 29 -- include/kernel/kernel.h | 44 ++- include/kernel/kthread.h | 25 -- include/kernel/ktype.h | 41 +- include/kernel/mutex.h | 29 -- include/kernel/pool.h | 29 -- include/kernel/queue.h | 29 -- include/kernel/semaphore.h | 30 -- include/kernel/thread.h | 33 -- include/kernel/timer.h | 21 +- include/kernel/trace.h | 14 +- include/port.h | 38 +- kernel/event.c | 340 ++++++++-------- kernel/kernel.c | 223 ++++++----- kernel/kthread.c | 121 +++--- kernel/mutex.c | 206 +++++----- kernel/pool.c | 346 ++++++++--------- kernel/queue.c | 442 +++++++++++---------- kernel/semaphore.c | 355 +++++++++-------- kernel/thread.c | 439 ++++++++++----------- kernel/timer.c | 514 ++++++++++++++----------- kernel/trace.c | 14 +- package.json | 6 +- port/port_common.c | 14 +- port/port_iar.s | 16 +- port/port_keil_ac5.c | 10 +- port/port_keil_ac6.c | 16 +- port/port_native_gcc.c | 14 +- 36 files changed, 1810 insertions(+), 1941 deletions(-) delete mode 100644 include/kernel/event.h delete mode 100644 include/kernel/kthread.h delete mode 100644 include/kernel/mutex.h delete mode 100644 include/kernel/pool.h delete mode 100644 include/kernel/queue.h delete mode 100644 include/kernel/semaphore.h delete mode 100644 include/kernel/thread.h diff --git a/.github/remote_build/native_gcc/main.c b/.github/remote_build/native_gcc/main.c index cf09c62..08152d5 100644 --- a/.github/remote_build/native_gcc/main.c +++ b/.github/remote_build/native_gcc/main.c @@ -15,7 +15,7 @@ extern "C" { #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_KERNEL) #define SAMPLE_THREAD_STACK_SIZE (1024u) -ATOS_THREAD_DEFINE(sample_thread, SAMPLE_THREAD_STACK_SIZE, 5); +OS_THREAD_DEFINE(sample_thread, SAMPLE_THREAD_STACK_SIZE, 5); static os_thread_id_t g_sample_thread_id; @@ -26,20 +26,20 @@ static void sample_entry_thread(void) { while(1) { /* Put the current thread into sleep state */ - AtOS.thread_sleep(1000); + os.thread_sleep(1000); } } int main(void) { - g_sample_thread_id = AtOS.thread_init(sample_thread, sample_entry_thread); + g_sample_thread_id = os.thread_init(sample_thread, sample_entry_thread); - if (AtOS.id_isInvalid(g_sample_thread_id)) { + if (os.id_isInvalid(g_sample_thread_id)) { /* return _PC_CMPT_FAILED; */ } /* At_RTOS kernel running starts */ - AtOS.schedule_run(); + os.schedule_run(); D_ASSERT(0); while(1) {}; diff --git a/README.md b/README.md index 67d8b72..8f7b6c8 100644 --- a/README.md +++ b/README.md @@ -174,13 +174,13 @@ The following sample codes illustrates how to create your first thread: #include "at_rtos.h" /* Define a thread hook to specific the stack size and prioriy of the thread */ -ATOS_THREAD_DEFINE(your_thread_define, 1024, 7); // Set the thread stack size to 1024 bytes and the schedule prioriy level to 7. +OS_THREAD_DEFINE(your_thread_define, 1024, 7); // Set the thread stack size to 1024 bytes and the schedule prioriy level to 7. /* User thread's entry function. 
*/ static void your_thread_entry_function(void) { while(1) { - AtOS.thread_sleep(1000u); + os.thread_sleep(1000u); } } @@ -188,25 +188,31 @@ static void your_thread_entry_function(void) int main(void) { /* Initialize the your your thread. */ - os_thread_id_t your_thread_id = AtOS.thread_init(your_thread_define, your_thread_entry_function); - if (AtOS.id_isInvalid(your_thread_id)) { + os_thread_id_t your_thread_id = os.thread_init(your_thread_define, your_thread_entry_function); + if (os.id_isInvalid(your_thread_id)) { printf("Thread %s init failed\n", your_thread_id.pName); } /* The At-RTOS kernel schedule starts to run. */ - AtOS.schedule_run(); + os.schedule_run(); } ``` -Note that: The following c/h files path must be included/contained in your project workshop. +The following kernel H file path must be included in your project workshop. -- ``\At-RTOS\ -- ``\At-RTOS\include\ -- ``\At-RTOS\include\kernel\ -- ``\At-RTOS\kernel\.c -- ``\At-RTOS\clock\clock_systick.c -- ``\At-RTOS\port\port_common.c -- ``\At-RTOS\port\.c +```shell +\ +\include\ +\include\kernel\ +``` +The following kernel C file should be placed in your project workshop based on your chip feature and compiler. + +```shell +\kernel\.c +\clock\clock_systick.c +\port\port_common.c +\port\.c +``` ## Roadmap diff --git a/build_version.h b/build_version.h index aa51289..f8d61e8 100644 --- a/build_version.h +++ b/build_version.h @@ -12,11 +12,11 @@ extern "C" { #endif -#define ATOS_BUILD_TIME "2024-04-20,15:06" -#define ATOS_COMMIT_HEAD_ID "15ae539784cbd1d3a83e2f08c4cf5c4d7e6a71e8" +#define ATOS_BUILD_TIME "2024-04-29,19:55" +#define ATOS_COMMIT_HEAD_ID "5fdcc8b72c9420a21d603575c9090f051d54301c" #define ATOS_VERSION_MAJOR_NUMBER (1u) #define ATOS_VERSION_MINOR_NUMBER (4u) -#define ATOS_VERSION_PATCH_NUMBER (4u) +#define ATOS_VERSION_PATCH_NUMBER (5u) #define ATOS_VERSION_MAJOR_NUMBER_MASK (0x03FFu) #define ATOS_VERSION_MAJOR_NUMBER_POS (22u) diff --git a/clock/clock_native_gcc.c b/clock/clock_native_gcc.c index 14324cf..2766b13 100644 --- a/clock/clock_native_gcc.c +++ b/clock/clock_native_gcc.c @@ -74,7 +74,7 @@ static void _clock_time_elapsed_report(u32_t us) /** * @brief It's invoked in the SysTick_Handler to maintain the clock system. */ -void _impl_clock_isr(void) +void clock_isr(void) { /* Nothing need to do for kernel cmake sample build. */ } @@ -84,7 +84,7 @@ void _impl_clock_isr(void) * * @param Value of the next timeout. */ -void _impl_clock_time_interval_set(u32_t interval_us) +void clock_time_interval_set(u32_t interval_us) { /* Nothing need to do for kernel cmake sample build. */ } @@ -94,7 +94,7 @@ void _impl_clock_time_interval_set(u32_t interval_us) * * @return Value of the unreported elapse time. */ -u32_t _impl_clock_time_elapsed_get(void) +u32_t clock_time_elapsed_get(void) { /* Nothing need to do for kernel cmake sample build. */ return 0u; @@ -105,7 +105,7 @@ u32_t _impl_clock_time_elapsed_get(void) * * @return Value of the current clock time. */ -u32_t _impl_clock_time_get(void) +u32_t clock_time_get(void) { /* Nothing need to do for kernel cmake sample build. */ return 0u; @@ -114,7 +114,7 @@ u32_t _impl_clock_time_get(void) /** * @brief Enable the time clock. */ -void _impl_clock_time_enable(void) +void clock_time_enable(void) { /* Nothing need to do for kernel cmake sample build. */ } @@ -122,7 +122,7 @@ void _impl_clock_time_enable(void) /** * @brief Disable the time clock. */ -void _impl_clock_time_disable(void) +void clock_time_disable(void) { /* Nothing need to do for kernel cmake sample build. 
*/ } @@ -130,7 +130,7 @@ void _impl_clock_time_disable(void) /** * @brief Init the time clock. */ -void _impl_clock_time_init(time_report_handler_t pTime_function) +void clock_time_init(time_report_handler_t pTime_function) { g_clock_resource.pCallFunc = pTime_function; diff --git a/clock/clock_systick.c b/clock/clock_systick.c index fa60b63..7e9450c 100644 --- a/clock/clock_systick.c +++ b/clock/clock_systick.c @@ -107,7 +107,7 @@ static void _clock_time_elapsed_report(u32_t us) /** * @brief It's invoked in the SysTick_Handler to maintain the clock system. */ -void _impl_clock_isr(void) +void clock_isr(void) { /** * For maintain purpose. @@ -129,7 +129,7 @@ void _impl_clock_isr(void) * * @param Value of the next timeout. */ -void _impl_clock_time_interval_set(u32_t interval_us) +void clock_time_interval_set(u32_t interval_us) { if (interval_us == OS_TIME_FOREVER_VAL) { SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk; @@ -196,7 +196,7 @@ void _impl_clock_time_interval_set(u32_t interval_us) * * @return Value of the unreported elapse time. */ -u32_t _impl_clock_time_elapsed_get(void) +u32_t clock_time_elapsed_get(void) { ARCH_ENTER_CRITICAL_SECTION(); @@ -212,7 +212,7 @@ u32_t _impl_clock_time_elapsed_get(void) * * @return Value of the current clock time. */ -u32_t _impl_clock_time_get(void) +u32_t clock_time_get(void) { ARCH_ENTER_CRITICAL_SECTION(); @@ -226,7 +226,7 @@ u32_t _impl_clock_time_get(void) /** * @brief Enable the time clock. */ -void _impl_clock_time_enable(void) +void clock_time_enable(void) { if (!g_clock_resource.ctrl_enabled) { g_clock_resource.ctrl_enabled = TRUE; @@ -237,7 +237,7 @@ void _impl_clock_time_enable(void) /** * @brief Disable the time clock. */ -void _impl_clock_time_disable(void) +void clock_time_disable(void) { SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk; } @@ -245,7 +245,7 @@ void _impl_clock_time_disable(void) /** * @brief Init the time clock. */ -void _impl_clock_time_init(time_report_handler_t pTime_function) +void clock_time_init(time_report_handler_t pTime_function) { g_clock_resource.pCallFunc = pTime_function; diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt index 24c53e9..50836c6 100644 --- a/include/CMakeLists.txt +++ b/include/CMakeLists.txt @@ -10,20 +10,13 @@ target_sources(kernel_include ${KERNEL_PATH}/include/kernel/at_rtos.h ${KERNEL_PATH}/include/kernel/compiler.h ${KERNEL_PATH}/include/kernel/configuration.h - ${KERNEL_PATH}/include/kernel/event.h ${KERNEL_PATH}/include/kernel/kernel.h ${KERNEL_PATH}/include/kernel/linker.h ${KERNEL_PATH}/include/kernel/list.h ${KERNEL_PATH}/include/kernel/kstruct.h - ${KERNEL_PATH}/include/kernel/mutex.h ${KERNEL_PATH}/include/kernel/postcode.h - ${KERNEL_PATH}/include/kernel/queue.h - ${KERNEL_PATH}/include/kernel/semaphore.h - ${KERNEL_PATH}/include/kernel/thread.h ${KERNEL_PATH}/include/kernel/timer.h ${KERNEL_PATH}/include/kernel/trace.h ${KERNEL_PATH}/include/kernel/typedef.h ${KERNEL_PATH}/include/kernel/ktype.h - ${KERNEL_PATH}/include/kernel/kthread.h - ${KERNEL_PATH}/include/kernel/pool.h ) diff --git a/include/clock_tick.h b/include/clock_tick.h index e2782e8..7e285c8 100644 --- a/include/clock_tick.h +++ b/include/clock_tick.h @@ -22,13 +22,13 @@ typedef void (*time_report_handler_t)(u32_t); /** * The implement function lists for rtos kernel internal use. 
*/ -void _impl_clock_isr(void); -void _impl_clock_time_interval_set(u32_t interval_us); -u32_t _impl_clock_time_elapsed_get(void); -u32_t _impl_clock_time_get(void); -void _impl_clock_time_enable(void); -void _impl_clock_time_disable(void); -void _impl_clock_time_init(time_report_handler_t pTime_function); +void clock_isr(void); +void clock_time_interval_set(u32_t interval_us); +u32_t clock_time_elapsed_get(void); +u32_t clock_time_get(void); +void clock_time_enable(void); +void clock_time_disable(void); +void clock_time_init(time_report_handler_t pTime_function); #ifdef __cplusplus } diff --git a/include/kernel/at_rtos.h b/include/kernel/at_rtos.h index d38e386..7c8fd51 100644 --- a/include/kernel/at_rtos.h +++ b/include/kernel/at_rtos.h @@ -9,22 +9,16 @@ #define _AT_RTOS_H_ #include "ktype.h" +#include "kstruct.h" #include "configuration.h" #include "postcode.h" #include "trace.h" -#include "thread.h" -#include "timer.h" -#include "semaphore.h" -#include "event.h" -#include "mutex.h" -#include "queue.h" -#include "pool.h" #ifdef __cplusplus extern "C" { #endif -#define ATOS_THREAD_DEFINE(thread_name, stack_size, thread_priority) \ +#define OS_THREAD_DEFINE(thread_name, stack_size, thread_priority) \ static os_thread_symbol_t thread_name[((u32_t)(stack_size) / sizeof(u32_t))] = { \ [0] = {.size = stack_size}, \ [1] = {.priority = thread_priority}, \ @@ -49,18 +43,18 @@ extern "C" { * demo usage: *#include "at_rtos.h" * - *ATOS_THREAD_DEFINE(sample_thread, 512, 5); + * OS_THREAD_DEFINE(sample_thread, 512, 5); * * void thread_sample_function(void) * { * while(1) { - * AtOS.thread_sleep(1000u); + * os.thread_sleep(1000u); * } * } * * int main(void) * { - * os_thread_id_t sample_id = thread_init(sample_thread, thread_sample_function); + * os_thread_id_t sample_id = os_thread_init(sample_thread, thread_sample_function); * if (os_id_is_invalid(sample_id)) { * printf("Thread %s init failed\n", sample_id.pName); * } @@ -68,8 +62,11 @@ extern "C" { * } * */ -static inline os_thread_id_t thread_init(os_thread_symbol_t *pThread_symbol, pThread_entryFunc_t pEntryFun) +static inline os_thread_id_t os_thread_init(os_thread_symbol_t *pThread_symbol, pThread_entryFunc_t pEntryFun) { + extern u32_t _impl_thread_os_id_to_number(os_id_t id); + extern os_id_t _impl_thread_init(void (*pThread_entryFunc_t)(void), u32_t *pAddress, u32_t size, u16_t priority, const char_t *pName); + os_thread_id_t id = {0u}; u32_t *pStackAddress = (u32_t *)pThread_symbol; u32_t stackSize = (u32_t)pThread_symbol[0].size; @@ -95,12 +92,14 @@ static inline os_thread_id_t thread_init(os_thread_symbol_t *pThread_symbol, pTh * void thread_sample_function(void) * { * while(1) { - * thread_sleep(1000); // Put the thread to sleep mode 1 sec. + * os_thread_sleep(1000); // Put the thread to sleep mode 1 sec. * } * } */ -static inline u32p_t thread_sleep(u32_t ms) +static inline u32p_t os_thread_sleep(u32_t ms) { + extern u32p_t _impl_thread_sleep(u32_t ms); + return (u32p_t)_impl_thread_sleep(ms); } @@ -113,10 +112,12 @@ static inline u32p_t thread_sleep(u32_t ms) ** * demo usage: * - * thread_resume(sample_id); // The variable sample_id created in above thread init demo. + * os_thread_resume(sample_id); // The variable sample_id created in above thread init demo. 
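+ *
+ *    // Illustrative follow-up (not in the original demo): like the other APIs in this header,
+ *    // the returned postcode can be checked with PC_IOK(), e.g.
+ *    //   if (PC_IOK(os_thread_resume(sample_id))) { printf("Thread %s resumed\n", sample_id.pName); }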
*/ -static inline u32p_t thread_resume(os_thread_id_t id) +static inline u32p_t os_thread_resume(os_thread_id_t id) { + extern u32p_t _impl_thread_resume(os_id_t id); + return (u32p_t)_impl_thread_resume(id.val); } @@ -129,10 +130,12 @@ static inline u32p_t thread_resume(os_thread_id_t id) ** * demo usage: * - * thread_suspend(sample_id); // The variable sample_id created in above thread init demo. + * os_thread_suspend(sample_id); // The variable sample_id created in above thread init demo. */ -static inline u32p_t thread_suspend(os_thread_id_t id) +static inline u32p_t os_thread_suspend(os_thread_id_t id) { + extern u32p_t _impl_thread_suspend(os_id_t id); + return (u32p_t)_impl_thread_suspend(id.val); } @@ -148,12 +151,14 @@ static inline u32p_t thread_suspend(os_thread_id_t id) * void thread_sample_function(void) * { * while(1) { - * thread_yield(); // Put current thread to sleep mode manually. + * os_thread_yield(); // Put current thread to sleep mode manually. * } * } */ -static inline u32p_t thread_yield(void) +static inline u32p_t os_thread_yield(void) { + extern u32p_t _impl_thread_yield(void); + return (u32p_t)_impl_thread_yield(); } @@ -166,10 +171,12 @@ static inline u32p_t thread_yield(void) ** * demo usage: * - * thread_delete(sample_id); // The variable sample_id created in above thread init demo. + * os_thread_delete(sample_id); // The variable sample_id created in above thread init demo. */ -static inline u32p_t thread_delete(os_thread_id_t id) +static inline u32p_t os_thread_delete(os_thread_id_t id) { + extern u32p_t _impl_thread_delete(os_id_t id); + return (u32p_t)_impl_thread_delete(id.val); } @@ -190,14 +197,17 @@ static inline u32p_t thread_delete(os_thread_id_t id) * // The function will be called per 1 seconds. * } * - * os_timer_id_t sample_id = timer_init(sample_timer_function, TRUE, 1000u, "sample"); + * os_timer_id_t sample_id = os_timer_init(sample_timer_function, TRUE, 1000u, "sample"); * if (os_id_is_invalid(sample_id)) { * printf("Timer %s init failed\n", sample_id.pName); * } * ... 
*/ -static inline os_timer_id_t timer_init(pTimer_callbackFunc_t pEntryFun, b_t isCycle, u32_t timeout_ms, const char_t *pName) +static inline os_timer_id_t os_timer_init(pTimer_callbackFunc_t pEntryFun, b_t isCycle, u32_t timeout_ms, const char_t *pName) { + extern u32_t _impl_timer_os_id_to_number(u32_t id); + extern os_id_t _impl_timer_init(pTimer_callbackFunc_t pCallFun, b_t isCycle, u32_t timeout_ms, const char_t *pName); + os_timer_id_t id = {0u}; id.val = _impl_timer_init(pEntryFun, isCycle, timeout_ms, pName); @@ -219,13 +229,15 @@ static inline os_timer_id_t timer_init(pTimer_callbackFunc_t pEntryFun, b_t isCy ** * demo usage: * - * u32p_t postcode = timer_start(sample_id, FALSE, 1000u); + * u32p_t postcode = os_timer_start(sample_id, FALSE, 1000u); * if (PC_IOK(postcode)) { * printf("Timer start successful\n"); * } */ -static inline u32p_t timer_start(os_timer_id_t id, b_t isCycle, u32_t timeout_ms) +static inline u32p_t os_timer_start(os_timer_id_t id, b_t isCycle, u32_t timeout_ms) { + extern u32p_t _impl_timer_start(os_id_t id, b_t isCycle, u32_t timeout_ms); + return (u32p_t)_impl_timer_start(id.val, isCycle, timeout_ms); } @@ -238,13 +250,15 @@ static inline u32p_t timer_start(os_timer_id_t id, b_t isCycle, u32_t timeout_ms ** * demo usage: * - * u32p_t postcode = timer_stop(sample_id); + * u32p_t postcode = os_timer_stop(sample_id); * if (PC_IOK(postcode)) { * printf("Timer stop successful\n"); * } */ -static inline u32p_t timer_stop(os_timer_id_t id) +static inline u32p_t os_timer_stop(os_timer_id_t id) { + extern u32p_t _impl_timer_stop(os_id_t id); + return (u32p_t)_impl_timer_stop(id.val); } @@ -257,13 +271,15 @@ static inline u32p_t timer_stop(os_timer_id_t id) ** * demo usage: * - * if(timer_isBusy(sample_id)) { + * if(os_timer_busy(sample_id)) { * printf("Timer %s is busy\n", sample_id.pName); * } */ -static inline u32p_t timer_isBusy(os_timer_id_t id) +static inline u32p_t os_timer_busy(os_timer_id_t id) { - return (u32p_t)_impl_timer_status_isBusy(id.val); + extern b_t _impl_timer_busy(os_id_t id); + + return (u32p_t)_impl_timer_busy(id.val); } /** @@ -273,10 +289,12 @@ static inline u32p_t timer_isBusy(os_timer_id_t id) ** * demo usage: * - * printf("The system consume time: %d\n", timer_system_total_ms()); + * printf("The system consume time: %d\n", os_timer_system_total_ms()); */ -static inline u32_t timer_system_total_ms(void) +static inline u32_t os_timer_system_total_ms(void) { + extern u32_t _impl_timer_total_system_ms_get(void); + return (u32_t)_impl_timer_total_system_ms_get(); } @@ -294,14 +312,17 @@ static inline u32_t timer_system_total_ms(void) * demo usage: * * // Init a binary semaphore count. - * os_sem_id_t sample_id = sem_init(0u, 1u, FALSE, "sample"); + * os_sem_id_t sample_id = os_sem_init(0u, 1u, FALSE, "sample"); * if (os_id_is_invalid(sample_id)) { * printf("Semaphore %s init failed\n", sample_id.pName); * } * ... 
*/ -static inline os_sem_id_t sem_init(u8_t initial, u8_t limit, b_t permit, const char_t *pName) +static inline os_sem_id_t os_sem_init(u8_t initial, u8_t limit, b_t permit, const char_t *pName) { + u32_t _impl_semaphore_os_id_to_number(os_id_t id); + os_id_t _impl_semaphore_init(u8_t initialCount, u8_t limitCount, b_t permit, const char_t *pName); + os_sem_id_t id = {0u}; id.val = _impl_semaphore_init(initial, limit, permit, pName); @@ -320,7 +341,7 @@ static inline os_sem_id_t sem_init(u8_t initial, u8_t limit, b_t permit, const c ** * demo usage: * - * u32p_t postcode = sem_take(sample_id, 1000u); + * u32p_t postcode = os_sem_take(sample_id, 1000u); * if (PC_IOK(postcode)) { * if (postcode == PC_SC_TIMEOUT) { * printf("Semaphore take wait timeout\n"); @@ -331,15 +352,17 @@ static inline os_sem_id_t sem_init(u8_t initial, u8_t limit, b_t permit, const c * printf("Semaphore take error: 0x%x\n", postcode); * } * - * u32p_t postcode = sem_take(sample_id, OS_WAIT_FOREVER); + * u32p_t postcode = os_sem_take(sample_id, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Semaphore take successful\n"); * } else { * printf("Semaphore take error: 0x%x\n", postcode); * } */ -static inline u32p_t sem_take(os_sem_id_t id, u32_t timeout_ms) +static inline u32p_t os_sem_take(os_sem_id_t id, u32_t timeout_ms) { + extern u32p_t _impl_semaphore_take(os_id_t id, u32_t timeout_ms); + return (u32p_t)_impl_semaphore_take(id.val, timeout_ms); } @@ -352,15 +375,17 @@ static inline u32p_t sem_take(os_sem_id_t id, u32_t timeout_ms) ** * demo usage: * - * u32p_t postcode = sem_give(sample_id); + * u32p_t postcode = os_sem_give(sample_id); * if (PC_IOK(postcode)) { * printf("Semaphore give successful\n"); * } else { * printf("Semaphore give error: 0x%x\n", postcode); * } */ -static inline u32p_t sem_give(os_sem_id_t id) +static inline u32p_t os_sem_give(os_sem_id_t id) { + extern u32_t _impl_semaphore_give(os_id_t id); + return (u32p_t)_impl_semaphore_give(id.val); } @@ -373,15 +398,17 @@ static inline u32p_t sem_give(os_sem_id_t id) ** * demo usage: * - * u32p_t postcode = sem_flush(sample_id); + * u32p_t postcode = os_sem_flush(sample_id); * if (PC_IOK(postcode)) { * printf("Semaphore flush successful\n"); * } else { * printf("Semaphore flush error: 0x%x\n", postcode); * } */ -static inline u32p_t sem_flush(os_sem_id_t id) +static inline u32p_t os_sem_flush(os_sem_id_t id) { + extern u32_t _impl_semaphore_flush(os_id_t id); + return (u32p_t)_impl_semaphore_flush(id.val); } @@ -394,14 +421,17 @@ static inline u32p_t sem_flush(os_sem_id_t id) ** * demo usage: * - * os_mutex_id_t sample_id = mutex_init("sample"); + * os_mutex_id_t sample_id = os_mutex_init("sample"); * if (os_id_is_invalid(sample_id)) { * printf("Mutex %s init failed\n", sample_id.pName); * } * ... 
*/ -static inline os_mutex_id_t mutex_init(const char_t *pName) +static inline os_mutex_id_t os_mutex_init(const char_t *pName) { + extern u32_t _impl_mutex_os_id_to_number(os_id_t id); + extern os_id_t _impl_mutex_init(const char_t *pName); + os_mutex_id_t id = {0u}; id.val = _impl_mutex_init(pName); @@ -420,15 +450,17 @@ static inline os_mutex_id_t mutex_init(const char_t *pName) ** * demo usage: * - * u32p_t postcode = mutex_lock(sample_id); + * u32p_t postcode = os_mutex_lock(sample_id); * if (PC_IOK(postcode)) { * printf("Mutex lock successful\n"); * } else { * printf("Mutex lock error: 0x%x\n", postcode); * } */ -static inline u32p_t mutex_lock(os_mutex_id_t id) +static inline u32p_t os_mutex_lock(os_mutex_id_t id) { + extern u32p_t _impl_mutex_lock(os_id_t id); + return (u32p_t)_impl_mutex_lock(id.val); } @@ -441,15 +473,17 @@ static inline u32p_t mutex_lock(os_mutex_id_t id) ** * demo usage: * - * u32p_t postcode = mutex_unlock(sample_id); + * u32p_t postcode = os_mutex_unlock(sample_id); * if (PC_IOK(postcode)) { * printf("Mutex unlock successful\n"); * } else { * printf("Mutex unlock error: 0x%x\n", postcode); * } */ -static inline u32p_t mutex_unlock(os_mutex_id_t id) +static inline u32p_t os_mutex_unlock(os_mutex_id_t id) { + extern u32p_t _impl_mutex_unlock(os_id_t id); + return (u32p_t)_impl_mutex_unlock(id.val); } @@ -465,14 +499,17 @@ static inline u32p_t mutex_unlock(os_mutex_id_t id) ** * demo usage: * - * os_evt_id_t sample_id = evt_init(0u, 0u, "sample"); + * os_evt_id_t sample_id = os_evt_init(0u, 0u, "sample"); * if (os_id_is_invalid(sample_id)) { * printf("Event %s init failed\n", sample_id.pName); * } * ... */ -static inline os_evt_id_t evt_init(u32_t edgeMask, u32_t clrDisMask, const char_t *pName) +static inline os_evt_id_t os_evt_init(u32_t edgeMask, u32_t clrDisMask, const char_t *pName) { + extern u32_t _impl_event_os_id_to_number(os_id_t id); + extern os_id_t _impl_event_init(u32_t edgeMask, u32_t clrDisMask, const char_t *pName); + os_msgq_id_t id = {0u}; id.val = _impl_event_init(edgeMask, clrDisMask, pName); @@ -494,15 +531,17 @@ static inline os_evt_id_t evt_init(u32_t edgeMask, u32_t clrDisMask, const char_ ** * demo usage: * - * u32p_t postcode = evt_set(sample_id, 0x01u, 0u, 0u); + * u32p_t postcode = os_evt_set(sample_id, 0x01u, 0u, 0u); * if (PC_IOK(postcode)) { * printf("Event set successful\n"); * } else { * printf("Event set error: 0x%x\n", postcode); * } */ -static inline u32p_t evt_set(os_evt_id_t id, u32_t set, u32_t clear, u32_t toggle) +static inline u32p_t os_evt_set(os_evt_id_t id, u32_t set, u32_t clear, u32_t toggle) { + extern u32p_t _impl_event_set(os_id_t id, u32_t set, u32_t clear, u32_t toggle); + return (u32p_t)_impl_event_set(id.val, set, clear, toggle); } @@ -521,14 +560,14 @@ static inline u32p_t evt_set(os_evt_id_t id, u32_t set, u32_t clear, u32_t toggl * demo usage: * * u32_t event = 0u; - * u32p_t postcode = evt_wait(sample_id, &event, 0xFFFFFFFu, 0x01u, 0x01u, OS_WAIT_FOREVER); + * u32p_t postcode = os_evt_wait(sample_id, &event, 0xFFFFFFFu, 0x01u, 0x01u, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Event wait successful, The event value is 0x%x\n", event); * } else { * printf("Event wait error: 0x%x\n", postcode); * } * - * u32p_t postcode = evt_wait(sample_id, &event, 0xFFFFFFFu, 0x03u, 0x03u, 1000u); + * u32p_t postcode = os_evt_wait(sample_id, &event, 0xFFFFFFFu, 0x03u, 0x03u, 1000u); * if (PC_IOK(postcode)) { * if (postcode == PC_SC_TIMEOUT) { * printf("Event wait timeout\n"); @@ -539,9 +578,12 @@ static inline 
u32p_t evt_set(os_evt_id_t id, u32_t set, u32_t clear, u32_t toggl * printf("Event wait error: 0x%x\n", postcode); * } */ -static inline u32p_t evt_wait(os_evt_id_t id, os_evt_val_t *pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, - u32_t timeout_ms) +static inline u32p_t os_evt_wait(os_evt_id_t id, os_evt_val_t *pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, + u32_t timeout_ms) { + extern u32p_t _impl_event_wait(os_id_t id, os_evt_val_t * pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, + u32_t timeout_ms); + return (u32p_t)_impl_event_wait(id.val, pEvtData, desired_val, listen_mask, group_mask, timeout_ms); } @@ -558,13 +600,16 @@ static inline u32p_t evt_wait(os_evt_id_t id, os_evt_val_t *pEvtData, u32_t desi * demo usage: * static u8_t g_sample_msgq[3 * 10] = {0u}; * - * os_msgq_id_t sample_id = msgq_init((u8_t*)g_sample_msgq, 3u, 10u, "sample"); + * os_msgq_id_t sample_id = os_msgq_init((u8_t*)g_sample_msgq, 3u, 10u, "sample"); * if (os_id_is_invalid(sample_id)) { * printf("Message queue %s init failed\n", sample_id.pName); * } */ -static inline os_msgq_id_t msgq_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) +static inline os_msgq_id_t os_msgq_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) { + extern u32_t _impl_queue_os_id_to_number(os_id_t id); + extern os_id_t _impl_queue_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName); + os_msgq_id_t id = {0u}; id.val = _impl_queue_init(pQueueBufferAddr, elementLen, elementNum, pName); @@ -588,14 +633,14 @@ static inline os_msgq_id_t msgq_init(const void *pQueueBufferAddr, u16_t element * demo usage: * * u8_t txdata = 0u; - * u32p_t postcode = msgq_send(sample_id, &txdata, 0x01u, FALSE, OS_WAIT_FOREVER); + * u32p_t postcode = os_msgq_put(sample_id, &txdata, 0x01u, FALSE, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Message queue send successful\n"); * } else { * printf("Message queue send error: 0x%x\n", postcode); * } * - * postcode = msgq_send(sample_id, &txdata, 0x01u, TRUE, 1000u); + * postcode = os_msgq_put(sample_id, &txdata, 0x01u, TRUE, 1000u); * if (PC_IOK(postcode)) { * if (postcode == PC_SC_TIMEOUT) { * printf("Message queue send timeout\n"); @@ -606,8 +651,10 @@ static inline os_msgq_id_t msgq_init(const void *pQueueBufferAddr, u16_t element * printf("Message queue send error: 0x%x\n", postcode); * } */ -static inline u32p_t msgq_put(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms) +static inline u32p_t os_msgq_put(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms) { + extern u32p_t _impl_queue_send(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms); + return (u32p_t)_impl_queue_send(id.val, pUserBuffer, bufferSize, isToFront, timeout_ms); } @@ -625,14 +672,14 @@ static inline u32p_t msgq_put(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bu * demo usage: * * u8_t rxdata = 0u; - * u32p_t postcode = msgq_receive(sample_id, &rxdata, 0x01u, TRUE, OS_WAIT_FOREVER); + * u32p_t postcode = os_msgq_get(sample_id, &rxdata, 0x01u, TRUE, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Message queue receive successful, the rx data is 0x%x\n", rxdata); * } else { * printf("Message queue receive error: 0x%x\n", postcode); * } * - * postcode = msgq_receive(sample_id, &rxdata, 0x01u, FALSE, 1000u); + * postcode = 
os_msgq_get(sample_id, &rxdata, 0x01u, FALSE, 1000u); * if (PC_IOK(postcode)) { * if (postcode == PC_SC_TIMEOUT) { * printf("Message queue receive timeout\n"); @@ -643,8 +690,10 @@ static inline u32p_t msgq_put(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bu * printf("Message queue receive error: 0x%x\n", postcode); * } */ -static inline u32p_t msgq_get(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms) +static inline u32p_t os_msgq_get(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms) { + extern u32p_t _impl_queue_receive(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms); + return (u32p_t)_impl_queue_receive(id.val, pUserBuffer, bufferSize, isFromBack, timeout_ms); } @@ -661,13 +710,16 @@ static inline u32p_t msgq_get(os_msgq_id_t id, const u8_t *pUserBuffer, u16_t bu * demo usage: * static u8_t g_sample_pool[3 * 10] = {0u}; * - * os_pool_id_t sample_id = pool_init((const void*)g_sample_pool, 10u, 3u, "sample"); + * os_pool_id_t sample_id = os_pool_init((const void*)g_sample_pool, 10u, 3u, "sample"); * if (os_id_is_invalid(sample_id)) { * printf("Memory pool %s init failed\n", sample_id.pName); * } */ -static inline os_pool_id_t pool_init(const void *pMemAddress, u16_t elementLen, u16_t elementNum, const char_t *pName) +static inline os_pool_id_t os_pool_init(const void *pMemAddress, u16_t elementLen, u16_t elementNum, const char_t *pName) { + extern u32_t _impl_pool_os_id_to_number(os_id_t id); + extern os_id_t _impl_pool_init(const void *pMemAddr, u16_t elementLen, u16_t elementNum, const char_t *pName); + os_pool_id_t id = {0u}; id.val = _impl_pool_init(pMemAddress, elementLen, elementNum, pName); @@ -690,14 +742,14 @@ static inline os_pool_id_t pool_init(const void *pMemAddress, u16_t elementLen, * demo usage: * * u8_t* pTakeMem = NULL; - * u32p_t postcode = pool_take(sample_id, (void **)&pTakeMem, 10u, OS_WAIT_FOREVER); + * u32p_t postcode = os_pool_take(sample_id, (void **)&pTakeMem, 10u, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Memory pool take successful\n"); * } else { * printf("Memory pool take error: 0x%x\n", postcode); * } * - * u32p_t postcode = pool_take(sample_id, (void **)&pTakeMem, 10u, 2000u); + * u32p_t postcode = os_pool_take(sample_id, (void **)&pTakeMem, 10u, 2000u); * if (PC_IOK(postcode)) { * if (postcode == PC_SC_TIMEOUT) { * printf("Memory pool take timeout\n"); @@ -708,8 +760,10 @@ static inline os_pool_id_t pool_init(const void *pMemAddress, u16_t elementLen, * printf("Memory pool take error: 0x%x\n", postcode); * } */ -static inline u32p_t pool_take(os_pool_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms) +static inline u32p_t os_pool_take(os_pool_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms) { + extern u32p_t _impl_pool_take(os_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms); + return (u32p_t)_impl_pool_take(id.val, ppUserBuffer, bufferSize, timeout_ms); } @@ -724,11 +778,11 @@ static inline u32p_t pool_take(os_pool_id_t id, void **ppUserBuffer, u16_t buffe * demo usage: * * u8_t* pTakeMem = NULL; - * u32p_t postcode = pool_take(sample_id, (void **)&pTakeMem, 10u, OS_WAIT_FOREVER); + * u32p_t postcode = os_pool_take(sample_id, (void **)&pTakeMem, 10u, OS_WAIT_FOREVER); * if (PC_IOK(postcode)) { * printf("Memory pool take successful\n"); * - * u32p_t postcode = pool_release(sample_id, (void **)&pTakeMem); + * u32p_t postcode = os_pool_release(sample_id, (void 
**)&pTakeMem); * if (PC_IOK(postcode)) { * printf("Memory pool release successful\n"); * } else { @@ -739,8 +793,10 @@ static inline u32p_t pool_take(os_pool_id_t id, void **ppUserBuffer, u16_t buffe * printf("Memory pool take error: 0x%x\n", postcode); * } */ -static inline u32p_t pool_release(os_pool_id_t id, void **ppUserBuffer) +static inline u32p_t os_pool_release(os_pool_id_t id, void **ppUserBuffer) { + extern u32p_t _impl_pool_release(os_id_t id, void **ppUserBuffer); + return (u32p_t)_impl_pool_release(id.val, ppUserBuffer); } @@ -761,7 +817,7 @@ static inline u32p_t pool_release(os_pool_id_t id, void **ppUserBuffer) */ static inline b_t os_id_is_invalid(struct os_id id) { - return (b_t)_impl_kernel_os_id_is_invalid(id); + return (b_t)kernel_os_id_is_invalid(id); } /** @@ -777,8 +833,12 @@ static inline b_t os_id_is_invalid(struct os_id id) */ static inline os_thread_id_t os_id_current_thread(void) { + extern const char_t *_impl_thread_name_get(os_id_t id); + extern os_id_t kernel_thread_runIdGet(void); + extern u32_t _impl_thread_os_id_to_number(os_id_t id); + os_thread_id_t id = {0u}; - id.val = _impl_kernel_thread_runIdGet(); + id.val = kernel_thread_runIdGet(); id.number = _impl_thread_os_id_to_number(id.val); id.pName = _impl_thread_name_get(id.val); @@ -795,6 +855,8 @@ static inline os_thread_id_t os_id_current_thread(void) */ static inline u32p_t os_kernel_run(void) { + extern u32p_t _impl_kernel_at_rtos_run(void); + return _impl_kernel_at_rtos_run(); } @@ -812,6 +874,8 @@ static inline u32p_t os_kernel_run(void) */ static inline b_t os_kernel_is_running(void) { + extern b_t _impl_kernel_rtos_isRun(void); + return (b_t)(_impl_kernel_rtos_isRun() ? (TRUE) : (FALSE)); } @@ -864,7 +928,7 @@ typedef struct { os_timer_id_t (*timer_init)(pTimer_callbackFunc_t, b_t, u32_t, const char_t *); u32p_t (*timer_start)(os_timer_id_t, b_t, u32_t); u32p_t (*timer_stop)(os_timer_id_t); - u32p_t (*timer_isBusy)(os_timer_id_t); + u32p_t (*timer_busy)(os_timer_id_t); u32_t (*timer_system_total_ms)(void); os_sem_id_t (*sem_init)(u8_t, u8_t, b_t, const char_t *); @@ -898,7 +962,7 @@ typedef struct { void (*trace_kernel)(void); } at_rtos_api_t; -extern const at_rtos_api_t AtOS; +extern const at_rtos_api_t os; #endif #ifdef __cplusplus diff --git a/include/kernel/event.h b/include/kernel/event.h deleted file mode 100644 index 015499c..0000000 --- a/include/kernel/event.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _EVENT_H_ -#define _EVENT_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. 
- */ -u32_t _impl_event_os_id_to_number(os_id_t id); -os_id_t _impl_event_init(u32_t edgeMask, u32_t clearMask, const char_t *pName); -u32p_t _impl_event_set(os_id_t id, u32_t set, u32_t clear, u32_t toggle); -u32p_t _impl_event_wait(os_id_t id, os_evt_val_t *pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, u32_t timeout_ms); - -#ifdef __cplusplus -} -#endif - -#endif /* _EVENT_H_ */ diff --git a/include/kernel/kernel.h b/include/kernel/kernel.h index c5a1fbb..32200e2 100644 --- a/include/kernel/kernel.h +++ b/include/kernel/kernel.h @@ -64,27 +64,29 @@ enum { #define KERNEL_MEMBER_MAP_8 (KERNEL_MEMBER_MAP_7 + KERNEL_POOL_MEMORY_SIZE) #define KERNEL_MEMBER_MAP_NUMBER (KERNEL_MEMBER_MAP_8 + 1u) -list_t *_impl_kernel_member_list_get(u8_t member_id, u8_t list_id); -u32_t _impl_kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id); -void _impl_kernel_thread_list_transfer_toEntry(linker_head_t *pCurHead); -u32p_t _impl_kernel_thread_exit_trigger(os_id_t id, os_id_t hold, list_t *pToList, u32_t timeout_us, void (*pCallback)(os_id_t)); -u32p_t _impl_kernel_thread_entry_trigger(os_id_t id, os_id_t release, u32_t result, void (*pCallback)(os_id_t)); -u32_t _impl_kernel_schedule_entry_result_take(action_schedule_t *pSchedule); -void _impl_kernel_thread_list_transfer_toPend(linker_head_t *pCurHead); -list_t *_impl_kernel_list_pendingHeadGet(void); -u32_t _impl_kernel_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size); -b_t _impl_kernel_isInThreadMode(void); -u32_t _impl_kernel_thread_schedule_request(void); -void _impl_kernel_message_notification(void); -void _impl_kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP); -u32_t _impl_kernel_schedule_time_get(void); -u32_t impl_kernel_thread_use_percent_take(os_id_t id); -void _impl_kernel_privilege_call_inSVC_c(u32_t *svc_args); -u32_t _impl_kernel_privilege_invoke(const void *pCallFun, arguments_t *pArgs); -void _impl_kernel_thread_schedule(void); -void _impl_kernel_thread_idle(void); -void _impl_kernel_semaphore_list_transfer_toLock(linker_head_t *pCurHead); -thread_context_t *_impl_kernel_thread_runContextGet(void); +thread_context_t *kernel_thread_runContextGet(void); +list_t *kernel_member_list_get(u8_t member_id, u8_t list_id); +void kernel_thread_list_transfer_toEntry(linker_head_t *pCurHead); +u32p_t kernel_thread_exit_trigger(os_id_t id, os_id_t hold, list_t *pToList, u32_t timeout_us, void (*pCallback)(os_id_t)); +u32p_t kernel_thread_entry_trigger(os_id_t id, os_id_t release, u32_t result, void (*pCallback)(os_id_t)); +u32_t kernel_schedule_entry_result_take(action_schedule_t *pSchedule); +void kernel_thread_list_transfer_toPend(linker_head_t *pCurHead); +list_t *kernel_list_pendingHeadGet(void); +u32_t kernel_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size); +b_t kernel_isInThreadMode(void); +u32_t kernel_thread_schedule_request(void); +void kernel_message_notification(void); +void kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP); +u32_t kernel_schedule_time_get(void); +u32_t kernel_thread_use_percent_take(os_id_t id); +void kernel_privilege_call_inSVC_c(u32_t *svc_args); +u32_t kernel_privilege_invoke(const void *pCallFun, arguments_t *pArgs); +void kernel_semaphore_list_transfer_toLock(linker_head_t *pCurHead); +void kernel_schedule_thread(void); +void kernel_idle_thread(void); +void kthread_init(void); +void kthread_message_notification(void); +u32_t kthread_message_arrived(void); #ifdef __cplusplus } diff --git 
a/include/kernel/kthread.h b/include/kernel/kthread.h deleted file mode 100644 index c785609..0000000 --- a/include/kernel/kthread.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _KTHREAD_H_ -#define _KTHREAD_H_ - -#include "typedef.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void _impl_kernel_thread_message_notification(void); -u32_t _impl_kernel_thread_message_arrived(void); -void _impl_kernel_thread_init(void); - -#ifdef __cplusplus -} -#endif - -#endif /* _KTHREAD_H_ */ diff --git a/include/kernel/ktype.h b/include/kernel/ktype.h index 87d5c22..44f127a 100644 --- a/include/kernel/ktype.h +++ b/include/kernel/ktype.h @@ -9,6 +9,7 @@ #define _KTYPE_H_ #include "typedef.h" +#include "linker.h" #ifdef __cplusplus extern "C" { @@ -42,8 +43,20 @@ typedef struct os_priority os_priority_t; typedef struct os_time os_time_t; typedef struct { + + linker_t linker; + + os_id_t id; + + u32_t desired; + + u32_t value; +} evt_cushion_t; + +typedef struct { + evt_cushion_t cushion; + u32_t value; - u32_t defer; } os_evt_val_t; #define OS_INVALID_ID_VAL (0xFFFFFFFFu) @@ -68,20 +81,18 @@ typedef struct { #define OS_INVALID_ID OS_INVALID_ID_VAL -u8_t *_impl_kernel_member_unified_id_toContainerAddress(u32_t unified_id); -u32_t _impl_kernel_member_containerAddress_toUnifiedid(u32_t container_address); -u32_t _impl_kernel_member_id_toUnifiedIdStart(u8_t member_id); -u8_t *_impl_kernel_member_id_toContainerStartAddress(u32_t member_id); -u8_t *_impl_kernel_member_id_toContainerEndAddress(u32_t member_id); -b_t _impl_kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id); -u8_t _impl_kernel_member_unified_id_toId(u32_t unified_id); -u32_t _impl_kernel_member_unified_id_threadToTimer(u32_t unified_id); -u32_t _impl_kernel_member_unified_id_timerToThread(u32_t unified_id); -u32_t _impl_kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id); -b_t _impl_kernel_os_id_is_invalid(struct os_id id); -u32p_t _impl_kernel_at_rtos_run(void); -b_t _impl_kernel_rtos_isRun(void); -os_id_t _impl_kernel_thread_runIdGet(void); +u8_t *kernel_member_unified_id_toContainerAddress(u32_t unified_id); +u32_t kernel_member_containerAddress_toUnifiedid(u32_t container_address); +u32_t kernel_member_id_toUnifiedIdStart(u8_t member_id); +u8_t *kernel_member_id_toContainerStartAddress(u32_t member_id); +u8_t *kernel_member_id_toContainerEndAddress(u32_t member_id); +b_t kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id); +u8_t kernel_member_unified_id_toId(u32_t unified_id); +u32_t kernel_member_unified_id_threadToTimer(u32_t unified_id); +u32_t kernel_member_unified_id_timerToThread(u32_t unified_id); +u32_t kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id); +os_id_t kernel_thread_runIdGet(void); +b_t kernel_os_id_is_invalid(struct os_id id); #ifdef __cplusplus } diff --git a/include/kernel/mutex.h b/include/kernel/mutex.h deleted file mode 100644 index 38c0c76..0000000 --- a/include/kernel/mutex.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- **/ - -#ifndef _MUXTEX_H_ -#define _MUXTEX_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. - */ -u32_t _impl_mutex_os_id_to_number(os_id_t id); -os_id_t _impl_mutex_init(const char_t *pName); -u32p_t _impl_mutex_lock(os_id_t id); -u32p_t _impl_mutex_unlock(os_id_t id); - -#ifdef __cplusplus -} -#endif - -#endif /* _MUXTEX_H_ */ diff --git a/include/kernel/pool.h b/include/kernel/pool.h deleted file mode 100644 index e3b08e0..0000000 --- a/include/kernel/pool.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _POOL_H_ -#define _POOL_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. - */ -u32_t _impl_pool_os_id_to_number(os_id_t id); -os_id_t _impl_pool_init(const void *pMemAddr, u16_t elementLen, u16_t elementNum, const char_t *pName); -u32p_t _impl_pool_take(os_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms); -u32p_t _impl_pool_release(os_id_t id, void **ppUserBuffer); - -#ifdef __cplusplus -} -#endif - -#endif /* _POOL_H_ */ diff --git a/include/kernel/queue.h b/include/kernel/queue.h deleted file mode 100644 index d575417..0000000 --- a/include/kernel/queue.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _QUEUE_H_ -#define _QUEUE_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. - */ -u32_t _impl_queue_os_id_to_number(os_id_t id); -os_id_t _impl_queue_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName); -u32p_t _impl_queue_send(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms); -u32p_t _impl_queue_receive(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms); - -#ifdef __cplusplus -} -#endif - -#endif /* _QUEUE_H_ */ diff --git a/include/kernel/semaphore.h b/include/kernel/semaphore.h deleted file mode 100644 index 843538e..0000000 --- a/include/kernel/semaphore.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _SEMAPHORE_H_ -#define _SEMAPHORE_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. - */ -u32_t _impl_semaphore_os_id_to_number(os_id_t id); -os_id_t _impl_semaphore_init(u8_t initialCount, u8_t limitCount, b_t permit, const char_t *pName); -u32p_t _impl_semaphore_take(os_id_t id, u32_t timeout_ms); -u32p_t _impl_semaphore_give(os_id_t id); -u32p_t _impl_semaphore_flush(os_id_t id); - -#ifdef __cplusplus -} -#endif - -#endif /* _SEMAPHORE_H_ */ diff --git a/include/kernel/thread.h b/include/kernel/thread.h deleted file mode 100644 index 15c41f8..0000000 --- a/include/kernel/thread.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (c) Riven Zheng (zhengheiot@gmail.com). 
- * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - **/ - -#ifndef _THREAD_H_ -#define _THREAD_H_ - -#include "kstruct.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * The implement function lists for rtos kernel internal use. - */ -const char_t *_impl_thread_name_get(os_id_t id); -u32_t _impl_thread_os_id_to_number(os_id_t id); -os_id_t _impl_thread_init(void (*pThread_entryFunc_t)(void), u32_t *pAddress, u32_t size, u16_t priority, const char_t *pName); -u32p_t _impl_thread_sleep(u32_t ms); -u32p_t _impl_thread_resume(os_id_t id); -u32p_t _impl_thread_suspend(os_id_t id); -u32p_t _impl_thread_yield(void); -u32p_t _impl_thread_delete(os_id_t id); - -#ifdef __cplusplus -} -#endif - -#endif /* _THREAD_H_ */ diff --git a/include/kernel/timer.h b/include/kernel/timer.h index fdad277..e9e8d6f 100644 --- a/include/kernel/timer.h +++ b/include/kernel/timer.h @@ -17,18 +17,15 @@ extern "C" { /** * The implement function lists for rtos kernel internal use. */ -u32p_t _impl_kernel_timer_schedule_request(void); -void _impl_thread_timer_init(os_id_t id); -void _impl_thread_timer_start(os_id_t id, u32_t timeout_ms, void (*pCallback)(os_id_t)); -u32_t _impl_timer_os_id_to_number(os_id_t id); -b_t _impl_timer_status_isBusy(os_id_t id); -os_id_t _impl_timer_init(pTimer_callbackFunc_t pCallFun, b_t isCycle, u32_t timeout_ms, const char_t *pName); -u32p_t _impl_timer_start(os_id_t id, b_t isCycle, u32_t timeout_ms); -u32p_t _impl_timer_stop(os_id_t id); -u32_t _impl_timer_total_system_us_get(void); -u32_t _impl_timer_total_system_ms_get(void); -void _impl_timer_reamining_elapsed_handler(void); -void _impl_timer_elapsed_handler(u32_t elapsed_us); +void timer_init_for_thread(os_id_t id); +void timer_start_for_thread(os_id_t id, u32_t timeout_ms, void (*pCallback)(os_id_t)); +u32p_t timer_stop_for_thread(os_id_t id); +b_t timer_busy(os_id_t id); +u32_t timer_total_system_ms_get(void); +u32_t timer_total_system_us_get(void); +u32p_t timer_schedule(void); +void timer_reamining_elapsed_handler(void); +void timer_elapsed_handler(u32_t elapsed_us); #ifdef __cplusplus } diff --git a/include/kernel/trace.h b/include/kernel/trace.h index d6f7822..ff2d580 100644 --- a/include/kernel/trace.h +++ b/include/kernel/trace.h @@ -105,13 +105,13 @@ typedef struct { #pragma warning restore #endif -b_t _impl_trace_thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_semaphore_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_mutex_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_event_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_queue_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); -b_t _impl_trace_pool_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t semaphore_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t mutex_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t event_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t queue_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t pool_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); +b_t timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs); void _impl_trace_firmware_snapshot_print(void); void _impl_trace_postcode_snapshot_print(void); diff --git a/include/port.h b/include/port.h index 2679113..31cb30b 100644 --- 
a/include/port.h +++ b/include/port.h @@ -128,18 +128,18 @@ typedef struct { /** * @brief Trigger system svc call. */ -__svc(SVC_KERNEL_INVOKE_NUMBER) u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); +__svc(SVC_KERNEL_INVOKE_NUMBER) u32_t kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); /** * @brief Schedule the first thread. */ -__asm void _impl_port_run_theFirstThread(u32_t sp); +__asm void port_run_theFirstThread(u32_t sp); #if 0 /* Disable it to use CMSIS Library */ /** * @brief To check if it's in interrupt content. */ -static inline b_t _impl_port_isInInterruptContent(void) +static inline b_t port_isInInterruptContent(void) { register u32_t reg_ipsr __asm("ipsr"); if (reg_ipsr) { @@ -157,7 +157,7 @@ static inline b_t _impl_port_isInInterruptContent(void) /** * @brief To check if it's in kernel thread content. */ -static inline b_t _impl_port_isInThreadMode(void) +static inline b_t port_isInThreadMode(void) { register u32_t reg_ipsr __asm("ipsr"); if (reg_ipsr) { @@ -173,7 +173,7 @@ static inline b_t _impl_port_isInThreadMode(void) /** * @brief Trigger system svc call. */ -static inline u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3) +static inline u32_t kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3) { register u32_t r0 __asm__("r0") = args_0; register u32_t r1 __asm__("r1") = args_1; @@ -188,13 +188,13 @@ static inline u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args /** * @brief Schedule the first thread. */ -void _impl_port_run_theFirstThread(u32_t sp); +void port_run_theFirstThread(u32_t sp); #if 0 /* Disable it to use CMSIS Library */ /** * @brief To check if it's in interrupt content. */ -static inline b_t _impl_port_isInInterruptContent(void) +static inline b_t port_isInInterruptContent(void) { u32_t ipsr; @@ -216,7 +216,7 @@ static inline b_t _impl_port_isInInterruptContent(void) /** * @brief To check if it's in kernel thread content. */ -static inline b_t _impl_port_isInThreadMode(void) +static inline b_t port_isInThreadMode(void) { u32_t ipsr; @@ -235,18 +235,18 @@ static inline b_t _impl_port_isInThreadMode(void) /** * @brief Trigger system svc call. */ -__swi u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); +__swi u32_t kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); /** * @brief Schedule the first thread. */ -void _impl_port_run_theFirstThread(u32_t sp); +void port_run_theFirstThread(u32_t sp); #if 0 /* Disable it to use CMSIS Library */ /** * @brief To check if it's in interrupt content. */ -static inline b_t _impl_port_isInInterruptContent(void) +static inline b_t port_isInInterruptContent(void) { register u32_t reg_ipsr = __arm_rsr("IPSR"); if (reg_ipsr) { @@ -264,7 +264,7 @@ static inline b_t _impl_port_isInInterruptContent(void) /** * @brief To check if it's in kernel thread content. 
*/ -static inline b_t _impl_port_isInThreadMode(void) +static inline b_t port_isInThreadMode(void) { register u32_t reg_ipsr = __arm_rsr("IPSR"); if (reg_ipsr) { @@ -285,8 +285,8 @@ static inline b_t _impl_port_isInThreadMode(void) /* TODO */ #elif defined(ARCH_NATIVE_GCC) -u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); -void _impl_port_run_theFirstThread(u32_t sp); +u32_t kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3); +void port_run_theFirstThread(u32_t sp); #else #warning Not supported compiler type @@ -295,11 +295,11 @@ void _impl_port_run_theFirstThread(u32_t sp); /** * The implement function lists for rtos kernel internal use. */ -b_t _impl_port_isInInterruptContent(void); -b_t _impl_port_isInThreadMode(void); -void _impl_port_setPendSV(void); -void _impl_port_interrupt_init(void); -u32_t _impl_port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size); +b_t port_isInInterruptContent(void); +b_t port_isInThreadMode(void); +void port_setPendSV(void); +void port_interrupt_init(void); +u32_t port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size); #ifdef __cplusplus } diff --git a/kernel/event.c b/kernel/event.c index fff632b..f80ffa5 100644 --- a/kernel/event.c +++ b/kernel/event.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "event.h" #include "postcode.h" #include "trace.h" @@ -20,15 +19,6 @@ extern "C" { */ #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_EVENT_6) -/** - * The local function lists for current file internal use. - */ -static u32_t _event_init_privilege_routine(arguments_t *pArgs); -static u32_t _event_set_privilege_routine(arguments_t *pArgs); -static u32_t _event_wait_privilege_routine(arguments_t *pArgs); - -static void _event_schedule(os_id_t id); - /** * @brief Get the event context based on provided unique id. * @@ -38,7 +28,7 @@ static void _event_schedule(os_id_t id); */ static event_context_t *_event_object_contextGet(os_id_t id) { - return (event_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (event_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -48,7 +38,7 @@ static event_context_t *_event_object_contextGet(os_id_t id) */ static list_t *_event_list_initHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_EVENT, KERNEL_MEMBER_LIST_EVENT_INIT); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_EVENT, KERNEL_MEMBER_LIST_EVENT_INIT); } /** @@ -58,7 +48,7 @@ static list_t *_event_list_initHeadGet(void) */ static list_t *_event_list_activeHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_EVENT, KERNEL_MEMBER_LIST_EVENT_ACTIVE); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_EVENT, KERNEL_MEMBER_LIST_EVENT_ACTIVE); } /** @@ -114,7 +104,7 @@ static void _event_list_transfer_toActive(linker_head_t *pCurHead) */ static b_t _event_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_EVENT, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_EVENT, id); } /** @@ -132,140 +122,56 @@ static b_t _event_object_isInit(u32_t id) } /** - * @brief The event timeout callback fucntion. + * @brief The event schedule routine execute the the pendsv context. * - * @param id The event unique id. + * @param id The unique id of the entry thread. 
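+ *
+ * @note As implemented below: on a normal wake-up the thread's event listen/desired masks are
+ *       cleared and the entry result is PC_SC_SUCCESS, or PC_SC_TIMEOUT when the internal
+ *       timer fired before the event released the thread.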
*/ -static void _event_callback_fromTimeOut(os_id_t id) +static void _event_schedule(os_id_t id) { - _impl_kernel_thread_entry_trigger(_impl_kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _event_schedule); -} + thread_context_t *pEntryThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); + thread_entry_t *pEntry = NULL; + b_t isAvail = FALSE; -/** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The value of member number. - */ -u32_t _impl_event_os_id_to_number(os_id_t id) -{ - if (_event_id_isInvalid(id)) { - return 0u; + if (kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_EVENT) { + pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; + return; } - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_EVENT)) / sizeof(event_context_t)); -} - -/** - * @brief Initialize a new event. - * - * @param edgeMask specific the event desired condition of edge or level. - * @param clearMask automatically clear the set events. - * @param pName The event name. - * - * @return The event unique id. - */ -os_id_t _impl_event_init(u32_t edgeMask, u32_t clrDisMask, const char_t *pName) -{ - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)edgeMask}, - [1] = {.u32_val = (u32_t)clrDisMask}, - [2] = {.pch_val = (const char_t *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_event_init_privilege_routine, arguments); -} - -u32p_t _impl_event_wait_callfunc_register(pEvent_callbackFunc_t pCallFun) -{ - return 0u; -} - -/** - * @brief Set/clear/toggle a event bits. - * - * @param id The event unique id. - * @param set The event value bits set. - * @param clear The event value bits clear. - * @param toggle The event value bits toggle. - * - * @return The result of the operation. - */ -u32p_t _impl_event_set(os_id_t id, u32_t set, u32_t clear, u32_t toggle) -{ - if (_event_id_isInvalid(id)) { - return _PC_CMPT_FAILED; + if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { + return; } - if (!_event_object_isInit(id)) { - return _PC_CMPT_FAILED; + pEntry = &pEntryThread->schedule.entry; + if (!timer_busy(kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { + if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { + pEntry->result = PC_SC_TIMEOUT; + } else { + isAvail = true; + } + } else if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_EVENT) { + timer_stop_for_thread(kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); + isAvail = true; + } else { + pEntry->result = _PC_CMPT_FAILED; } - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - [1] = {.u32_val = (u32_t)set}, - [2] = {.u32_val = (u32_t)clear}, - [3] = {.u32_val = (u32_t)toggle}, - }; + /* Auto clear user configuration */ + pEntryThread->event.listen = 0u; + pEntryThread->event.desired = 0u; - return _impl_kernel_privilege_invoke((const void *)_event_set_privilege_routine, arguments); + if (isAvail) { + pEntry->result = PC_SC_SUCCESS; + } } /** - * @brief Wait a target event. + * @brief The event timeout callback fucntion. * * @param id The event unique id. - * @param pEvtData The pointer of event value. - * @param desired_val If the desired is not zero, All changed bits seen can wake up the thread to handle event. - * @param listen_mask Current thread listen which bits in the event. 
- * @param group_mask To define a group event. - * @param timeout_ms The event wait timeout setting. - * - * @return The result of the operation. */ -u32p_t _impl_event_wait(os_id_t id, os_evt_val_t *pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, u32_t timeout_ms) +static void _event_callback_fromTimeOut(os_id_t id) { - if (_event_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_event_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (!pEvtData) { - return _PC_CMPT_FAILED; - } - - if (!timeout_ms) { - return _PC_CMPT_FAILED; - } - - if (!_impl_kernel_isInThreadMode()) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, [1] = {.pv_val = (void *)pEvtData}, [2] = {.u32_val = (u32_t)desired_val}, - [3] = {.u32_val = (u32_t)listen_mask}, [4] = {.u32_val = (u32_t)group_mask}, [5] = {.u32_val = (u32_t)timeout_ms}, - }; - - u32p_t postcode = _impl_kernel_privilege_invoke((const void *)_event_wait_privilege_routine, arguments); - - ENTER_CRITICAL_SECTION(); - - if (PC_IOK(postcode)) { - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); - postcode = (u32p_t)_impl_kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); - } - - if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { - postcode = PC_SC_SUCCESS; - } - - EXIT_CRITICAL_SECTION(); - return postcode; + kernel_thread_entry_trigger(kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _event_schedule); } /** @@ -285,10 +191,10 @@ static u32_t _event_init_privilege_routine(arguments_t *pArgs) u32_t endAddr = 0u; event_context_t *pCurEvent = NULL; - pCurEvent = (event_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_EVENT); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_EVENT); + pCurEvent = (event_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_EVENT); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_EVENT); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurEvent); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurEvent); if (_event_id_isInvalid(id)) { break; } @@ -370,11 +276,11 @@ static u32_t _event_set_privilege_routine(arguments_t *pArgs) if (pCurThread->event.group) { // Group event if (pCurThread->event.group == (pCurThread->event.pEvtVal->value & pCurThread->event.group)) { - _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _event_schedule); + kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _event_schedule); } } else { if (pCurThread->event.pEvtVal->value) { // Single event - _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _event_schedule); + kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _event_schedule); } } } @@ -415,20 +321,32 @@ static u32_t _event_wait_privilege_routine(arguments_t *pArgs) thread_context_t *pCurThread = NULL; event_context_t *pCurEvent = NULL; u32_t report = 0u; + u32_t report_cushion = 0u; u32_t reported = 0u; u32p_t postcode = PC_SC_SUCCESS; pCurEvent = _event_object_contextGet(id); - pCurThread = _impl_kernel_thread_runContextGet(); + pCurThread = kernel_thread_runContextGet(); pCurThread->event.listen = listen; pCurThread->event.desired = desired; pCurThread->event.group = group; pCurThread->event.pEvtVal = pEvtData; + if (pEvtData->cushion.id != id) { + pEvtData->cushion.desired = desired; + pEvtData->cushion.id = id; + 
pEvtData->cushion.value = 0u; + } + + report_cushion = (u32_t)pEvtData->cushion.value & pCurEvent->edgeMask; // Cushion edge trigger + pEvtData->cushion.value = 0u; + report = (u32_t)(pCurEvent->defer & pCurEvent->edgeMask); // Edge trigger report |= (u32_t)(pCurEvent->value & (~pCurEvent->edgeMask)); // Level trigger - reported = ~(report ^ desired) & listen; + reported = ~(report_cushion ^ pEvtData->cushion.desired); + + reported |= ~(report ^ desired) & listen; if (reported) { pCurThread->event.pEvtVal->value = reported; pCurEvent->defer &= ~reported; // Clear reported defer @@ -446,53 +364,137 @@ static u32_t _event_wait_privilege_routine(arguments_t *pArgs) } postcode = - _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _event_list_blockingHeadGet(id), timeout_ms, _event_callback_fromTimeOut); + kernel_thread_exit_trigger(pCurThread->head.id, id, _event_list_blockingHeadGet(id), timeout_ms, _event_callback_fromTimeOut); EXIT_CRITICAL_SECTION(); return postcode; } /** - * @brief The event schedule routine execute the the pendsv context. + * @brief Convert the internal os id to kernel member number. * - * @param id The unique id of the entry thread. + * @param id The provided unique id. + * + * @return The value of member number. */ -static void _event_schedule(os_id_t id) +u32_t _impl_event_os_id_to_number(os_id_t id) { - thread_context_t *pEntryThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); - thread_entry_t *pEntry = NULL; - b_t isAvail = FALSE; + if (_event_id_isInvalid(id)) { + return 0u; + } - if (_impl_kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_EVENT) { - pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; - return; + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_EVENT)) / sizeof(event_context_t)); +} + +/** + * @brief Initialize a new event. + * + * @param edgeMask specific the event desired condition of edge or level. + * @param clearMask automatically clear the set events. + * @param pName The event name. + * + * @return The event unique id. + */ +os_id_t _impl_event_init(u32_t edgeMask, u32_t clrDisMask, const char_t *pName) +{ + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)edgeMask}, + [1] = {.u32_val = (u32_t)clrDisMask}, + [2] = {.pch_val = (const char_t *)pName}, + }; + + return kernel_privilege_invoke((const void *)_event_init_privilege_routine, arguments); +} + +u32p_t _impl_event_wait_callfunc_register(pEvent_callbackFunc_t pCallFun) +{ + return 0u; +} + +/** + * @brief Set/clear/toggle a event bits. + * + * @param id The event unique id. + * @param set The event value bits set. + * @param clear The event value bits clear. + * @param toggle The event value bits toggle. + * + * @return The result of the operation. 
+ */ +u32p_t _impl_event_set(os_id_t id, u32_t set, u32_t clear, u32_t toggle) +{ + if (_event_id_isInvalid(id)) { + return _PC_CMPT_FAILED; } - if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { - return; + if (!_event_object_isInit(id)) { + return _PC_CMPT_FAILED; } - pEntry = &pEntryThread->schedule.entry; - if (!_impl_timer_status_isBusy(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { - if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { - pEntry->result = PC_SC_TIMEOUT; - } else { - isAvail = true; - } - } else if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_EVENT) { - _impl_timer_stop(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); - isAvail = true; - } else { - pEntry->result = _PC_CMPT_FAILED; + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + [1] = {.u32_val = (u32_t)set}, + [2] = {.u32_val = (u32_t)clear}, + [3] = {.u32_val = (u32_t)toggle}, + }; + + return kernel_privilege_invoke((const void *)_event_set_privilege_routine, arguments); +} + +/** + * @brief Wait a target event. + * + * @param id The event unique id. + * @param pEvtData The pointer of event value. + * @param desired_val If the desired is not zero, All changed bits seen can wake up the thread to handle event. + * @param listen_mask Current thread listen which bits in the event. + * @param group_mask To define a group event. + * @param timeout_ms The event wait timeout setting. + * + * @return The result of the operation. + */ +u32p_t _impl_event_wait(os_id_t id, os_evt_val_t *pEvtData, u32_t desired_val, u32_t listen_mask, u32_t group_mask, u32_t timeout_ms) +{ + if (_event_id_isInvalid(id)) { + return _PC_CMPT_FAILED; } - /* Auto clear user configuration */ - pEntryThread->event.listen = 0u; - pEntryThread->event.desired = 0u; + if (!_event_object_isInit(id)) { + return _PC_CMPT_FAILED; + } - if (isAvail) { - pEntry->result = PC_SC_SUCCESS; + if (!pEvtData) { + return _PC_CMPT_FAILED; } + + if (!timeout_ms) { + return _PC_CMPT_FAILED; + } + + if (!kernel_isInThreadMode()) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, [1] = {.pv_val = (void *)pEvtData}, [2] = {.u32_val = (u32_t)desired_val}, + [3] = {.u32_val = (u32_t)listen_mask}, [4] = {.u32_val = (u32_t)group_mask}, [5] = {.u32_val = (u32_t)timeout_ms}, + }; + + u32p_t postcode = kernel_privilege_invoke((const void *)_event_wait_privilege_routine, arguments); + + ENTER_CRITICAL_SECTION(); + + if (PC_IOK(postcode)) { + thread_context_t *pCurThread = (thread_context_t *)kernel_thread_runContextGet(); + postcode = (u32p_t)kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + } + + if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { + postcode = PC_SC_SUCCESS; + } + + EXIT_CRITICAL_SECTION(); + return postcode; } /** @@ -503,7 +505,7 @@ static void _event_schedule(os_id_t id) * * @return TRUE: Operation pass, FALSE: Operation failed. 
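For readers tracing the new cushion fields through `_event_wait_privilege_routine` above, a minimal usage sketch of the event API may help. It is only a sketch: it assumes the `_impl_event_init` / `_impl_event_set` / `_impl_event_wait` signatures visible in this patch, that those prototypes are reachable via `at_rtos.h`, and that an `os_evt_val_t` is zero-initialized before its first wait so the cushion is re-armed on the first call; the bit names are invented for illustration.

```c
#include "at_rtos.h"

#define DEMO_EVT_RX (1u << 0) /* hypothetical event bit */
#define DEMO_EVT_TX (1u << 1) /* hypothetical event bit */

static os_id_t g_demo_evt_id;
static os_evt_val_t g_demo_evt_val = {0}; /* zeroed so cushion.id mismatches on the first wait */

void demo_evt_setup(void)
{
    /* DEMO_EVT_RX is declared edge-triggered here purely for illustration. */
    g_demo_evt_id = _impl_event_init(DEMO_EVT_RX, 0u, "demo_evt");
}

void demo_evt_consumer(void)
{
    /* Block up to 1000 ms for the listened bits to match the desired value. */
    u32p_t ret = _impl_event_wait(g_demo_evt_id, &g_demo_evt_val, DEMO_EVT_RX, /* desired_val */
                                  DEMO_EVT_RX | DEMO_EVT_TX,                  /* listen_mask  */
                                  0u,                                         /* group_mask   */
                                  1000u);                                     /* timeout_ms   */
    if (PC_IOK(ret) && (ret != PC_SC_TIMEOUT)) {
        /* g_demo_evt_val.value now holds the reported bits, including cushioned edges. */
    }
}

void demo_evt_producer(void)
{
    /* Report the RX bit: set mask only, no clear, no toggle. */
    _impl_event_set(g_demo_evt_id, DEMO_EVT_RX, 0u, 0u);
}
```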
*/ -b_t _impl_trace_event_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t event_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE event_context_t *pCurEvent = NULL; @@ -513,8 +515,8 @@ b_t _impl_trace_event_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(event_context_t) * instance; - pCurEvent = (event_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_EVENT) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurEvent); + pCurEvent = (event_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_EVENT) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurEvent); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_event_id_isInvalid(id)) { diff --git a/kernel/kernel.c b/kernel/kernel.c index aae8ee9..db184f2 100644 --- a/kernel/kernel.c +++ b/kernel/kernel.c @@ -7,10 +7,10 @@ #include "at_rtos.h" #include "kernel.h" +#include "timer.h" #include "compiler.h" #include "clock_tick.h" #include "ktype.h" -#include "kthread.h" #include "postcode.h" #include "trace.h" @@ -73,7 +73,7 @@ static u32_t _kernel_start_privilege_routine(arguments_t *pArgs); */ static void _kernel_setPendSV(void) { - _impl_port_setPendSV(); + port_setPendSV(); } /** @@ -83,7 +83,7 @@ static void _kernel_setPendSV(void) */ static b_t _kernel_isInPrivilegeMode(void) { - return _impl_port_isInInterruptContent(); + return port_isInInterruptContent(); } /** @@ -91,7 +91,7 @@ static b_t _kernel_isInPrivilegeMode(void) */ static void _kernel_pendsv_time_update(void) { - g_kernel_resource.pendsv_ms = _impl_timer_total_system_ms_get(); + g_kernel_resource.pendsv_ms = timer_total_system_ms_get(); } /** @@ -118,7 +118,7 @@ static void _kernel_schedule_exit_time_analyze(os_id_t id) static void _kernel_schedule_entry_time_analyze(os_id_t id) { #if defined KTRACE - thread_context_t *pEntryThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(id); + thread_context_t *pEntryThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(id); pEntryThread->schedule.analyze.pend_ms = _kernel_pendsv_time_get(); #endif } @@ -129,8 +129,8 @@ static void _kernel_schedule_entry_time_analyze(os_id_t id) static void _kernel_schedule_run_time_analyze(os_id_t from, os_id_t to) { #if defined KTRACE - thread_context_t *pFromThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(from); - thread_context_t *pToThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(to); + thread_context_t *pFromThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(from); + thread_context_t *pToThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(to); u32_t sv_ms = _kernel_pendsv_time_get(); pFromThread->schedule.analyze.active_ms += (u32_t)(sv_ms - pFromThread->schedule.analyze.run_ms); @@ -149,7 +149,7 @@ static void _kernel_schedule_run_time_analyze(os_id_t from, os_id_t to) */ static u32_t *_kernel_thread_PSP_Get(os_id_t id) { - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(id); + thread_context_t *pCurThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(id); return (u32_t *)&pCurThread->PSPStartAddr; } @@ -162,7 +162,7 @@ static u32_t *_kernel_thread_PSP_Get(os_id_t id) static os_id_t _kernel_thread_nextIdGet(void) { ENTER_CRITICAL_SECTION(); - list_t *pListPending = (list_t *)_impl_kernel_list_pendingHeadGet(); + 
list_t *pListPending = (list_t *)kernel_list_pendingHeadGet(); linker_head_t *pHead = (linker_head_t *)(pListPending->pHead); EXIT_CRITICAL_SECTION(); @@ -220,7 +220,7 @@ static void _kernel_thread_entry_schedule(void) list_iterator_t it = ITERATION_NULL; thread_entry_t *pEntry = NULL; - list_t *pList = (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_ENTRY); + list_t *pList = (list_t *)kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_ENTRY); list_iterator_init(&it, pList); while (list_iterator_next_condition(&it, (void *)&pCurThread)) { pEntry = &pCurThread->schedule.entry; @@ -233,7 +233,7 @@ static void _kernel_thread_entry_schedule(void) } _kernel_schedule_entry_time_analyze(pCurThread->head.id); - _impl_kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); + kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); } } @@ -250,14 +250,13 @@ static b_t _kernel_thread_exit_schedule(void) b_t request = FALSE; /* The thread block */ - list_t *pList = (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_EXIT); + list_t *pList = (list_t *)kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_EXIT); list_iterator_init(&it, pList); while (list_iterator_next_condition(&it, (void *)&pCurThread)) { pExit = &pCurThread->schedule.exit; if (pExit->timeout_us) { - _impl_thread_timer_start(_impl_kernel_member_unified_id_threadToTimer(pCurThread->head.id), pExit->timeout_us, - pExit->pTimeoutCallFun); + timer_start_for_thread(kernel_member_unified_id_threadToTimer(pCurThread->head.id), pExit->timeout_us, pExit->pTimeoutCallFun); if (pExit->timeout_us != OS_TIME_FOREVER_VAL) { request = TRUE; @@ -284,16 +283,16 @@ static u32_t _kernel_start_privilege_routine(arguments_t *pArgs) ENTER_CRITICAL_SECTION(); - _impl_kernel_thread_init(); - _impl_port_interrupt_init(); - _impl_clock_time_init(_impl_timer_elapsed_handler); + kthread_init(); + port_interrupt_init(); + clock_time_init(timer_elapsed_handler); g_kernel_resource.current = _kernel_thread_nextIdGet(); g_kernel_resource.run = TRUE; EXIT_CRITICAL_SECTION(); - _impl_port_run_theFirstThread(*(_kernel_thread_PSP_Get(g_kernel_resource.current))); + port_run_theFirstThread(*(_kernel_thread_PSP_Get(g_kernel_resource.current))); // nothing arrive return _PC_CMPT_FAILED; @@ -328,7 +327,15 @@ static u32_t _kernel_member_id_toUnifiedIdRange(u8_t member_id) return 0u; } - return (u32_t)(_kernel_member_id_toUnifiedIdEnd(member_id) - _impl_kernel_member_id_toUnifiedIdStart(member_id)); + return (u32_t)(_kernel_member_id_toUnifiedIdEnd(member_id) - kernel_member_id_toUnifiedIdStart(member_id)); +} + +/** + * @brief To check if the kernel message arrived. + */ +static u32_t _kernel_message_arrived(void) +{ + return kthread_message_arrived(); } /** @@ -336,7 +343,7 @@ static u32_t _kernel_member_id_toUnifiedIdRange(u8_t member_id) * * @return The value of pendsv executed time. */ -u32_t _impl_kernel_schedule_time_get(void) +u32_t kernel_schedule_time_get(void) { return _kernel_pendsv_time_get(); } @@ -346,10 +353,10 @@ u32_t _impl_kernel_schedule_time_get(void) * * @return The value of thread use percent. 
*/ -u32_t impl_kernel_thread_use_percent_take(os_id_t id) +u32_t kernel_thread_use_percent_take(os_id_t id) { #if defined KTRACE - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(id); + thread_context_t *pCurThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(id); pCurThread->schedule.analyze.percent = (pCurThread->schedule.analyze.active_ms * 1000u) / (_kernel_pendsv_time_get() - pCurThread->schedule.analyze.cycle_ms); @@ -368,12 +375,12 @@ u32_t impl_kernel_thread_use_percent_take(os_id_t id) * @param ppCurThreadPsp The current thread psp address. * @param ppNextThreadPSP The next thread psp address. */ -void _impl_kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP) +void kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP) { _kernel_pendsv_time_update(); if (_kernel_thread_exit_schedule()) { - _impl_kernel_timer_schedule_request(); + timer_schedule(); } _kernel_thread_entry_schedule(); @@ -394,7 +401,7 @@ void _impl_kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP) * * @return The pointer of the list pointer. */ -list_t *_impl_kernel_member_list_get(u8_t member_id, u8_t list_id) +list_t *kernel_member_list_get(u8_t member_id, u8_t list_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return NULL; @@ -418,7 +425,7 @@ list_t *_impl_kernel_member_list_get(u8_t member_id, u8_t list_id) * * @return The pointer of the memeber address. */ -u8_t *_impl_kernel_member_unified_id_toContainerAddress(u32_t unified_id) +u8_t *kernel_member_unified_id_toContainerAddress(u32_t unified_id) { if (unified_id >= KERNEL_MEMBER_MAP_NUMBER) { return NULL; @@ -434,7 +441,7 @@ u8_t *_impl_kernel_member_unified_id_toContainerAddress(u32_t unified_id) * * @return The value of the kernel unified id. */ -u32_t _impl_kernel_member_containerAddress_toUnifiedid(u32_t container_address) +u32_t kernel_member_containerAddress_toUnifiedid(u32_t container_address) { u32_t start = (u32_t)(u8_t *)&g_kernel_resource.member.pMemoryContainer[0]; @@ -456,7 +463,7 @@ u32_t _impl_kernel_member_containerAddress_toUnifiedid(u32_t container_address) * * @return The value of the kernel member unified id. */ -u32_t _impl_kernel_member_id_toUnifiedIdStart(u8_t member_id) +u32_t kernel_member_id_toUnifiedIdStart(u8_t member_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return OS_INVALID_ID; @@ -476,13 +483,13 @@ u32_t _impl_kernel_member_id_toUnifiedIdStart(u8_t member_id) * * @return The value of the kernel member address range. */ -u8_t *_impl_kernel_member_id_toContainerStartAddress(u32_t member_id) +u8_t *kernel_member_id_toContainerStartAddress(u32_t member_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return NULL; } - return (u8_t *)_impl_kernel_member_unified_id_toContainerAddress(_impl_kernel_member_id_toUnifiedIdStart(member_id)); + return (u8_t *)kernel_member_unified_id_toContainerAddress(kernel_member_id_toUnifiedIdStart(member_id)); } /** @@ -492,13 +499,13 @@ u8_t *_impl_kernel_member_id_toContainerStartAddress(u32_t member_id) * * @return The value of the kernel member ending address. 
*/ -u8_t *_impl_kernel_member_id_toContainerEndAddress(u32_t member_id) +u8_t *kernel_member_id_toContainerEndAddress(u32_t member_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return NULL; } - return (u8_t *)_impl_kernel_member_unified_id_toContainerAddress(_kernel_member_id_toUnifiedIdEnd(member_id)); + return (u8_t *)kernel_member_unified_id_toContainerAddress(_kernel_member_id_toUnifiedIdEnd(member_id)); } /** @@ -509,13 +516,13 @@ u8_t *_impl_kernel_member_id_toContainerEndAddress(u32_t member_id) * * @return The value of the kernel member number. */ -u32_t _impl_kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id) +u32_t kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return 0u; } - u32_t diff = _impl_kernel_member_id_toUnifiedIdStart(member_id); + u32_t diff = kernel_member_id_toUnifiedIdStart(member_id); if (unified_id >= diff) { diff = unified_id - diff; } else { @@ -533,7 +540,7 @@ u32_t _impl_kernel_member_id_unifiedConvert(u8_t member_id, u32_t unified_id) * * @return The value of true is invalid, otherwise is valid. */ -b_t _impl_kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id) +b_t kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id) { if (member_id >= KERNEL_MEMBER_NUMBER) { return TRUE; @@ -543,7 +550,7 @@ b_t _impl_kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id) return TRUE; } - if (unified_id < _impl_kernel_member_id_toUnifiedIdStart(member_id)) { + if (unified_id < kernel_member_id_toUnifiedIdStart(member_id)) { return TRUE; } @@ -561,7 +568,7 @@ b_t _impl_kernel_member_unified_id_isInvalid(u32_t member_id, u32_t unified_id) * * @return The value of true is invalid, otherwise is valid. */ -b_t _impl_kernel_os_id_is_invalid(struct os_id id) +b_t kernel_os_id_is_invalid(struct os_id id) { if (id.val == OS_INVALID_ID) { return TRUE; @@ -581,17 +588,17 @@ b_t _impl_kernel_os_id_is_invalid(struct os_id id) * * @return The value of timer unified id. */ -u32_t _impl_kernel_member_unified_id_threadToTimer(u32_t unified_id) +u32_t kernel_member_unified_id_threadToTimer(u32_t unified_id) { u32_t uid = OS_INVALID_ID; - if (_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_THREAD, unified_id)) { + if (kernel_member_unified_id_isInvalid(KERNEL_MEMBER_THREAD, unified_id)) { return OS_INVALID_ID; } - uid = (unified_id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)) / sizeof(thread_context_t); + uid = (unified_id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)) / sizeof(thread_context_t); - return (u32_t)((uid * sizeof(timer_context_t)) + _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)); + return (u32_t)((uid * sizeof(timer_context_t)) + kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)); } /** @@ -601,16 +608,16 @@ u32_t _impl_kernel_member_unified_id_threadToTimer(u32_t unified_id) * * @return The value of thread unified id. 
*/ -u32_t _impl_kernel_member_unified_id_timerToThread(u32_t unified_id) +u32_t kernel_member_unified_id_timerToThread(u32_t unified_id) { u32_t uid = OS_INVALID_ID; - if (_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, unified_id)) { + if (kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, unified_id)) { return OS_INVALID_ID; } - uid = (unified_id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)) / sizeof(timer_context_t); - return (u32_t)((uid * sizeof(thread_context_t)) + _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)); + uid = (unified_id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)) / sizeof(timer_context_t); + return (u32_t)((uid * sizeof(thread_context_t)) + kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)); } /** @@ -620,12 +627,12 @@ u32_t _impl_kernel_member_unified_id_timerToThread(u32_t unified_id) * * @return The value of the kernel member id. */ -u8_t _impl_kernel_member_unified_id_toId(u32_t unified_id) +u8_t kernel_member_unified_id_toId(u32_t unified_id) { u8_t member_id = KERNEL_MEMBER_THREAD; - while ((member_id < KERNEL_MEMBER_NUMBER) && ((unified_id < _impl_kernel_member_id_toUnifiedIdStart(member_id)) || - (unified_id >= _kernel_member_id_toUnifiedIdEnd(member_id)))) { + while ((member_id < KERNEL_MEMBER_NUMBER) && + ((unified_id < kernel_member_id_toUnifiedIdStart(member_id)) || (unified_id >= _kernel_member_id_toUnifiedIdEnd(member_id)))) { member_id++; } @@ -642,9 +649,9 @@ u8_t _impl_kernel_member_unified_id_toId(u32_t unified_id) * * @return The PSP stack address. */ -u32_t _impl_kernel_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) +u32_t kernel_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) { - return (u32_t)_impl_port_stack_frame_init(pEntryFunction, pAddress, size); + return (u32_t)port_stack_frame_init(pEntryFunction, pAddress, size); } /** @@ -652,7 +659,7 @@ u32_t _impl_kernel_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddres * * @return The pending list head. */ -list_t *_impl_kernel_list_pendingHeadGet(void) +list_t *kernel_list_pendingHeadGet(void) { return (list_t *)(&g_kernel_resource.list); } @@ -662,7 +669,7 @@ list_t *_impl_kernel_list_pendingHeadGet(void) * * @return The id of current running thread. */ -os_id_t _impl_kernel_thread_runIdGet(void) +os_id_t kernel_thread_runIdGet(void) { return (os_id_t)g_kernel_resource.current; } @@ -672,9 +679,9 @@ os_id_t _impl_kernel_thread_runIdGet(void) * * @return The context pointer of current running thread. */ -thread_context_t *_impl_kernel_thread_runContextGet(void) +thread_context_t *kernel_thread_runContextGet(void) { - return (void *)_impl_kernel_member_unified_id_toContainerAddress(_impl_kernel_thread_runIdGet()); + return (void *)kernel_member_unified_id_toContainerAddress(kernel_thread_runIdGet()); } /** @@ -682,11 +689,11 @@ thread_context_t *_impl_kernel_thread_runContextGet(void) * * @param pCurHead The pointer of the thread linker head. 
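Since both conversions above divide by the element size of their own container and re-scale by the other's, the element index is preserved; internal timer N always pairs with application thread N. A tiny round-trip illustration, using only the renamed helpers from this hunk (not code from the patch itself):

```c
/* Illustration only: valid for any unified id that really belongs to a thread. */
void demo_thread_timer_pairing(u32_t thread_uid)
{
    u32_t timer_uid = kernel_member_unified_id_threadToTimer(thread_uid);
    u32_t back = kernel_member_unified_id_timerToThread(timer_uid);
    /* back == thread_uid here, because both conversions keep the element index. */
    (void)back;
}
```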
*/ -static void _impl_kernel_thread_list_transfer_toExit(linker_head_t *pCurHead) +static void kernel_thread_list_transfer_toExit(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - list_t *pToExitList = (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_EXIT); + list_t *pToExitList = (list_t *)kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_EXIT); linker_list_transaction_common(&pCurHead->linker, pToExitList, LIST_TAIL); EXIT_CRITICAL_SECTION(); @@ -697,11 +704,11 @@ static void _impl_kernel_thread_list_transfer_toExit(linker_head_t *pCurHead) * * @param pCurHead The pointer of the thread linker head. */ -void _impl_kernel_thread_list_transfer_toEntry(linker_head_t *pCurHead) +void kernel_thread_list_transfer_toEntry(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - list_t *pToEntryList = (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_ENTRY); + list_t *pToEntryList = (list_t *)kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_ENTRY); linker_list_transaction_common(&pCurHead->linker, pToEntryList, LIST_TAIL); EXIT_CRITICAL_SECTION(); @@ -712,11 +719,11 @@ void _impl_kernel_thread_list_transfer_toEntry(linker_head_t *pCurHead) * * @param pCurHead The pointer of the thread linker head. */ -void _impl_kernel_thread_list_transfer_toPend(linker_head_t *pCurHead) +void kernel_thread_list_transfer_toPend(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - list_t *pToPendList = (list_t *)_impl_kernel_list_pendingHeadGet(); + list_t *pToPendList = (list_t *)kernel_list_pendingHeadGet(); linker_list_transaction_specific(&pCurHead->linker, pToPendList, _kernel_thread_node_Order_compare_condition); EXIT_CRITICAL_SECTION(); @@ -727,11 +734,11 @@ void _impl_kernel_thread_list_transfer_toPend(linker_head_t *pCurHead) * * @param pCurHead The pointer of the semaphore linker head. */ -void _impl_kernel_semaphore_list_transfer_toLock(linker_head_t *pCurHead) +void kernel_semaphore_list_transfer_toLock(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - list_t *pToLockList = (list_t *)(list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_LOCK); + list_t *pToLockList = (list_t *)(list_t *)kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_LOCK); linker_list_transaction_common(&pCurHead->linker, pToLockList, LIST_TAIL); EXIT_CRITICAL_SECTION(); @@ -748,17 +755,17 @@ void _impl_kernel_semaphore_list_transfer_toLock(linker_head_t *pCurHead) * * @return The result of exit operation. 
*/ -u32p_t _impl_kernel_thread_exit_trigger(os_id_t id, os_id_t hold, list_t *pToList, u32_t timeout_us, void (*pCallback)(os_id_t)) +u32p_t kernel_thread_exit_trigger(os_id_t id, os_id_t hold, list_t *pToList, u32_t timeout_us, void (*pCallback)(os_id_t)) { - thread_context_t *pCurThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + thread_context_t *pCurThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); pCurThread->schedule.hold = hold; pCurThread->schedule.exit.pToList = pToList; pCurThread->schedule.exit.timeout_us = timeout_us; pCurThread->schedule.exit.pTimeoutCallFun = pCallback; - _impl_kernel_thread_list_transfer_toExit((linker_head_t *)&pCurThread->head); - return _impl_kernel_thread_schedule_request(); + kernel_thread_list_transfer_toExit((linker_head_t *)&pCurThread->head); + return kernel_thread_schedule_request(); } /** @@ -771,16 +778,16 @@ u32p_t _impl_kernel_thread_exit_trigger(os_id_t id, os_id_t hold, list_t *pToLis * * @return The result of entry operation. */ -u32p_t _impl_kernel_thread_entry_trigger(os_id_t id, os_id_t release, u32_t result, void (*pCallback)(os_id_t)) +u32p_t kernel_thread_entry_trigger(os_id_t id, os_id_t release, u32_t result, void (*pCallback)(os_id_t)) { - thread_context_t *pCurThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + thread_context_t *pCurThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); pCurThread->schedule.entry.release = release; pCurThread->schedule.entry.result = result; pCurThread->schedule.entry.pEntryCallFun = pCallback; - _impl_kernel_thread_list_transfer_toEntry((linker_head_t *)&pCurThread->head); - return _impl_kernel_thread_schedule_request(); + kernel_thread_list_transfer_toEntry((linker_head_t *)&pCurThread->head); + return kernel_thread_schedule_request(); } /** @@ -790,7 +797,7 @@ u32p_t _impl_kernel_thread_entry_trigger(os_id_t id, os_id_t release, u32_t resu * * @return The result of entry action schedule. */ -u32_t _impl_kernel_schedule_entry_result_take(action_schedule_t *pSchedule) +u32_t kernel_schedule_entry_result_take(action_schedule_t *pSchedule) { if (!pSchedule) { return 0u; @@ -807,9 +814,9 @@ u32_t _impl_kernel_schedule_entry_result_take(action_schedule_t *pSchedule) * * @return The true indicates the kernel is in thread mode. */ -b_t _impl_kernel_isInThreadMode(void) +b_t kernel_isInThreadMode(void) { - return _impl_port_isInThreadMode(); + return port_isInThreadMode(); } /** @@ -817,7 +824,7 @@ b_t _impl_kernel_isInThreadMode(void) * * @return The result of the request operation. */ -u32p_t _impl_kernel_thread_schedule_request(void) +u32p_t kernel_thread_schedule_request(void) { if (!_kernel_isInPrivilegeMode()) { return _PC_CMPT_FAILED; @@ -827,41 +834,23 @@ u32p_t _impl_kernel_thread_schedule_request(void) return PC_SC_SUCCESS; } -/** - * @brief To check if the kernel OS is running. - * - * return The true indicates the kernel OS is running. - */ -b_t _impl_kernel_rtos_isRun(void) -{ - return (b_t)((g_kernel_resource.run) ? (TRUE) : (FALSE)); -} - /** * @brief To issue a kernel message notification. */ -void _impl_kernel_message_notification(void) -{ - _impl_kernel_thread_message_notification(); -} - -/** - * @brief To check if the kernel message arrived. 
- */ -static u32_t _kernel_message_arrived(void) +void kernel_message_notification(void) { - return _impl_kernel_thread_message_arrived(); + kthread_message_notification(); } /** * @brief The kernel thread only serve for RTOS with highest priority. */ -void _impl_kernel_thread_schedule(void) +void kernel_schedule_thread(void) { while (1) { u32p_t postcode = _kernel_message_arrived(); if (PC_IOK(postcode)) { - _impl_timer_reamining_elapsed_handler(); + timer_reamining_elapsed_handler(); } } } @@ -869,31 +858,19 @@ void _impl_kernel_thread_schedule(void) /** * @brief The idle thread entry function. */ -void _impl_kernel_thread_idle(void) +void kernel_idle_thread(void) { while (1) { /* TODO: Power Management */ } } -/** - * @brief The kernel OS start to run. - */ -u32p_t _impl_kernel_at_rtos_run(void) -{ - if (_impl_kernel_rtos_isRun()) { - return PC_SC_SUCCESS; - } - - return _impl_kernel_privilege_invoke((const void *)_kernel_start_privilege_routine, NULL); -} - /** * @brief kernel call privilege function in SVC interrupt content. * * @param svc_args The function arguments. */ -void _impl_kernel_privilege_call_inSVC_c(u32_t *svc_args) +void kernel_privilege_call_inSVC_c(u32_t *svc_args) { /* * Stack contains: @@ -918,14 +895,14 @@ void _impl_kernel_privilege_call_inSVC_c(u32_t *svc_args) * * @return The result of the privilege function. */ -u32_t _impl_kernel_privilege_invoke(const void *pCallFun, arguments_t *pArgs) +u32_t kernel_privilege_invoke(const void *pCallFun, arguments_t *pArgs) { if (!pCallFun) { return _PC_CMPT_FAILED; } if (!_kernel_isInPrivilegeMode()) { - return (u32_t)_impl_kernel_svc_call((u32_t)pCallFun, (u32_t)pArgs, 0u, 0u); + return (u32_t)kernel_svc_call((u32_t)pCallFun, (u32_t)pArgs, 0u, 0u); } ENTER_CRITICAL_SECTION(); @@ -936,6 +913,28 @@ u32_t _impl_kernel_privilege_invoke(const void *pCallFun, arguments_t *pArgs) return ret; } +/** + * @brief To check if the kernel OS is running. + * + * return The true indicates the kernel OS is running. + */ +b_t _impl_kernel_rtos_isRun(void) +{ + return (b_t)((g_kernel_resource.run) ? (TRUE) : (FALSE)); +} + +/** + * @brief The kernel OS start to run. + */ +u32p_t _impl_kernel_at_rtos_run(void) +{ + if (_impl_kernel_rtos_isRun()) { + return PC_SC_SUCCESS; + } + + return kernel_privilege_invoke((const void *)_kernel_start_privilege_routine, NULL); +} + #ifdef __cplusplus } #endif diff --git a/kernel/kthread.c b/kernel/kthread.c index 0fe5749..08cec12 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -8,7 +8,7 @@ #include "at_rtos.h" #include "ktype.h" #include "kernel.h" -#include "kthread.h" +#include "timer.h" #include "postcode.h" #ifdef __cplusplus @@ -25,40 +25,40 @@ static u32_t _kernel_idle[((u32_t)(KERNEL_IDLE_THREAD_STACK_SIZE) / sizeof(u32_t * Global At_RTOS application interface init. 
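All of the renamed service entry points funnel through `kernel_privilege_invoke` shown above: a call made from unprivileged thread mode is re-issued through `kernel_svc_call` so the routine executes in the SVC handler, while a call that is already privileged runs inline under the critical section. A hedged sketch of that calling pattern follows; only the `arguments_t`/invoke shape is taken from this patch, and the `demo_feature_*` names are invented:

```c
/* Hypothetical service built on the privilege-invoke pattern above. */
static u32_t _demo_feature_privilege_routine(arguments_t *pArgs)
{
    u32_t id = pArgs[0].u32_val;
    (void)id;
    /* ... privileged work, serialized by the SVC handler or critical section ... */
    return PC_SC_SUCCESS;
}

u32p_t demo_feature_request(os_id_t id)
{
    arguments_t arguments[] = {
        [0] = {.u32_val = (u32_t)id},
    };

    /* Traps via SVC from thread mode, runs directly when already privileged. */
    return kernel_privilege_invoke((const void *)_demo_feature_privilege_routine, arguments);
}
```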
*/ #if (OS_INTERFACE_EXTERN_USE_ENABLE) -const at_rtos_api_t AtOS = { - .thread_init = thread_init, - .thread_sleep = thread_sleep, - .thread_resume = thread_resume, - .thread_suspend = thread_suspend, - .thread_yield = thread_yield, - .thread_delete = thread_delete, - - .timer_init = timer_init, - .timer_start = timer_start, - .timer_stop = timer_stop, - .timer_isBusy = timer_isBusy, - .timer_system_total_ms = timer_system_total_ms, - - .sem_init = sem_init, - .sem_take = sem_take, - .sem_give = sem_give, - .sem_flush = sem_flush, - - .mutex_init = mutex_init, - .mutex_lock = mutex_lock, - .mutex_unlock = mutex_unlock, - - .evt_init = evt_init, - .evt_set = evt_set, - .evt_wait = evt_wait, - - .msgq_init = msgq_init, - .msgq_put = msgq_put, - .msgq_get = msgq_get, - - .pool_init = pool_init, - .pool_take = pool_take, - .pool_release = pool_release, +const at_rtos_api_t os = { + .thread_init = os_thread_init, + .thread_sleep = os_thread_sleep, + .thread_resume = os_thread_resume, + .thread_suspend = os_thread_suspend, + .thread_yield = os_thread_yield, + .thread_delete = os_thread_delete, + + .timer_init = os_timer_init, + .timer_start = os_timer_start, + .timer_stop = os_timer_stop, + .timer_busy = os_timer_busy, + .timer_system_total_ms = os_timer_system_total_ms, + + .sem_init = os_sem_init, + .sem_take = os_sem_take, + .sem_give = os_sem_give, + .sem_flush = os_sem_flush, + + .mutex_init = os_mutex_init, + .mutex_lock = os_mutex_lock, + .mutex_unlock = os_mutex_unlock, + + .evt_init = os_evt_init, + .evt_set = os_evt_set, + .evt_wait = os_evt_wait, + + .msgq_init = os_msgq_init, + .msgq_put = os_msgq_put, + .msgq_get = os_msgq_get, + + .pool_init = os_pool_init, + .pool_take = os_pool_take, + .pool_release = os_pool_release, .id_isInvalid = os_id_is_invalid, .schedule_run = os_kernel_run, @@ -83,12 +83,12 @@ typedef struct { /* kernel schedule semaphore id */ os_sem_id_t sem_id; -} _kernel_thread_resource_t; +} _kthread_resource_t; /** * Local timer resource */ -static _kernel_thread_resource_t g_kernel_thread_resource = { +static _kthread_resource_t g_kernel_thread_resource = { .schedule_id = { .pName = "kernel", @@ -111,9 +111,9 @@ static _kernel_thread_resource_t g_kernel_thread_resource = { /** * @brief To issue a kernel message notification. */ -void _impl_kernel_thread_message_notification(void) +void kthread_message_notification(void) { - u32p_t postcode = sem_give(g_kernel_thread_resource.sem_id); + u32p_t postcode = os_sem_give(g_kernel_thread_resource.sem_id); if (PC_IER(postcode)) { /* TODO */ } @@ -122,15 +122,15 @@ void _impl_kernel_thread_message_notification(void) /** * @brief To check if the kernel message arrived. */ -u32_t _impl_kernel_thread_message_arrived(void) +u32_t kthread_message_arrived(void) { - return sem_take(g_kernel_thread_resource.sem_id, OS_TIME_FOREVER_VAL); + return os_sem_take(g_kernel_thread_resource.sem_id, OS_TIME_FOREVER_VAL); } /** * @brief The AtOS kernel internal use thread and semaphore init. 
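With the interface table renamed from `AtOS` to `os`, application code keeps the object-style call pattern, and the kernel thread itself now rides on it through `os_sem_give` / `os_sem_take` in `kthread_message_notification` and `kthread_message_arrived` above. A rough application-side sketch of the same notify/consume pattern, assuming `OS_INTERFACE_EXTERN_USE_ENABLE` is set and that the semaphore id has been created elsewhere (its init call is not shown in this hunk):

```c
/* Sketch only: g_demo_notify_sem is assumed to be a valid, initialized semaphore id. */
static os_sem_id_t g_demo_notify_sem;

void demo_notify(void)
{
    /* Typically issued from an ISR or another thread to wake the consumer. */
    u32p_t ret = os.sem_give(g_demo_notify_sem);
    if (PC_IER(ret)) {
        /* handle the failed give */
    }
}

void demo_consume_loop(void)
{
    while (1) {
        if (PC_IOK(os.sem_take(g_demo_notify_sem, OS_TIME_FOREVER_VAL))) {
            /* one notification consumed; do the deferred work here */
        }
    }
}
```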
*/ -void _impl_kernel_thread_init(void) +void kthread_init(void) { ENTER_CRITICAL_SECTION(); @@ -147,11 +147,11 @@ void _impl_kernel_thread_init(void) { .level = OS_PRIORITY_KERNEL_THREAD_SCHEDULE_LEVEL, }, - .pEntryFunc = _impl_kernel_thread_schedule, + .pEntryFunc = kernel_schedule_thread, .pStackAddr = (u32_t *)&_kernel_schedule[0], .stackSize = KERNEL_SCHEDULE_THREAD_STACK_SIZE, - .PSPStartAddr = (u32_t)_impl_kernel_stack_frame_init(_impl_kernel_thread_schedule, (u32_t *)&_kernel_schedule[0], - KERNEL_SCHEDULE_THREAD_STACK_SIZE), + .PSPStartAddr = (u32_t)kernel_stack_frame_init(kernel_schedule_thread, (u32_t *)&_kernel_schedule[0], + KERNEL_SCHEDULE_THREAD_STACK_SIZE), }, @@ -167,25 +167,24 @@ void _impl_kernel_thread_init(void) { .level = OS_PRIORITY_KERNEL_THREAD_IDLE_LEVEL, }, - .pEntryFunc = _impl_kernel_thread_idle, + .pEntryFunc = kernel_idle_thread, .pStackAddr = (u32_t *)&_kernel_idle[0], .stackSize = KERNEL_IDLE_THREAD_STACK_SIZE, - .PSPStartAddr = (u32_t)_impl_kernel_stack_frame_init(_impl_kernel_thread_idle, (u32_t *)&_kernel_idle[0], - KERNEL_IDLE_THREAD_STACK_SIZE), + .PSPStartAddr = + (u32_t)kernel_stack_frame_init(kernel_idle_thread, (u32_t *)&_kernel_idle[0], KERNEL_IDLE_THREAD_STACK_SIZE), }, }; - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD); + thread_context_t *pCurThread = (thread_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD); _memcpy((u8_t *)pCurThread, (u8_t *)kernel_thread, (sizeof(thread_context_t) * KERNEL_APPLICATION_THREAD_INSTANCE)); - pCurThread = - (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(kernel_thread[KERNEL_SCHEDULE_THREAD_INSTANCE].head.id); - _impl_thread_timer_init(_impl_kernel_member_unified_id_threadToTimer(pCurThread->head.id)); - _impl_kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); + pCurThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(kernel_thread[KERNEL_SCHEDULE_THREAD_INSTANCE].head.id); + timer_init_for_thread(kernel_member_unified_id_threadToTimer(pCurThread->head.id)); + kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); - pCurThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(kernel_thread[KERNEL_IDLE_THREAD_INSTANCE].head.id); - _impl_thread_timer_init(_impl_kernel_member_unified_id_threadToTimer(pCurThread->head.id)); - _impl_kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); + pCurThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(kernel_thread[KERNEL_IDLE_THREAD_INSTANCE].head.id); + timer_init_for_thread(kernel_member_unified_id_threadToTimer(pCurThread->head.id)); + kernel_thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); semaphore_context_t kernel_semaphore[KERNEL_APPLICATION_SEMAPHORE_INSTANCE] = { [KERNEL_SCHEDULE_SEMAPHORE_INSTANCE] = @@ -201,14 +200,14 @@ void _impl_kernel_thread_init(void) }, }; - semaphore_context_t *pCurSemaphore = (semaphore_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE); - g_kernel_thread_resource.sem_id.val = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); + semaphore_context_t *pCurSemaphore = (semaphore_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE); + g_kernel_thread_resource.sem_id.val = kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); kernel_semaphore[KERNEL_SCHEDULE_SEMAPHORE_INSTANCE].head.id = 
g_kernel_thread_resource.sem_id.val; _memcpy((u8_t *)pCurSemaphore, (u8_t *)kernel_semaphore, (sizeof(semaphore_context_t) * KERNEL_APPLICATION_SEMAPHORE_INSTANCE)); - pCurSemaphore = (semaphore_context_t *)_impl_kernel_member_unified_id_toContainerAddress( - kernel_semaphore[KERNEL_SCHEDULE_SEMAPHORE_INSTANCE].head.id); - _impl_kernel_semaphore_list_transfer_toLock((linker_head_t *)&pCurSemaphore->head); + pCurSemaphore = + (semaphore_context_t *)kernel_member_unified_id_toContainerAddress(kernel_semaphore[KERNEL_SCHEDULE_SEMAPHORE_INSTANCE].head.id); + kernel_semaphore_list_transfer_toLock((linker_head_t *)&pCurSemaphore->head); } #ifdef __cplusplus diff --git a/kernel/mutex.c b/kernel/mutex.c index eed2d69..de801ec 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "mutex.h" #include "postcode.h" #include "trace.h" @@ -20,13 +19,6 @@ extern "C" { */ #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_MUTEX_4) -/** - * The local function lists for current file internal use. - */ -static u32_t _mutex_init_privilege_routine(arguments_t *pArgs); -static u32_t _mutex_lock_privilege_routine(arguments_t *pArgs); -static u32_t _mutex_unlock_privilege_routine(arguments_t *pArgs); - /** * @brief Get the mutex context based on provided unique id. * @@ -36,7 +28,7 @@ static u32_t _mutex_unlock_privilege_routine(arguments_t *pArgs); */ static mutex_context_t *_mutex_object_contextGet(os_id_t id) { - return (mutex_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (mutex_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -46,7 +38,7 @@ static mutex_context_t *_mutex_object_contextGet(os_id_t id) */ static list_t *_mutex_list_lockingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_MUTEX, KERNEL_MEMBER_LIST_MUTEX_LOCK); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_MUTEX, KERNEL_MEMBER_LIST_MUTEX_LOCK); } /** @@ -56,7 +48,7 @@ static list_t *_mutex_list_lockingHeadGet(void) */ static list_t *_mutex_list_unlockingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_MUTEX, KERNEL_MEMBER_LIST_MUTEX_UNLOCK); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_MUTEX, KERNEL_MEMBER_LIST_MUTEX_UNLOCK); } /** @@ -127,7 +119,7 @@ static linker_head_t *_mutex_linker_head_fromBlocking(os_id_t id) */ static b_t _mutex_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_MUTEX, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_MUTEX, id); } /** @@ -144,90 +136,6 @@ static b_t _mutex_object_isInit(i32_t id) return ((pCurMutex) ? (((pCurMutex->head.linker.pList) ? (TRUE) : (FALSE))) : FALSE); } -/** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The value of member number. - */ -u32_t _impl_mutex_os_id_to_number(os_id_t id) -{ - if (_mutex_id_isInvalid(id)) { - return 0u; - } - - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_MUTEX)) / sizeof(mutex_context_t)); -} - -/** - * @brief Initialize a new mutex. - * - * @param pName The mutex name. - * - * @return The mutex unique id. - */ -os_id_t _impl_mutex_init(const char_t *pName) -{ - arguments_t arguments[] = { - [0] = {.pch_val = (const char_t *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_mutex_init_privilege_routine, arguments); -} - -/** - * @brief Mutex lock to avoid another thread access this resource. 
- * - * @param id The mutex unique id. - * - * @return The result of the operation. - */ -u32p_t _impl_mutex_lock(os_id_t id) -{ - if (_mutex_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_mutex_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (!_impl_kernel_isInThreadMode()) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - }; - - return _impl_kernel_privilege_invoke((const void *)_mutex_lock_privilege_routine, arguments); -} - -/** - * @brief Mutex unlock to allow another access the resource. - * - * @param id The mutex unique id. - * - * @return The result of the operation. - */ -u32p_t _impl_mutex_unlock(os_id_t id) -{ - if (_mutex_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_mutex_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - }; - - return _impl_kernel_privilege_invoke((const void *)_mutex_unlock_privilege_routine, arguments); -} - /** * @brief It's sub-routine running at privilege mode. * @@ -243,10 +151,10 @@ static u32_t _mutex_init_privilege_routine(arguments_t *pArgs) u32_t endAddr = 0u; mutex_context_t *pCurMutex = NULL; - pCurMutex = (mutex_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_MUTEX); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_MUTEX); + pCurMutex = (mutex_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_MUTEX); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_MUTEX); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurMutex); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurMutex); if (_mutex_id_isInvalid(id)) { break; } @@ -290,14 +198,14 @@ static u32_t _mutex_lock_privilege_routine(arguments_t *pArgs) u32p_t postcode = PC_SC_SUCCESS; pCurMutex = _mutex_object_contextGet(id); - pCurThread = _impl_kernel_thread_runContextGet(); + pCurThread = kernel_thread_runContextGet(); if (pCurMutex->head.linker.pList == _mutex_list_lockingHeadGet()) { - pLockThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(pCurMutex->holdThreadId); + pLockThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(pCurMutex->holdThreadId); if (pCurThread->priority.level < pLockThread->priority.level) { pLockThread->priority = pCurThread->priority; } - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _mutex_list_blockingHeadGet(id), 0u, NULL); + postcode = kernel_thread_exit_trigger(pCurThread->head.id, id, _mutex_list_blockingHeadGet(id), 0u, NULL); EXIT_CRITICAL_SECTION(); return postcode; @@ -331,7 +239,7 @@ static u32_t _mutex_unlock_privilege_routine(arguments_t *pArgs) pCurMutex = _mutex_object_contextGet(id); pMutexHighestBlockingThread = (thread_context_t *)_mutex_linker_head_fromBlocking(id); - pLockThread = (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(pCurMutex->holdThreadId); + pLockThread = (thread_context_t *)kernel_member_unified_id_toContainerAddress(pCurMutex->holdThreadId); /* priority recovery */ pLockThread->priority = pCurMutex->originalPriority; if (!pMutexHighestBlockingThread) { @@ -344,13 +252,97 @@ static u32_t _mutex_unlock_privilege_routine(arguments_t *pArgs) /* The next thread take the ticket */ pCurMutex->holdThreadId = pMutexHighestBlockingThread->head.id; pCurMutex->originalPriority = pMutexHighestBlockingThread->priority; - postcode = 
_impl_kernel_thread_entry_trigger(pMutexHighestBlockingThread->head.id, id, PC_SC_SUCCESS, NULL); + postcode = kernel_thread_entry_trigger(pMutexHighestBlockingThread->head.id, id, PC_SC_SUCCESS, NULL); } EXIT_CRITICAL_SECTION(); return postcode; } +/** + * @brief Convert the internal os id to kernel member number. + * + * @param id The provided unique id. + * + * @return The value of member number. + */ +u32_t _impl_mutex_os_id_to_number(os_id_t id) +{ + if (_mutex_id_isInvalid(id)) { + return 0u; + } + + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_MUTEX)) / sizeof(mutex_context_t)); +} + +/** + * @brief Initialize a new mutex. + * + * @param pName The mutex name. + * + * @return The mutex unique id. + */ +os_id_t _impl_mutex_init(const char_t *pName) +{ + arguments_t arguments[] = { + [0] = {.pch_val = (const char_t *)pName}, + }; + + return kernel_privilege_invoke((const void *)_mutex_init_privilege_routine, arguments); +} + +/** + * @brief Mutex lock to avoid another thread access this resource. + * + * @param id The mutex unique id. + * + * @return The result of the operation. + */ +u32p_t _impl_mutex_lock(os_id_t id) +{ + if (_mutex_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_mutex_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (!kernel_isInThreadMode()) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + }; + + return kernel_privilege_invoke((const void *)_mutex_lock_privilege_routine, arguments); +} + +/** + * @brief Mutex unlock to allow another access the resource. + * + * @param id The mutex unique id. + * + * @return The result of the operation. + */ +u32p_t _impl_mutex_unlock(os_id_t id) +{ + if (_mutex_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_mutex_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + }; + + return kernel_privilege_invoke((const void *)_mutex_unlock_privilege_routine, arguments); +} + /** * @brief Get mutex snapshot informations. * @@ -359,7 +351,7 @@ static u32_t _mutex_unlock_privilege_routine(arguments_t *pArgs) * * @return TRUE: Operation pass, FALSE: Operation failed. */ -b_t _impl_trace_mutex_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t mutex_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE mutex_context_t *pCurMutex = NULL; @@ -369,8 +361,8 @@ b_t _impl_trace_mutex_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(mutex_context_t) * instance; - pCurMutex = (mutex_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_MUTEX) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurMutex); + pCurMutex = (mutex_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_MUTEX) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurMutex); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_mutex_id_isInvalid(id)) { diff --git a/kernel/pool.c b/kernel/pool.c index 7470bd7..164d946 100644 --- a/kernel/pool.c +++ b/kernel/pool.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "pool.h" #include "postcode.h" #include "trace.h" @@ -20,15 +19,6 @@ extern "C" { */ #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_POOL_8) -/** - * The local function lists for current file internal use. 
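The relocated mutex routines above also carry the priority-inheritance behaviour: `_mutex_lock_privilege_routine` raises the holder to the blocked caller's priority, and `_mutex_unlock_privilege_routine` restores `originalPriority` before handing the mutex to the highest-priority blocked thread. From the caller's side the contract stays simple; a minimal sketch against the `_impl_` signatures in this file, with the resource and name invented for illustration:

```c
/* Usage sketch only; the counter and the mutex name are illustrative. */
static os_id_t g_demo_mutex_id;
static u32_t g_demo_shared_counter = 0u;

void demo_mutex_setup(void)
{
    g_demo_mutex_id = _impl_mutex_init("demo_mutex");
}

void demo_mutex_update(void)
{
    /* Must run in thread mode; _impl_mutex_lock rejects other contexts. */
    if (PC_IOK(_impl_mutex_lock(g_demo_mutex_id))) {
        g_demo_shared_counter++;
        _impl_mutex_unlock(g_demo_mutex_id);
    }
}
```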
- */ -static u32_t _pool_init_privilege_routine(arguments_t *pArgs); -static u32_t _pool_take_privilege_routine(arguments_t *pArgs); -static u32_t _pool_release_privilege_routine(arguments_t *pArgs); - -static void _pool_schedule(os_id_t id); - /** * @brief Get the pool context based on provided unique id. * @@ -38,7 +28,7 @@ static void _pool_schedule(os_id_t id); */ static pool_context_t *_pool_object_contextGet(os_id_t id) { - return (pool_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (pool_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -48,7 +38,7 @@ static pool_context_t *_pool_object_contextGet(os_id_t id) */ static list_t *_pool_list_initHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_POOL, KERNEL_MEMBER_LIST_POOL_INIT); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_POOL, KERNEL_MEMBER_LIST_POOL_INIT); } /** @@ -89,7 +79,7 @@ static void _pool_list_transferToInit(linker_head_t *pCurHead) */ static b_t _pool_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_POOL, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_POOL, id); } /** @@ -106,70 +96,6 @@ static b_t _pool_object_isInit(i32_t id) return ((pCurPool) ? (((pCurPool->head.linker.pList) ? (TRUE) : (FALSE))) : FALSE); } -/** - * @brief The pool timeout callback fucntion. - * - * @param id The pool unique id. - */ -static void _pool_callback_fromTimeOut(os_id_t id) -{ - _impl_kernel_thread_entry_trigger(_impl_kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _pool_schedule); -} - -/** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The value of member number. - */ -u32_t _impl_pool_os_id_to_number(os_id_t id) -{ - if (_pool_id_isInvalid(id)) { - return 0u; - } - - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_POOL)) / sizeof(pool_context_t)); -} - -/** - * @brief Initialize a new pool. - * - * @param pName The pool name. - * @param pMemAddr The pointer of the pool buffer. - * @param elementLen The element size. - * @param elementNum The element number. - * - * @return The pool unique id. - */ -os_id_t _impl_pool_init(const void *pMemAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) -{ - if (!pMemAddr) { - return OS_INVALID_ID; - } - - if (!elementLen) { - return OS_INVALID_ID; - } - - if (!elementNum) { - return OS_INVALID_ID; - } - - if (elementNum > U32_B) { - return OS_INVALID_ID; - } - - arguments_t arguments[] = { - [0] = {.ptr_val = (const void *)pMemAddr}, - [1] = {.u16_val = (u16_t)elementLen}, - [2] = {.u16_val = (u16_t)elementNum}, - [3] = {.pch_val = (const char_t *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_pool_init_privilege_routine, arguments); -} - /** * @brief Take a memory pool address. * @@ -230,82 +156,59 @@ static bool _mem_release(pool_context_t *pCurPool, void *pUserMem) } /** - * @brief Take a message pool resource. - * - * @param id The pool unique id. - * @param ppUserBuffer The dual pointer of the message memory address. - * @param pBufferSize The pointer of the message memory size. - * @param timeout_ms The pool take timeout option. + * @brief The pool schedule routine execute the the pendsv context. * - * @return The result of the operation. + * @param id The unique id of the entry thread. 
*/ -u32p_t _impl_pool_take(os_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms) +static void _pool_schedule(os_id_t id) { - if (_pool_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } + thread_context_t *pEntryThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); + thread_entry_t *pEntry = NULL; + pool_context_t *pCurPool = NULL; + b_t isAvail = FALSE; - if (!_pool_object_isInit(id)) { - return _PC_CMPT_FAILED; + pCurPool = _pool_object_contextGet(pEntryThread->schedule.hold); + if (kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_POOL) { + pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; + return; } - if (!_impl_kernel_isInThreadMode()) { - if (timeout_ms != OS_TIME_NOWAIT_VAL) { - return _PC_CMPT_FAILED; - } + if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { + return; } - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - [1] = {.pv_val = (void **)ppUserBuffer}, - [2] = {.u16_val = (u16_t)bufferSize}, - [3] = {.u32_val = (u32_t)timeout_ms}, - }; - - u32p_t postcode = _impl_kernel_privilege_invoke((const void *)_pool_take_privilege_routine, arguments); - - ENTER_CRITICAL_SECTION(); - if (postcode == PC_SC_UNAVAILABLE) { - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); - postcode = (u32p_t)_impl_kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + pEntry = &pEntryThread->schedule.entry; + if (!timer_busy(kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { + if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { + pEntry->result = PC_SC_TIMEOUT; + } else { + isAvail = true; + } + } else if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_POOL) { + timer_stop_for_thread(kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); + isAvail = true; + } else { + pEntry->result = _PC_CMPT_FAILED; } - if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { - postcode = PC_SC_SUCCESS; + if (isAvail) { + *pEntryThread->pool.ppUserMemAddress = _mem_take(pCurPool); + if (!*pEntryThread->pool.ppUserMemAddress) { + pEntry->result = _PC_CMPT_FAILED; + } else { + pEntry->result = PC_SC_SUCCESS; + } } - - EXIT_CRITICAL_SECTION(); - return postcode; } /** - * @brief Release memory pool. + * @brief The pool timeout callback fucntion. * * @param id The pool unique id. - * @param ppUserBuffer The dual pointer of the message memory address. - * - * @return The result of the operation. 
*/ -u32p_t _impl_pool_release(os_id_t id, void **ppUserBuffer) +static void _pool_callback_fromTimeOut(os_id_t id) { - if (_pool_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_pool_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (*ppUserBuffer == NULL) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - [1] = {.pv_val = (void **)ppUserBuffer}, - }; - - return _impl_kernel_privilege_invoke((const void *)_pool_release_privilege_routine, arguments); + kernel_thread_entry_trigger(kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _pool_schedule); } /** @@ -326,10 +229,10 @@ static u32_t _pool_init_privilege_routine(arguments_t *pArgs) u32_t endAddr = 0u; pool_context_t *pCurPool = NULL; - pCurPool = (pool_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_POOL); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_POOL); + pCurPool = (pool_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_POOL); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_POOL); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurPool); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurPool); if (_pool_id_isInvalid(id)) { break; } @@ -376,7 +279,7 @@ static u32_t _pool_take_privilege_routine(arguments_t *pArgs) u32p_t postcode = PC_SC_SUCCESS; pCurPool = _pool_object_contextGet(id); - pCurThread = _impl_kernel_thread_runContextGet(); + pCurThread = kernel_thread_runContextGet(); if (bufferSize > pCurPool->elementLength) { EXIT_CRITICAL_SECTION(); @@ -384,7 +287,7 @@ static u32_t _pool_take_privilege_routine(arguments_t *pArgs) } if (!pCurPool->elementFreeBits) { - if ((timeout_ms == OS_TIME_NOWAIT_VAL) && (!_impl_kernel_isInThreadMode())) { + if ((timeout_ms == OS_TIME_NOWAIT_VAL) && (!kernel_isInThreadMode())) { EXIT_CRITICAL_SECTION(); return _PC_CMPT_FAILED; } @@ -392,8 +295,8 @@ static u32_t _pool_take_privilege_routine(arguments_t *pArgs) _memset((u8_t *)&pCurThread->pool, 0x0u, sizeof(action_pool_t)); pCurThread->pool.ppUserMemAddress = ppUserBuffer; - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _pool_list_blockingHeadGet(id), timeout_ms, - _pool_callback_fromTimeOut); + postcode = + kernel_thread_exit_trigger(pCurThread->head.id, id, _pool_list_blockingHeadGet(id), timeout_ms, _pool_callback_fromTimeOut); if (PC_IOK(postcode)) { postcode = PC_SC_UNAVAILABLE; @@ -428,7 +331,7 @@ static u32_t _pool_release_privilege_routine(arguments_t *pArgs) u32p_t postcode = PC_SC_SUCCESS; pCurPool = _pool_object_contextGet(id); - pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); + pCurThread = (thread_context_t *)kernel_thread_runContextGet(); if (!_mem_release(pCurPool, *ppUserBuffer)) { EXIT_CRITICAL_SECTION(); @@ -442,7 +345,7 @@ static u32_t _pool_release_privilege_routine(arguments_t *pArgs) list_iterator_init(&it, _pool_list_blockingHeadGet(id)); pCurThread = (thread_context_t *)list_iterator_next(&it); if (pCurThread) { - postcode = _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _pool_schedule); + postcode = kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _pool_schedule); } EXIT_CRITICAL_SECTION(); @@ -450,49 +353,136 @@ static u32_t _pool_release_privilege_routine(arguments_t *pArgs) } /** - * @brief The pool schedule routine execute the the pendsv context. + * @brief Convert the internal os id to kernel member number. 
* - * @param id The unique id of the entry thread. + * @param id The provided unique id. + * + * @return The value of member number. */ -static void _pool_schedule(os_id_t id) +u32_t _impl_pool_os_id_to_number(os_id_t id) { - thread_context_t *pEntryThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); - thread_entry_t *pEntry = NULL; - pool_context_t *pCurPool = NULL; - b_t isAvail = FALSE; + if (_pool_id_isInvalid(id)) { + return 0u; + } - pCurPool = _pool_object_contextGet(pEntryThread->schedule.hold); - if (_impl_kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_POOL) { - pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; - return; + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_POOL)) / sizeof(pool_context_t)); +} + +/** + * @brief Initialize a new pool. + * + * @param pName The pool name. + * @param pMemAddr The pointer of the pool buffer. + * @param elementLen The element size. + * @param elementNum The element number. + * + * @return The pool unique id. + */ +os_id_t _impl_pool_init(const void *pMemAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) +{ + if (!pMemAddr) { + return OS_INVALID_ID; } - if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { - return; + if (!elementLen) { + return OS_INVALID_ID; } - pEntry = &pEntryThread->schedule.entry; - if (!_impl_timer_status_isBusy(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { - if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { - pEntry->result = PC_SC_TIMEOUT; - } else { - isAvail = true; - } - } else if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_POOL) { - _impl_timer_stop(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); - isAvail = true; - } else { - pEntry->result = _PC_CMPT_FAILED; + if (!elementNum) { + return OS_INVALID_ID; } - if (isAvail) { - *pEntryThread->pool.ppUserMemAddress = _mem_take(pCurPool); - if (!*pEntryThread->pool.ppUserMemAddress) { - pEntry->result = _PC_CMPT_FAILED; - } else { - pEntry->result = PC_SC_SUCCESS; + if (elementNum > U32_B) { + return OS_INVALID_ID; + } + + arguments_t arguments[] = { + [0] = {.ptr_val = (const void *)pMemAddr}, + [1] = {.u16_val = (u16_t)elementLen}, + [2] = {.u16_val = (u16_t)elementNum}, + [3] = {.pch_val = (const char_t *)pName}, + }; + + return kernel_privilege_invoke((const void *)_pool_init_privilege_routine, arguments); +} + +/** + * @brief Take a message pool resource. + * + * @param id The pool unique id. + * @param ppUserBuffer The dual pointer of the message memory address. + * @param pBufferSize The pointer of the message memory size. + * @param timeout_ms The pool take timeout option. + * + * @return The result of the operation. 
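The `arguments_t` packing in `_impl_pool_init()` above, and the positional unpacking inside the `*_privilege_routine()` functions elsewhere in this patch, form the kernel's user-to-privilege calling convention. The fragment below is a self-contained imitation of that convention with invented names (`demo_args_t`, `demo_invoke`, `demo_routine`); the real `kernel_privilege_invoke()` dispatches through the privileged path rather than a direct call.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for arguments_t: one tagged slot per parameter. */
typedef union {
    uint32_t u32_val;
    uint16_t u16_val;
    const void *ptr_val;
    const char *pch_val;
} demo_args_t;

/* Stand-in for a *_privilege_routine(): unpack by index, in the packed order. */
static uint32_t demo_routine(demo_args_t *pArgs)
{
    const void *pMem = pArgs[0].ptr_val;
    uint16_t len = pArgs[1].u16_val;
    uint16_t num = pArgs[2].u16_val;
    const char *pName = pArgs[3].pch_val;

    (void)pMem;
    printf("init %s: %u bytes x %u elements\n", pName, (unsigned)len, (unsigned)num);
    return 0u;
}

/* Stand-in for kernel_privilege_invoke(): here a plain call, not a privileged trap. */
static uint32_t demo_invoke(uint32_t (*pRoutine)(demo_args_t *), demo_args_t *pArgs)
{
    return pRoutine(pArgs);
}

int main(void)
{
    static uint8_t buffer[8u * 32u];
    demo_args_t arguments[] = {
        [0] = {.ptr_val = (const void *)buffer},
        [1] = {.u16_val = 32u},
        [2] = {.u16_val = 8u},
        [3] = {.pch_val = "demo_pool"},
    };

    return (int)demo_invoke(demo_routine, arguments);
}
```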
+ */ +u32p_t _impl_pool_take(os_id_t id, void **ppUserBuffer, u16_t bufferSize, u32_t timeout_ms) +{ + if (_pool_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_pool_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (!kernel_isInThreadMode()) { + if (timeout_ms != OS_TIME_NOWAIT_VAL) { + return _PC_CMPT_FAILED; } } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + [1] = {.pv_val = (void **)ppUserBuffer}, + [2] = {.u16_val = (u16_t)bufferSize}, + [3] = {.u32_val = (u32_t)timeout_ms}, + }; + + u32p_t postcode = kernel_privilege_invoke((const void *)_pool_take_privilege_routine, arguments); + + ENTER_CRITICAL_SECTION(); + if (postcode == PC_SC_UNAVAILABLE) { + thread_context_t *pCurThread = (thread_context_t *)kernel_thread_runContextGet(); + postcode = (u32p_t)kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + } + + if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { + postcode = PC_SC_SUCCESS; + } + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief Release memory pool. + * + * @param id The pool unique id. + * @param ppUserBuffer The dual pointer of the message memory address. + * + * @return The result of the operation. + */ +u32p_t _impl_pool_release(os_id_t id, void **ppUserBuffer) +{ + if (_pool_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_pool_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (*ppUserBuffer == NULL) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + [1] = {.pv_val = (void **)ppUserBuffer}, + }; + + return kernel_privilege_invoke((const void *)_pool_release_privilege_routine, arguments); } /** @@ -503,7 +493,7 @@ static void _pool_schedule(os_id_t id) * * @return TRUE: Operation pass, FALSE: Operation failed. */ -b_t _impl_trace_pool_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t pool_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE pool_context_t *pCurPool = NULL; @@ -513,8 +503,8 @@ b_t _impl_trace_pool_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(pool_context_t) * instance; - pCurPool = (pool_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_POOL) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurPool); + pCurPool = (pool_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_POOL) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurPool); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_pool_id_isInvalid(id)) { diff --git a/kernel/queue.c b/kernel/queue.c index e6cfec9..9058277 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "queue.h" #include "postcode.h" #include "trace.h" @@ -22,15 +21,6 @@ extern "C" { #define _QUEUE_WAKEUP_SENDER PC_SC_B #define _QUEUE_WAKEUP_RECEIVER PC_SC_A -/** - * The local function lists for current file internal use. - */ -static u32_t _queue_init_privilege_routine(arguments_t *pArgs); -static u32_t _queue_send_privilege_routine(arguments_t *pArgs); -static u32_t _queue_receive_privilege_routine(arguments_t *pArgs); - -static void _queue_schedule(os_id_t id); - /** * @brief Get the queue context based on provided unique id. 
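Taken together, the relocated `_impl_pool_init()`, `_impl_pool_take()` and `_impl_pool_release()` give the pool its public behaviour: take blocks (optionally with a timeout) until an element is free, and release hands the element back and wakes one blocked taker. Below is a minimal usage sketch that calls the internal `_impl_` functions directly from a thread context; an application would normally reach them through the `at_rtos.h` wrapper layer, and the sizes and names here are illustrative.

```c
#define DEMO_ELEMENT_LEN (32u)
#define DEMO_ELEMENT_NUM (8u)

static u8_t demo_pool_buffer[DEMO_ELEMENT_NUM * DEMO_ELEMENT_LEN];
static os_id_t demo_pool_id;

static void demo_pool_usage(void)
{
    void *pBlock = NULL;

    demo_pool_id = _impl_pool_init(demo_pool_buffer, DEMO_ELEMENT_LEN, DEMO_ELEMENT_NUM, "demo_pool");

    /* Wait up to 100 ms for a free element; PC_SC_TIMEOUT is reported on expiry. */
    if (_impl_pool_take(demo_pool_id, &pBlock, DEMO_ELEMENT_LEN, 100u) == PC_SC_SUCCESS) {
        /* ... fill and use pBlock ... */
        _impl_pool_release(demo_pool_id, &pBlock);
    }
}
```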
* @@ -40,7 +30,7 @@ static void _queue_schedule(os_id_t id); */ static queue_context_t *_queue_object_contextGet(os_id_t id) { - return (queue_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (queue_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -50,7 +40,7 @@ static queue_context_t *_queue_object_contextGet(os_id_t id) */ static list_t *_queue_list_initHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_QUEUE, KERNEL_MEMBER_LIST_QUEUE_INIT); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_QUEUE, KERNEL_MEMBER_LIST_QUEUE_INIT); } /** @@ -99,7 +89,7 @@ static void _queue_list_transferToInit(linker_head_t *pCurHead) */ static b_t _queue_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_QUEUE, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_QUEUE, id); } /** @@ -116,66 +106,6 @@ static b_t _queue_object_isInit(i32_t id) return ((pCurQueue) ? (((pCurQueue->head.linker.pList) ? (TRUE) : (FALSE))) : FALSE); } -/** - * @brief The queue timeout callback fucntion. - * - * @param id The queue unique id. - */ -static void _queue_callback_fromTimeOut(os_id_t id) -{ - _impl_kernel_thread_entry_trigger(_impl_kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _queue_schedule); -} - -/** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The value of member number. - */ -u32_t _impl_queue_os_id_to_number(os_id_t id) -{ - if (_queue_id_isInvalid(id)) { - return 0u; - } - - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_QUEUE)) / sizeof(queue_context_t)); -} - -/** - * @brief Initialize a new queue. - * - * @param pName The queue name. - * @param pQueueBufferAddr The pointer of the queue buffer. - * @param elementLen The element size. - * @param elementNum The element number. - * - * @return The queue unique id. - */ -os_id_t _impl_queue_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) -{ - if (!pQueueBufferAddr) { - return OS_INVALID_ID; - } - - if (!elementLen) { - return OS_INVALID_ID; - } - - if (!elementNum) { - return OS_INVALID_ID; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)pQueueBufferAddr}, - [1] = {.u16_val = (u16_t)elementLen}, - [2] = {.u16_val = (u16_t)elementNum}, - [3] = {.pch_val = (const char_t *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_queue_init_privilege_routine, arguments); -} - /** * @brief Send a queue message. * @@ -267,102 +197,87 @@ static void _message_receive_behind(queue_context_t *pCurQueue, const u8_t *pUse } /** - * @brief Send a queue message. - * - * @param id The queue unique id. - * @param pUserBuffer The pointer of the message buffer address. - * @param bufferSize The queue buffer size. - * @param isToFront The direction of the message operation. - * @param timeout_ms The queue send timeout option. + * @brief The queue schedule routine execute the the pendsv context. * - * @return The result of the operation. + * @param id The unique id of the entry thread. 
*/ -u32p_t _impl_queue_send(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms) +static void _queue_schedule(os_id_t id) { - if (_queue_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } + thread_context_t *pEntryThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); + queue_context_t *pCurQueue = NULL; + thread_entry_t *pEntry = NULL; + b_t isTxAvail = FALSE; + b_t isRxAvail = FALSE; - if (!_queue_object_isInit(id)) { - return _PC_CMPT_FAILED; + if (kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_QUEUE) { + pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; + return; } - if (!_impl_kernel_isInThreadMode()) { - if (timeout_ms != OS_TIME_NOWAIT_VAL) { - return _PC_CMPT_FAILED; + pCurQueue = _queue_object_contextGet(pEntryThread->schedule.hold); + pEntry = &pEntryThread->schedule.entry; + if (pEntry->result == PC_SC_TIMEOUT) { + pEntry->result = PC_SC_TIMEOUT; + } else if (pEntry->result == _QUEUE_WAKEUP_RECEIVER) { + // Release function doesn't kill the timer node from waiting list + if (!timer_busy(kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { + if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { + pEntry->result = PC_SC_TIMEOUT; + } else { + isRxAvail = true; + } + } else if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_QUEUE) { + timer_stop_for_thread(kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); + isRxAvail = true; + } else { + pEntry->result = _PC_CMPT_FAILED; } + } else if (pEntry->result == _QUEUE_WAKEUP_SENDER) { + // Release function doesn't kill the timer node from waiting list + if (!timer_busy(kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { + if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { + pEntry->result = PC_SC_TIMEOUT; + } else { + isTxAvail = true; + } + } else if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_QUEUE) { + timer_stop_for_thread(kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); + isTxAvail = true; + } else { + pEntry->result = _PC_CMPT_FAILED; + } + } else { + pEntry->result = _PC_CMPT_FAILED; } - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, [1] = {.ptr_val = (const void *)pUserBuffer}, [2] = {.u16_val = (u16_t)bufferSize}, - [3] = {.b_val = (b_t)isToFront}, [4] = {.u32_val = (u32_t)timeout_ms}, - }; - - u32p_t postcode = _impl_kernel_privilege_invoke((const void *)_queue_send_privilege_routine, arguments); - - ENTER_CRITICAL_SECTION(); - if (postcode == PC_SC_UNAVAILABLE) { - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); - postcode = (u32p_t)_impl_kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); - } - - if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { - postcode = PC_SC_SUCCESS; + if ((isRxAvail) || (isTxAvail)) { + if (isRxAvail) { + if (pEntryThread->queue.fromBack) { + _message_receive_behind((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, + pEntryThread->queue.userBufferSize); + } else { + _message_receive((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, pEntryThread->queue.userBufferSize); + } + } else if (isTxAvail) { + if (pEntryThread->queue.toFront) { + _message_send_front((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, + pEntryThread->queue.userBufferSize); + } else { + _message_send((queue_context_t *)pCurQueue, 
pEntryThread->queue.pUserBufferAddress, pEntryThread->queue.userBufferSize); + } + } + pEntry->result = PC_SC_SUCCESS; } - - EXIT_CRITICAL_SECTION(); - return postcode; } /** - * @brief Receive a queue message. + * @brief The queue timeout callback fucntion. * * @param id The queue unique id. - * @param pUserBuffer The pointer of the message buffer address. - * @param bufferSize The queue buffer size. - * @param isFromBack The direction of the message operation. - * @param timeout_ms The queue send timeout option. - * - * @return The result of the operation. */ -u32p_t _impl_queue_receive(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms) +static void _queue_callback_fromTimeOut(os_id_t id) { - if (_queue_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_queue_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (!_impl_kernel_isInThreadMode()) { - if (timeout_ms != OS_TIME_NOWAIT_VAL) { - return _PC_CMPT_FAILED; - } - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, [1] = {.ptr_val = (const void *)pUserBuffer}, [2] = {.u16_val = (u16_t)bufferSize}, - [3] = {.b_val = (b_t)isFromBack}, [4] = {.u32_val = (u32_t)timeout_ms}, - }; - - u32p_t postcode = _impl_kernel_privilege_invoke((const void *)_queue_receive_privilege_routine, arguments); - - ENTER_CRITICAL_SECTION(); - - if (postcode == PC_SC_UNAVAILABLE) { - thread_context_t *pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); - - postcode = (u32p_t)pCurThread->schedule.entry.result; - pCurThread->schedule.entry.result = 0u; - } - - if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { - postcode = PC_SC_SUCCESS; - } - - EXIT_CRITICAL_SECTION(); - return postcode; + kernel_thread_entry_trigger(kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _queue_schedule); } /** @@ -383,10 +298,10 @@ static u32_t _queue_init_privilege_routine(arguments_t *pArgs) u32_t endAddr = 0u; queue_context_t *pCurQueue = NULL; - pCurQueue = (queue_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_QUEUE); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_QUEUE); + pCurQueue = (queue_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_QUEUE); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_QUEUE); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurQueue); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurQueue); if (_queue_id_isInvalid(id)) { break; } @@ -436,7 +351,7 @@ static u32_t _queue_send_privilege_routine(arguments_t *pArgs) u32p_t postcode = PC_SC_SUCCESS; pCurQueue = _queue_object_contextGet(id); - pCurThread = _impl_kernel_thread_runContextGet(); + pCurThread = kernel_thread_runContextGet(); if (bufferSize > pCurQueue->elementLength) { EXIT_CRITICAL_SECTION(); return _PC_CMPT_FAILED; @@ -454,8 +369,8 @@ static u32_t _queue_send_privilege_routine(arguments_t *pArgs) pCurThread->queue.userBufferSize = bufferSize; pCurThread->queue.toFront = isFront; - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _queue_list_inBlockingHeadGet(id), timeout_ms, - _queue_callback_fromTimeOut); + postcode = + kernel_thread_exit_trigger(pCurThread->head.id, id, _queue_list_inBlockingHeadGet(id), timeout_ms, _queue_callback_fromTimeOut); if (PC_IOK(postcode)) { postcode = PC_SC_UNAVAILABLE; @@ -472,7 +387,7 @@ static u32_t _queue_send_privilege_routine(arguments_t *pArgs) list_iterator_init(&it, 
_queue_list_OutBlockingHeadGet(id)); pCurThread = (thread_context_t *)list_iterator_next(&it); if (pCurThread) { - postcode = _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, _QUEUE_WAKEUP_RECEIVER, _queue_schedule); + postcode = kernel_thread_entry_trigger(pCurThread->head.id, id, _QUEUE_WAKEUP_RECEIVER, _queue_schedule); } } @@ -501,7 +416,7 @@ static u32_t _queue_receive_privilege_routine(arguments_t *pArgs) u32p_t postcode = PC_SC_SUCCESS; pCurQueue = _queue_object_contextGet(id); - pCurThread = (thread_context_t *)_impl_kernel_thread_runContextGet(); + pCurThread = (thread_context_t *)kernel_thread_runContextGet(); if (bufferSize > pCurQueue->elementLength) { EXIT_CRITICAL_SECTION(); return _PC_CMPT_FAILED; @@ -518,8 +433,8 @@ static u32_t _queue_receive_privilege_routine(arguments_t *pArgs) pCurThread->queue.userBufferSize = bufferSize; pCurThread->queue.fromBack = isBack; - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _queue_list_OutBlockingHeadGet(id), timeout_ms, - _queue_callback_fromTimeOut); + postcode = kernel_thread_exit_trigger(pCurThread->head.id, id, _queue_list_OutBlockingHeadGet(id), timeout_ms, + _queue_callback_fromTimeOut); if (PC_IOK(postcode)) { postcode = PC_SC_UNAVAILABLE; @@ -536,7 +451,7 @@ static u32_t _queue_receive_privilege_routine(arguments_t *pArgs) list_iterator_init(&it, _queue_list_inBlockingHeadGet(id)); pCurThread = (thread_context_t *)list_iterator_next(&it); if (pCurThread) { - postcode = _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, _QUEUE_WAKEUP_SENDER, _queue_schedule); + postcode = kernel_thread_entry_trigger(pCurThread->head.id, id, _QUEUE_WAKEUP_SENDER, _queue_schedule); } } @@ -545,77 +460,152 @@ static u32_t _queue_receive_privilege_routine(arguments_t *pArgs) } /** - * @brief The queue schedule routine execute the the pendsv context. + * @brief Convert the internal os id to kernel member number. * - * @param id The unique id of the entry thread. + * @param id The provided unique id. + * + * @return The value of member number. */ -static void _queue_schedule(os_id_t id) +u32_t _impl_queue_os_id_to_number(os_id_t id) { - thread_context_t *pEntryThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); - queue_context_t *pCurQueue = NULL; - thread_entry_t *pEntry = NULL; - b_t isTxAvail = FALSE; - b_t isRxAvail = FALSE; + if (_queue_id_isInvalid(id)) { + return 0u; + } - if (_impl_kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_QUEUE) { - pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; - return; + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_QUEUE)) / sizeof(queue_context_t)); +} + +/** + * @brief Initialize a new queue. + * + * @param pName The queue name. + * @param pQueueBufferAddr The pointer of the queue buffer. + * @param elementLen The element size. + * @param elementNum The element number. + * + * @return The queue unique id. 
+ */ +os_id_t _impl_queue_init(const void *pQueueBufferAddr, u16_t elementLen, u16_t elementNum, const char_t *pName) +{ + if (!pQueueBufferAddr) { + return OS_INVALID_ID; } - pCurQueue = _queue_object_contextGet(pEntryThread->schedule.hold); - pEntry = &pEntryThread->schedule.entry; - if (pEntry->result == PC_SC_TIMEOUT) { - pEntry->result = PC_SC_TIMEOUT; - } else if (pEntry->result == _QUEUE_WAKEUP_RECEIVER) { - // Release function doesn't kill the timer node from waiting list - if (!_impl_timer_status_isBusy(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { - if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { - pEntry->result = PC_SC_TIMEOUT; - } else { - isRxAvail = true; - } - } else if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_QUEUE) { - _impl_timer_stop(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); - isRxAvail = true; - } else { - pEntry->result = _PC_CMPT_FAILED; - } - } else if (pEntry->result == _QUEUE_WAKEUP_SENDER) { - // Release function doesn't kill the timer node from waiting list - if (!_impl_timer_status_isBusy(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { - if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { - pEntry->result = PC_SC_TIMEOUT; - } else { - isTxAvail = true; - } - } else if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_QUEUE) { - _impl_timer_stop(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); - isTxAvail = true; - } else { - pEntry->result = _PC_CMPT_FAILED; + if (!elementLen) { + return OS_INVALID_ID; + } + + if (!elementNum) { + return OS_INVALID_ID; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)pQueueBufferAddr}, + [1] = {.u16_val = (u16_t)elementLen}, + [2] = {.u16_val = (u16_t)elementNum}, + [3] = {.pch_val = (const char_t *)pName}, + }; + + return kernel_privilege_invoke((const void *)_queue_init_privilege_routine, arguments); +} + +/** + * @brief Send a queue message. + * + * @param id The queue unique id. + * @param pUserBuffer The pointer of the message buffer address. + * @param bufferSize The queue buffer size. + * @param isToFront The direction of the message operation. + * @param timeout_ms The queue send timeout option. + * + * @return The result of the operation. 
+ */ +u32p_t _impl_queue_send(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isToFront, u32_t timeout_ms) +{ + if (_queue_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_queue_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (!kernel_isInThreadMode()) { + if (timeout_ms != OS_TIME_NOWAIT_VAL) { + return _PC_CMPT_FAILED; } - } else { - pEntry->result = _PC_CMPT_FAILED; } - if ((isRxAvail) || (isTxAvail)) { - if (isRxAvail) { - if (pEntryThread->queue.fromBack) { - _message_receive_behind((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, - pEntryThread->queue.userBufferSize); - } else { - _message_receive((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, pEntryThread->queue.userBufferSize); - } - } else if (isTxAvail) { - if (pEntryThread->queue.toFront) { - _message_send_front((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, - pEntryThread->queue.userBufferSize); - } else { - _message_send((queue_context_t *)pCurQueue, pEntryThread->queue.pUserBufferAddress, pEntryThread->queue.userBufferSize); - } + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, [1] = {.ptr_val = (const void *)pUserBuffer}, [2] = {.u16_val = (u16_t)bufferSize}, + [3] = {.b_val = (b_t)isToFront}, [4] = {.u32_val = (u32_t)timeout_ms}, + }; + + u32p_t postcode = kernel_privilege_invoke((const void *)_queue_send_privilege_routine, arguments); + + ENTER_CRITICAL_SECTION(); + if (postcode == PC_SC_UNAVAILABLE) { + thread_context_t *pCurThread = (thread_context_t *)kernel_thread_runContextGet(); + postcode = (u32p_t)kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + } + + if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { + postcode = PC_SC_SUCCESS; + } + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief Receive a queue message. + * + * @param id The queue unique id. + * @param pUserBuffer The pointer of the message buffer address. + * @param bufferSize The queue buffer size. + * @param isFromBack The direction of the message operation. + * @param timeout_ms The queue send timeout option. + * + * @return The result of the operation. + */ +u32p_t _impl_queue_receive(os_id_t id, const u8_t *pUserBuffer, u16_t bufferSize, b_t isFromBack, u32_t timeout_ms) +{ + if (_queue_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_queue_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (!kernel_isInThreadMode()) { + if (timeout_ms != OS_TIME_NOWAIT_VAL) { + return _PC_CMPT_FAILED; } - pEntry->result = PC_SC_SUCCESS; } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, [1] = {.ptr_val = (const void *)pUserBuffer}, [2] = {.u16_val = (u16_t)bufferSize}, + [3] = {.b_val = (b_t)isFromBack}, [4] = {.u32_val = (u32_t)timeout_ms}, + }; + + u32p_t postcode = kernel_privilege_invoke((const void *)_queue_receive_privilege_routine, arguments); + + ENTER_CRITICAL_SECTION(); + + if (postcode == PC_SC_UNAVAILABLE) { + thread_context_t *pCurThread = (thread_context_t *)kernel_thread_runContextGet(); + + postcode = (u32p_t)pCurThread->schedule.entry.result; + pCurThread->schedule.entry.result = 0u; + } + + if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { + postcode = PC_SC_SUCCESS; + } + + EXIT_CRITICAL_SECTION(); + return postcode; } /** @@ -626,7 +616,7 @@ static void _queue_schedule(os_id_t id) * * @return TRUE: Operation pass, FALSE: Operation failed. 
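With `_impl_queue_send()` and `_impl_queue_receive()` re-grouped above, the pair behaves symmetrically: a full queue blocks the sender, an empty queue blocks the receiver, and each side wakes the other through `_queue_schedule()`. A minimal usage sketch follows, again calling the internal `_impl_` functions directly with illustrative sizes; `FALSE` for the direction flags means send-to-back and receive-from-front.

```c
typedef struct {
    u32_t code;
    u32_t payload;
} demo_msg_t;

static demo_msg_t demo_queue_buffer[4];
static os_id_t demo_queue_id;

static void demo_queue_usage(void)
{
    demo_msg_t tx = {.code = 1u, .payload = 42u};
    demo_msg_t rx = {0};

    demo_queue_id = _impl_queue_init(demo_queue_buffer, sizeof(demo_msg_t), 4u, "demo_queue");

    /* Append to the back; block up to 10 ms if the queue is full. */
    _impl_queue_send(demo_queue_id, (const u8_t *)&tx, sizeof(tx), FALSE, 10u);

    /* Pop from the front; block up to 10 ms if the queue is empty. */
    _impl_queue_receive(demo_queue_id, (const u8_t *)&rx, sizeof(rx), FALSE, 10u);
}
```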
*/ -b_t _impl_trace_queue_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t queue_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE queue_context_t *pCurQueue = NULL; @@ -636,8 +626,8 @@ b_t _impl_trace_queue_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(queue_context_t) * instance; - pCurQueue = (queue_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_QUEUE) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurQueue); + pCurQueue = (queue_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_QUEUE) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurQueue); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_queue_id_isInvalid(id)) { diff --git a/kernel/semaphore.c b/kernel/semaphore.c index acb0dc6..23bfe12 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "semaphore.h" #include "postcode.h" #include "trace.h" @@ -21,14 +20,6 @@ extern "C" { #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_SEMAPHORE_3) #define _SEMAPHORE_AVAILABLE_COUNT_MAXIMUM (0xFEu) -/** - * The local function lists for current file internal use. - */ -static u32_t _semaphore_init_privilege_routine(arguments_t *pArgs); -static u32_t _semaphore_take_privilege_routine(arguments_t *pArgs); -static u32_t _semaphore_give_privilege_routine(arguments_t *pArgs); -static u32_t _semaphore_flush_privilege_routine(arguments_t *pArgs); - static void _semaphore_schedule(os_id_t id); /** @@ -41,7 +32,7 @@ static void _semaphore_schedule(os_id_t id); static semaphore_context_t *_semaphore_object_contextGet(os_id_t id) { - return (semaphore_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (semaphore_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -51,7 +42,7 @@ static semaphore_context_t *_semaphore_object_contextGet(os_id_t id) */ static list_t *_semaphore_list_lockingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_LOCK); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_LOCK); } /** @@ -61,7 +52,7 @@ static list_t *_semaphore_list_lockingHeadGet(void) */ static list_t *_semaphore_list_unlockingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_UNLOCK); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_SEMAPHORE, KERNEL_MEMBER_LIST_SEMAPHORE_UNLOCK); } /** @@ -132,7 +123,7 @@ static linker_head_t *_semaphore_linker_head_fromBlocking(os_id_t id) */ static b_t _semaphore_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_SEMAPHORE, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_SEMAPHORE, id); } /** @@ -156,145 +147,58 @@ static b_t _semaphore_object_isInit(i32_t id) */ static void _semaphore_callback_fromTimeOut(os_id_t id) { - _impl_kernel_thread_entry_trigger(_impl_kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _semaphore_schedule); + kernel_thread_entry_trigger(kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, _semaphore_schedule); } /** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The semaphore member's number. 
- */ -u32_t _impl_semaphore_os_id_to_number(os_id_t id) -{ - if (_semaphore_id_isInvalid(id)) { - return 0u; - } - - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_SEMAPHORE)) / sizeof(semaphore_context_t)); -} - -/** - * @brief Initialize a new semaphore. - * - * @param initial The initial count that allows the system take. - * @param limit The maximum count that it's the semaphore's limitation. - * @param permit It permits that all sem_give counts save until sem_take flush, even if the counts number higher than limitation setting - * count. - * @param pName The semaphore name. - * - * @return The semaphore unique id. - */ -os_id_t _impl_semaphore_init(u8_t initialCount, u8_t limitCount, b_t permit, const char_t *pName) -{ - if (!limitCount) { - return OS_INVALID_ID; - } - - arguments_t arguments[] = { - [0] = {.u8_val = (u8_t)initialCount}, - [1] = {.u8_val = (u8_t)limitCount}, - [2] = {.b_val = (b_t)permit}, - [3] = {.pch_val = (const char_t *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_semaphore_init_privilege_routine, arguments); -} - -/** - * @brief Take the semaphore away with timeout option. - * - * @param id The semaphore unique id. + * @brief The semaphore schedule routine execute the the pendsv context. * - * @return The result of the operation. + * @param id The unique id of the entry thread. */ -u32p_t _impl_semaphore_take(os_id_t id, u32_t timeout_ms) +static void _semaphore_schedule(os_id_t id) { - if (_semaphore_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_semaphore_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (!timeout_ms) { - return _PC_CMPT_FAILED; - } + thread_context_t *pEntryThread = (thread_context_t *)(kernel_member_unified_id_toContainerAddress(id)); + semaphore_context_t *pCurSemaphore = NULL; + thread_entry_t *pEntry = NULL; + b_t isAvail = FALSE; - if (!_impl_kernel_isInThreadMode()) { - return _PC_CMPT_FAILED; + if (kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_SEMAPHORE) { + pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; + return; } - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - [1] = {.u32_val = (u32_t)timeout_ms}, - }; - - u32p_t postcode = _impl_kernel_privilege_invoke((const void *)_semaphore_take_privilege_routine, arguments); - - ENTER_CRITICAL_SECTION(); - - if (postcode == PC_SC_UNAVAILABLE) { - thread_context_t *pCurThread = _impl_kernel_thread_runContextGet(); - postcode = (u32p_t)_impl_kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { + return; } - if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { - postcode = PC_SC_SUCCESS; + // Release function doesn't kill the timer node from waiting list + pEntry = &pEntryThread->schedule.entry; + pCurSemaphore = _semaphore_object_contextGet(pEntryThread->schedule.hold); + if (!timer_busy(kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { + if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { + pEntry->result = PC_SC_TIMEOUT; + } else { + isAvail = true; + } + } else if (kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_SEMAPHORE) { + timer_stop_for_thread(kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); + isAvail = true; + } else { + pEntry->result = _PC_CMPT_FAILED; } - EXIT_CRITICAL_SECTION(); - return postcode; -} - -/** - * @brief Give the 
semaphore to release the avaliable count. - * - * @param id The semaphore unique id. - * - * @return The result of the operation. - */ -u32_t _impl_semaphore_give(os_id_t id) -{ - if (_semaphore_id_isInvalid(id)) { - return _PC_CMPT_FAILED; + if (isAvail) { + pEntry->result = PC_SC_SUCCESS; + /* If the PC arrive, the semaphore will be available and can be acquired */ + pCurSemaphore->initialCount--; // The semaphore has available count } - if (!_semaphore_object_isInit(id)) { - return _PC_CMPT_FAILED; + /* Check if we can take the next acquired thread into locking */ + if (pCurSemaphore->initialCount < pCurSemaphore->limitCount) { + _semaphore_list_transfer_toLock((linker_head_t *)&pCurSemaphore->head); + } else { + _semaphore_list_transfer_toUnlock((linker_head_t *)&pCurSemaphore->head); } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - }; - - return _impl_kernel_privilege_invoke((const void *)_semaphore_give_privilege_routine, arguments); -} - -/** - * @brief Flush the semaphore to release all the avaliable count. - * - * @param id The semaphore unique id. - * - * @return The result of the operation. - */ -u32_t _impl_semaphore_flush(os_id_t id) -{ - if (_semaphore_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_semaphore_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - }; - - return _impl_kernel_privilege_invoke((const void *)_semaphore_flush_privilege_routine, arguments); } /** @@ -317,11 +221,11 @@ static u32_t _semaphore_init_privilege_routine(arguments_t *pArgs) semaphore_context_t *pCurSemaphore = NULL; internal = (sizeof(semaphore_context_t) * KERNEL_APPLICATION_SEMAPHORE_INSTANCE); - pCurSemaphore = (semaphore_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE) + internal); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_SEMAPHORE); + pCurSemaphore = (semaphore_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE) + internal); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_SEMAPHORE); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); if (_semaphore_id_isInvalid(id)) { break; } @@ -369,12 +273,12 @@ static u32_t _semaphore_take_privilege_routine(arguments_t *pArgs) thread_context_t *pCurThread = NULL; u32p_t postcode = PC_SC_SUCCESS; - pCurThread = _impl_kernel_thread_runContextGet(); + pCurThread = kernel_thread_runContextGet(); pCurSemaphore = _semaphore_object_contextGet(id); if (pCurSemaphore->head.linker.pList == _semaphore_list_lockingHeadGet()) { /* No availabe count */ - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, id, _semaphore_list_blockingHeadGet(id), timeout_ms, - _semaphore_callback_fromTimeOut); + postcode = kernel_thread_exit_trigger(pCurThread->head.id, id, _semaphore_list_blockingHeadGet(id), timeout_ms, + _semaphore_callback_fromTimeOut); if (PC_IOK(postcode)) { postcode = PC_SC_UNAVAILABLE; @@ -427,7 +331,7 @@ static u32_t _semaphore_give_privilege_routine(arguments_t *pArgs) } if (pSemaphoreHighestBlockingThread) { - postcode = _impl_kernel_thread_entry_trigger(pSemaphoreHighestBlockingThread->head.id, id, PC_SC_SUCCESS, _semaphore_schedule); + postcode = kernel_thread_entry_trigger(pSemaphoreHighestBlockingThread->head.id, id, PC_SC_SUCCESS, _semaphore_schedule); } EXIT_CRITICAL_SECTION(); @@ -456,7 +360,7 @@ 
static u32_t _semaphore_flush_privilege_routine(arguments_t *pArgs) pCurThread = (thread_context_t *)list_iterator_next(&it); while (pCurThread) { pCurSemaphore->initialCount++; - postcode = _impl_kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _semaphore_schedule); + postcode = kernel_thread_entry_trigger(pCurThread->head.id, id, PC_SC_SUCCESS, _semaphore_schedule); if (PC_IER(postcode)) { break; } @@ -468,54 +372,141 @@ static u32_t _semaphore_flush_privilege_routine(arguments_t *pArgs) } /** - * @brief The semaphore schedule routine execute the the pendsv context. + * @brief Convert the internal os id to kernel member number. * - * @param id The unique id of the entry thread. + * @param id The provided unique id. + * + * @return The semaphore member's number. */ -static void _semaphore_schedule(os_id_t id) +u32_t _impl_semaphore_os_id_to_number(os_id_t id) { - thread_context_t *pEntryThread = (thread_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); - semaphore_context_t *pCurSemaphore = NULL; - thread_entry_t *pEntry = NULL; - b_t isAvail = FALSE; + if (_semaphore_id_isInvalid(id)) { + return 0u; + } - if (_impl_kernel_member_unified_id_toId(pEntryThread->schedule.hold) != KERNEL_MEMBER_SEMAPHORE) { - pEntryThread->schedule.entry.result = _PC_CMPT_FAILED; - return; + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_SEMAPHORE)) / sizeof(semaphore_context_t)); +} + +/** + * @brief Initialize a new semaphore. + * + * @param initial The initial count that allows the system take. + * @param limit The maximum count that it's the semaphore's limitation. + * @param permit It permits that all sem_give counts save until sem_take flush, even if the counts number higher than limitation setting + * count. + * @param pName The semaphore name. + * + * @return The semaphore unique id. + */ +os_id_t _impl_semaphore_init(u8_t initialCount, u8_t limitCount, b_t permit, const char_t *pName) +{ + if (!limitCount) { + return OS_INVALID_ID; } - if ((pEntryThread->schedule.entry.result != PC_SC_SUCCESS) && (pEntryThread->schedule.entry.result != PC_SC_TIMEOUT)) { - return; + arguments_t arguments[] = { + [0] = {.u8_val = (u8_t)initialCount}, + [1] = {.u8_val = (u8_t)limitCount}, + [2] = {.b_val = (b_t)permit}, + [3] = {.pch_val = (const char_t *)pName}, + }; + + return kernel_privilege_invoke((const void *)_semaphore_init_privilege_routine, arguments); +} + +/** + * @brief Take the semaphore away with timeout option. + * + * @param id The semaphore unique id. + * + * @return The result of the operation. 
+ */ +u32p_t _impl_semaphore_take(os_id_t id, u32_t timeout_ms) +{ + if (_semaphore_id_isInvalid(id)) { + return _PC_CMPT_FAILED; } - // Release function doesn't kill the timer node from waiting list - pEntry = &pEntryThread->schedule.entry; - pCurSemaphore = _semaphore_object_contextGet(pEntryThread->schedule.hold); - if (!_impl_timer_status_isBusy(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id))) { - if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_TIMER_INTERNAL) { - pEntry->result = PC_SC_TIMEOUT; - } else { - isAvail = true; - } - } else if (_impl_kernel_member_unified_id_toId(pEntry->release) == KERNEL_MEMBER_SEMAPHORE) { - _impl_timer_stop(_impl_kernel_member_unified_id_threadToTimer(pEntryThread->head.id)); - isAvail = true; - } else { - pEntry->result = _PC_CMPT_FAILED; + if (!_semaphore_object_isInit(id)) { + return _PC_CMPT_FAILED; } - if (isAvail) { - pEntry->result = PC_SC_SUCCESS; - /* If the PC arrive, the semaphore will be available and can be acquired */ - pCurSemaphore->initialCount--; // The semaphore has available count + if (!timeout_ms) { + return _PC_CMPT_FAILED; } - /* Check if we can take the next acquired thread into locking */ - if (pCurSemaphore->initialCount < pCurSemaphore->limitCount) { - _semaphore_list_transfer_toLock((linker_head_t *)&pCurSemaphore->head); - } else { - _semaphore_list_transfer_toUnlock((linker_head_t *)&pCurSemaphore->head); + if (!kernel_isInThreadMode()) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + [1] = {.u32_val = (u32_t)timeout_ms}, + }; + + u32p_t postcode = kernel_privilege_invoke((const void *)_semaphore_take_privilege_routine, arguments); + + ENTER_CRITICAL_SECTION(); + + if (postcode == PC_SC_UNAVAILABLE) { + thread_context_t *pCurThread = kernel_thread_runContextGet(); + postcode = (u32p_t)kernel_schedule_entry_result_take((action_schedule_t *)&pCurThread->schedule); + } + + if (PC_IOK(postcode) && (postcode != PC_SC_TIMEOUT)) { + postcode = PC_SC_SUCCESS; + } + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief Give the semaphore to release the avaliable count. + * + * @param id The semaphore unique id. + * + * @return The result of the operation. + */ +u32_t _impl_semaphore_give(os_id_t id) +{ + if (_semaphore_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_semaphore_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + }; + + return kernel_privilege_invoke((const void *)_semaphore_give_privilege_routine, arguments); +} + +/** + * @brief Flush the semaphore to release all the avaliable count. + * + * @param id The semaphore unique id. + * + * @return The result of the operation. + */ +u32_t _impl_semaphore_flush(os_id_t id) +{ + if (_semaphore_id_isInvalid(id)) { + return _PC_CMPT_FAILED; } + + if (!_semaphore_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + }; + + return kernel_privilege_invoke((const void *)_semaphore_flush_privilege_routine, arguments); } /** @@ -526,7 +517,7 @@ static void _semaphore_schedule(os_id_t id) * * @return TRUE: Operation pass, FALSE: Operation failed. 
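The relocated `_impl_semaphore_take()`, `_impl_semaphore_give()` and `_impl_semaphore_flush()` complete the counting-semaphore surface: take blocks while no count is available (a zero timeout is rejected outright), give releases one count to the highest-priority waiter, and flush wakes every blocked waiter. Below is a minimal signalling sketch with invented names; the initial and limit counts are illustrative.

```c
static os_id_t demo_sem_id;

static void demo_sem_setup(void)
{
    /* Start with no available count, cap it at 1, no over-limit accumulation. */
    demo_sem_id = _impl_semaphore_init(0u, 1u, FALSE, "demo_sem");
}

static void demo_sem_producer(void)
{
    /* Release one count; wakes the highest-priority blocked taker, if any. */
    _impl_semaphore_give(demo_sem_id);
}

static void demo_sem_consumer(void)
{
    /* Block for up to 500 ms; PC_SC_TIMEOUT is reported if no give arrives. */
    if (_impl_semaphore_take(demo_sem_id, 500u) == PC_SC_SUCCESS) {
        /* ... handle the signalled event ... */
    }
}
```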
*/ -b_t _impl_trace_semaphore_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t semaphore_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE semaphore_context_t *pCurSemaphore = NULL; @@ -536,8 +527,8 @@ b_t _impl_trace_semaphore_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(semaphore_context_t) * instance; - pCurSemaphore = (semaphore_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); + pCurSemaphore = (semaphore_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_SEMAPHORE) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurSemaphore); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_semaphore_id_isInvalid(id)) { diff --git a/kernel/thread.c b/kernel/thread.c index e944df1..0ed1408 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -7,7 +7,6 @@ #include "kernel.h" #include "timer.h" -#include "thread.h" #include "postcode.h" #include "trace.h" @@ -20,16 +19,6 @@ extern "C" { */ #define _PC_CMPT_FAILED PC_FAILED(PC_CMPT_THREAD_2) -/** - * The local function lists for current file internal use. - */ -static os_id_t _thread_init_privilege_routine(arguments_t *pArgs); -static u32_t _thread_resume_privilege_routine(arguments_t *pArgs); -static u32_t _thread_suspend_privilege_routine(arguments_t *pArgs); -static u32_t _thread_yield_privilege_routine(arguments_t *pArgs); -static u32_t _thread_sleep_privilege_routine(arguments_t *pArgs); -static u32_t _thread_delete_privilege_routine(arguments_t *pArgs); - /** * @brief Get the thread context based on provided unique id. * @@ -39,7 +28,7 @@ static u32_t _thread_delete_privilege_routine(arguments_t *pArgs); */ static thread_context_t *_thread_object_contextGet(os_id_t id) { - return (thread_context_t *)_impl_kernel_member_unified_id_toContainerAddress(id); + return (thread_context_t *)kernel_member_unified_id_toContainerAddress(id); } /** @@ -49,7 +38,7 @@ static thread_context_t *_thread_object_contextGet(os_id_t id) */ static os_id_t _thread_id_runtime_get(void) { - return (os_id_t)_impl_kernel_thread_runIdGet(); + return (os_id_t)kernel_thread_runIdGet(); } /** @@ -59,7 +48,7 @@ static os_id_t _thread_id_runtime_get(void) */ static thread_context_t *_thread_object_runtime_get(void) { - return _thread_object_contextGet(_impl_kernel_thread_runIdGet()); + return _thread_object_contextGet(kernel_thread_runIdGet()); } /** @@ -69,7 +58,7 @@ static thread_context_t *_thread_object_runtime_get(void) */ static list_t *_thread_list_waitingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_WAIT); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_THREAD, KERNEL_MEMBER_LIST_THREAD_WAIT); } /** @@ -79,7 +68,7 @@ static list_t *_thread_list_waitingHeadGet(void) */ static list_t *_thread_list_pendingHeadGet(void) { - return (list_t *)_impl_kernel_list_pendingHeadGet(); + return (list_t *)kernel_list_pendingHeadGet(); } /** @@ -120,7 +109,7 @@ static void _thread_list_transfer_toEntry(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - _impl_kernel_thread_list_transfer_toEntry(pCurHead); + kernel_thread_list_transfer_toEntry(pCurHead); EXIT_CRITICAL_SECTION(); } @@ -134,7 +123,7 @@ static void _thread_list_transfer_toPend(linker_head_t *pCurHead) { ENTER_CRITICAL_SECTION(); - _impl_kernel_thread_list_transfer_toPend(pCurHead); + 
kernel_thread_list_transfer_toPend(pCurHead); EXIT_CRITICAL_SECTION(); } @@ -175,7 +164,7 @@ static linker_head_t *_thread_linker_Head_next_fromPending(void) */ static b_t _thread_id_isInvalid(u32_t id) { - return _impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_THREAD, id); + return kernel_member_unified_id_isInvalid(KERNEL_MEMBER_THREAD, id); } /** @@ -199,7 +188,198 @@ static b_t _thread_object_isInit(os_id_t id) */ static void _thread_callback_fromTimeOut(os_id_t id) { - _impl_kernel_thread_entry_trigger(_impl_kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, NULL); + kernel_thread_entry_trigger(kernel_member_unified_id_timerToThread(id), id, PC_SC_TIMEOUT, NULL); +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. + */ +static os_id_t _thread_init_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + + pThread_entryFunc_t pEntryFun = (pThread_entryFunc_t)pArgs[0].ptr_val; + u32_t *pAddress = (u32_t *)pArgs[1].u32_val; + u32_t size = (u32_t)pArgs[2].u32_val; + u8_t priority = (u8_t)pArgs[3].u8_val; + const char_t *pName = (const char_t *)pArgs[4].pch_val; + u32_t internal = 0u; + u32_t endAddr = 0u; + thread_context_t *pCurThread = NULL; + + internal = sizeof(thread_context_t) * KERNEL_APPLICATION_THREAD_INSTANCE; + pCurThread = (thread_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD) + internal); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_THREAD); + do { + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurThread); + if (_thread_id_isInvalid(id)) { + break; + } + + if (_thread_object_isInit(id)) { + continue; + } + + _memset((char_t *)pCurThread, 0x0u, sizeof(thread_context_t)); + pCurThread->head.id = id; + pCurThread->head.pName = pName; + + pCurThread->priority.level = priority; + pCurThread->pEntryFunc = pEntryFun; + pCurThread->pStackAddr = pAddress; + pCurThread->stackSize = size; + + pCurThread->PSPStartAddr = (u32_t)kernel_stack_frame_init(pEntryFun, pCurThread->pStackAddr, pCurThread->stackSize); + timer_init_for_thread(kernel_member_unified_id_threadToTimer(id)); + + _thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); + + EXIT_CRITICAL_SECTION(); + return id; + } while ((u32_t)++pCurThread < endAddr); + + EXIT_CRITICAL_SECTION(); + return OS_INVALID_ID; +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. + */ +static u32p_t _thread_resume_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + os_id_t id = (os_id_t)pArgs[0].u32_val; + thread_context_t *pCurThread = NULL; + u32p_t postcode = PC_SC_SUCCESS; + + if (_thread_id_runtime_get() == id) { + EXIT_CRITICAL_SECTION(); + return postcode; + } + + pCurThread = _thread_object_contextGet(id); + _thread_list_transfer_toEntry((linker_head_t *)&pCurThread->head); + postcode = kernel_thread_schedule_request(); + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. 
+ */ +static u32p_t _thread_suspend_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + os_id_t id = (os_id_t)pArgs[0].u32_val; + thread_context_t *pCurThread = NULL; + u32p_t postcode = _PC_CMPT_FAILED; + + pCurThread = _thread_object_contextGet(id); + if (!_thread_linker_Head_next_fromPending()) { + EXIT_CRITICAL_SECTION(); + return postcode; + } + _thread_list_transfer_toWait((linker_head_t *)&pCurThread->head); + postcode = kernel_thread_schedule_request(); + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. + */ +static u32p_t _thread_yield_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + UNUSED_MSG(pArgs); + thread_context_t *pCurThread = NULL; + u32p_t postcode = PC_SC_SUCCESS; + + pCurThread = (thread_context_t *)_thread_object_runtime_get(); + if (!_thread_linker_Head_next_fromPending()) { + EXIT_CRITICAL_SECTION(); + return postcode; + } + + _thread_list_transfer_toWait((linker_head_t *)&pCurThread->head); + postcode = kernel_thread_schedule_request(); + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. + */ +static u32p_t _thread_delete_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + os_id_t id = (os_id_t)pArgs[0].u32_val; + thread_context_t *pCurThread = NULL; + u32p_t postcode = _PC_CMPT_FAILED; + + pCurThread = _thread_object_contextGet(id); + if (id == _thread_id_runtime_get()) { + EXIT_CRITICAL_SECTION(); + return postcode; + } + + if (!_thread_linker_Head_next_fromPending()) { + EXIT_CRITICAL_SECTION(); + return postcode; + } + + _thread_list_transfer_toUninitialized((linker_head_t *)&pCurThread->head); + _memset((char_t *)pCurThread->pStackAddr, STACT_UNUSED_DATA, pCurThread->stackSize); + _memset((char_t *)pCurThread, 0x0u, sizeof(thread_context_t)); + postcode = kernel_thread_schedule_request(); + + EXIT_CRITICAL_SECTION(); + return postcode; +} + +/** + * @brief It's sub-routine running at privilege mode. + * + * @param pArgs The function argument packages. + * + * @return The result of privilege routine. 
+ */ +static u32p_t _thread_sleep_privilege_routine(arguments_t *pArgs) +{ + ENTER_CRITICAL_SECTION(); + u32_t timeout_ms = (u32_t)pArgs[0].u32_val; + thread_context_t *pCurThread = NULL; + u32p_t postcode = _PC_CMPT_FAILED; + + pCurThread = kernel_thread_runContextGet(); + postcode = kernel_thread_exit_trigger(pCurThread->head.id, OS_INVALID_ID, _thread_list_waitingHeadGet(), timeout_ms, + _thread_callback_fromTimeOut); + + EXIT_CRITICAL_SECTION(); + return postcode; } /** @@ -213,7 +393,7 @@ u32_t _impl_thread_os_id_to_number(os_id_t id) return 0u; } - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)) / sizeof(thread_context_t)); + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_THREAD)) / sizeof(thread_context_t)); } /** @@ -276,7 +456,7 @@ os_id_t _impl_thread_init(pThread_entryFunc_t pEntryFun, u32_t *pAddress, u32_t [3] = {.u8_val = (u8_t)priority}, [4] = {.pch_val = (const void *)pName}, }; - return _impl_kernel_privilege_invoke((const void *)_thread_init_privilege_routine, arguments); + return kernel_privilege_invoke((const void *)_thread_init_privilege_routine, arguments); } /** @@ -300,7 +480,7 @@ u32p_t _impl_thread_resume(os_id_t id) [0] = {.u32_val = (u32_t)id}, }; - return _impl_kernel_privilege_invoke((const void *)_thread_resume_privilege_routine, arguments); + return kernel_privilege_invoke((const void *)_thread_resume_privilege_routine, arguments); } /** @@ -324,7 +504,7 @@ u32p_t _impl_thread_suspend(os_id_t id) [0] = {.u32_val = (u32_t)id}, }; - return _impl_kernel_privilege_invoke((const void *)_thread_suspend_privilege_routine, arguments); + return kernel_privilege_invoke((const void *)_thread_suspend_privilege_routine, arguments); } /** @@ -336,11 +516,11 @@ u32p_t _impl_thread_suspend(os_id_t id) */ u32p_t _impl_thread_yield(void) { - if (!_impl_kernel_isInThreadMode()) { + if (!kernel_isInThreadMode()) { return _PC_CMPT_FAILED; } - return _impl_kernel_privilege_invoke((const void *)_thread_yield_privilege_routine, NULL); + return kernel_privilege_invoke((const void *)_thread_yield_privilege_routine, NULL); } /** @@ -364,7 +544,7 @@ u32p_t _impl_thread_delete(os_id_t id) [0] = {.u32_val = (u32_t)id}, }; - return _impl_kernel_privilege_invoke((const void *)_thread_delete_privilege_routine, arguments); + return kernel_privilege_invoke((const void *)_thread_delete_privilege_routine, arguments); } /** @@ -380,7 +560,7 @@ u32p_t _impl_thread_sleep(u32_t timeout_ms) return _PC_CMPT_FAILED; } - if (!_impl_kernel_isInThreadMode()) { + if (!kernel_isInThreadMode()) { return _PC_CMPT_FAILED; } @@ -388,198 +568,7 @@ u32p_t _impl_thread_sleep(u32_t timeout_ms) [0] = {.u32_val = (u32_t)timeout_ms}, }; - return _impl_kernel_privilege_invoke((const void *)_thread_sleep_privilege_routine, arguments); -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. 
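The `_impl_thread_*` wrappers above cover the whole thread lifecycle. The sketch below drives them directly with an illustrative stack size and priority; note from the guards above that sleep and yield are only accepted from thread mode, and a thread cannot delete itself.

```c
#define DEMO_STACK_SIZE (1024u)

static u32_t demo_stack[DEMO_STACK_SIZE / sizeof(u32_t)];
static os_id_t demo_thread_id;

static void demo_thread_entry(void)
{
    while (1) {
        /* Park on the waiting list for one second per iteration. */
        _impl_thread_sleep(1000u);
    }
}

static void demo_thread_control(void)
{
    demo_thread_id = _impl_thread_init(demo_thread_entry, demo_stack, DEMO_STACK_SIZE, 5u, "demo_thread");

    _impl_thread_suspend(demo_thread_id); /* move it to the waiting list */
    _impl_thread_resume(demo_thread_id);  /* make it schedulable again */
    _impl_thread_delete(demo_thread_id);  /* only valid from another thread */
}
```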
- */ -static os_id_t _thread_init_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - - pThread_entryFunc_t pEntryFun = (pThread_entryFunc_t)pArgs[0].ptr_val; - u32_t *pAddress = (u32_t *)pArgs[1].u32_val; - u32_t size = (u32_t)pArgs[2].u32_val; - u8_t priority = (u8_t)pArgs[3].u8_val; - const char_t *pName = (const char_t *)pArgs[4].pch_val; - u32_t internal = 0u; - u32_t endAddr = 0u; - thread_context_t *pCurThread = NULL; - - internal = sizeof(thread_context_t) * KERNEL_APPLICATION_THREAD_INSTANCE; - pCurThread = (thread_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD) + internal); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_THREAD); - do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurThread); - if (_thread_id_isInvalid(id)) { - break; - } - - if (_thread_object_isInit(id)) { - continue; - } - - _memset((char_t *)pCurThread, 0x0u, sizeof(thread_context_t)); - pCurThread->head.id = id; - pCurThread->head.pName = pName; - - pCurThread->priority.level = priority; - pCurThread->pEntryFunc = pEntryFun; - pCurThread->pStackAddr = pAddress; - pCurThread->stackSize = size; - - pCurThread->PSPStartAddr = (u32_t)_impl_kernel_stack_frame_init(pEntryFun, pCurThread->pStackAddr, pCurThread->stackSize); - _impl_thread_timer_init(_impl_kernel_member_unified_id_threadToTimer(id)); - - _thread_list_transfer_toPend((linker_head_t *)&pCurThread->head); - - EXIT_CRITICAL_SECTION(); - return id; - } while ((u32_t)++pCurThread < endAddr); - - EXIT_CRITICAL_SECTION(); - return OS_INVALID_ID; -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. - */ -static u32p_t _thread_resume_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - os_id_t id = (os_id_t)pArgs[0].u32_val; - thread_context_t *pCurThread = NULL; - u32p_t postcode = PC_SC_SUCCESS; - - if (_thread_id_runtime_get() == id) { - EXIT_CRITICAL_SECTION(); - return postcode; - } - - pCurThread = _thread_object_contextGet(id); - _thread_list_transfer_toEntry((linker_head_t *)&pCurThread->head); - postcode = _impl_kernel_thread_schedule_request(); - - EXIT_CRITICAL_SECTION(); - return postcode; -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. - */ -static u32p_t _thread_suspend_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - os_id_t id = (os_id_t)pArgs[0].u32_val; - thread_context_t *pCurThread = NULL; - u32p_t postcode = _PC_CMPT_FAILED; - - pCurThread = _thread_object_contextGet(id); - if (!_thread_linker_Head_next_fromPending()) { - EXIT_CRITICAL_SECTION(); - return postcode; - } - _thread_list_transfer_toWait((linker_head_t *)&pCurThread->head); - postcode = _impl_kernel_thread_schedule_request(); - - EXIT_CRITICAL_SECTION(); - return postcode; -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. 
- */ -static u32p_t _thread_yield_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - UNUSED_MSG(pArgs); - thread_context_t *pCurThread = NULL; - u32p_t postcode = PC_SC_SUCCESS; - - pCurThread = (thread_context_t *)_thread_object_runtime_get(); - if (!_thread_linker_Head_next_fromPending()) { - EXIT_CRITICAL_SECTION(); - return postcode; - } - - _thread_list_transfer_toWait((linker_head_t *)&pCurThread->head); - postcode = _impl_kernel_thread_schedule_request(); - - EXIT_CRITICAL_SECTION(); - return postcode; -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. - */ -static u32p_t _thread_delete_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - os_id_t id = (os_id_t)pArgs[0].u32_val; - thread_context_t *pCurThread = NULL; - u32p_t postcode = _PC_CMPT_FAILED; - - pCurThread = _thread_object_contextGet(id); - if (id == _thread_id_runtime_get()) { - EXIT_CRITICAL_SECTION(); - return postcode; - } - - if (!_thread_linker_Head_next_fromPending()) { - EXIT_CRITICAL_SECTION(); - return postcode; - } - - _thread_list_transfer_toUninitialized((linker_head_t *)&pCurThread->head); - _memset((char_t *)pCurThread->pStackAddr, STACT_UNUSED_DATA, pCurThread->stackSize); - _memset((char_t *)pCurThread, 0x0u, sizeof(thread_context_t)); - postcode = _impl_kernel_thread_schedule_request(); - - EXIT_CRITICAL_SECTION(); - return postcode; -} - -/** - * @brief It's sub-routine running at privilege mode. - * - * @param pArgs The function argument packages. - * - * @return The result of privilege routine. - */ -static u32p_t _thread_sleep_privilege_routine(arguments_t *pArgs) -{ - ENTER_CRITICAL_SECTION(); - u32_t timeout_ms = (u32_t)pArgs[0].u32_val; - thread_context_t *pCurThread = NULL; - u32p_t postcode = _PC_CMPT_FAILED; - - pCurThread = _impl_kernel_thread_runContextGet(); - postcode = _impl_kernel_thread_exit_trigger(pCurThread->head.id, OS_INVALID_ID, _thread_list_waitingHeadGet(), timeout_ms, - _thread_callback_fromTimeOut); - - EXIT_CRITICAL_SECTION(); - return postcode; + return kernel_privilege_invoke((const void *)_thread_sleep_privilege_routine, arguments); } /** @@ -590,7 +579,7 @@ static u32p_t _thread_sleep_privilege_routine(arguments_t *pArgs) * * @return TRUE: Operation pass, FALSE: Operation failed. 
*/ -b_t _impl_trace_thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE thread_context_t *pCurThread = NULL; @@ -601,8 +590,8 @@ b_t _impl_trace_thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(thread_context_t) * instance; - pCurThread = (thread_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurThread); + pCurThread = (thread_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_THREAD) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurThread); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_thread_id_isInvalid(id)) { @@ -610,7 +599,7 @@ b_t _impl_trace_thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) return FALSE; } - sv_ms = _impl_kernel_schedule_time_get(); + sv_ms = kernel_schedule_time_get(); if (pCurThread->head.linker.pList == _thread_list_waitingHeadGet()) { pMsgs->pState = "wait"; pMsgs->thread.delay = sv_ms - pCurThread->schedule.analyze.exit_ms; @@ -647,7 +636,7 @@ b_t _impl_trace_thread_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) } unused *= sizeof(u32_t); pMsgs->thread.ram = ((pCurThread->stackSize - unused) * 100u) / pCurThread->stackSize; - pMsgs->thread.cpu = impl_kernel_thread_use_percent_take(pCurThread->head.id); + pMsgs->thread.cpu = kernel_thread_use_percent_take(pCurThread->head.id); EXIT_CRITICAL_SECTION(); return TRUE; diff --git a/kernel/timer.c b/kernel/timer.c index 0089a2d..69d49b6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -36,16 +36,6 @@ typedef struct { */ _timer_resource_t g_timer_resource = {0u}; -/** - * The local function lists for current file internal use. - */ -static u32_t _timer_init_privilege_routine(arguments_t *pArgs); -static u32_t _timer_start_privilege_routine(arguments_t *pArgs); -static u32_t _timer_stop_privilege_routine(arguments_t *pArgs); -static u32_t _timer_total_system_ms_get_privilege_routine(arguments_t *pArgs); -static u32_t _timer_total_system_us_get_privilege_routine(arguments_t *pArgs); -static u32_t _kernel_timer_schedule_request_privilege_routine(arguments_t *pArgs); - /** * @brief Get the timer context based on provided unique id. 
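The RAM figure reported by thread_snapshot() above is the touched fraction of the stack: the count of 32-bit words still holding the unused fill pattern is converted to bytes and the remainder scaled by 100. A standalone rework of that arithmetic with hypothetical numbers (names local to the sketch):

#include <stdint.h>

static uint32_t demo_stack_used_percent(uint32_t stack_size_bytes, uint32_t unused_words)
{
    uint32_t unused_bytes = unused_words * sizeof(uint32_t);              /* unused *= sizeof(u32_t) */
    return ((stack_size_bytes - unused_bytes) * 100u) / stack_size_bytes;
}

/* e.g. a 1024-byte stack with 96 untouched words: (1024 - 384) * 100 / 1024 = 62 (%) */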
* @@ -55,7 +45,7 @@ static u32_t _kernel_timer_schedule_request_privilege_routine(arguments_t *pArgs */ static timer_context_t *_timer_object_contextGet(os_id_t id) { - return (timer_context_t *)(_impl_kernel_member_unified_id_toContainerAddress(id)); + return (timer_context_t *)(kernel_member_unified_id_toContainerAddress(id)); } /** @@ -65,7 +55,7 @@ static timer_context_t *_timer_object_contextGet(os_id_t id) */ static list_t *_timer_list_stopingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_STOP); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_STOP); } /** @@ -75,7 +65,7 @@ static list_t *_timer_list_stopingHeadGet(void) */ static list_t *_timer_list_waitingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_WAIT); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_WAIT); } /** @@ -85,7 +75,7 @@ static list_t *_timer_list_waitingHeadGet(void) */ static list_t *_timer_list_endingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_END); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_END); } /** @@ -95,7 +85,7 @@ static list_t *_timer_list_endingHeadGet(void) */ static list_t *_timer_list_pendingHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_PEND); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_PEND); } /** @@ -105,7 +95,7 @@ static list_t *_timer_list_pendingHeadGet(void) */ static list_t *_timer_list_runningHeadGet(void) { - return (list_t *)_impl_kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_RUN); + return (list_t *)kernel_member_list_get(KERNEL_MEMBER_TIMER, KERNEL_MEMBER_LIST_TIMER_RUN); } /** @@ -260,11 +250,11 @@ static linker_head_t *_timer_linker_head_fromWaiting(void) */ static b_t _timer_id_isInvalid(u32_t id) { - if (!_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, id)) { + if (!kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, id)) { return FALSE; } - if (!_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER, id)) { + if (!kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER, id)) { return FALSE; } @@ -286,205 +276,25 @@ static b_t _timer_object_isInit(u32_t id) } /** - * @brief Convert the internal os id to kernel member number. - * - * @param id The provided unique id. - * - * @return The value of member number. - */ -u32_t _impl_timer_os_id_to_number(u32_t id) -{ - if (!_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, id)) { - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)) / sizeof(timer_context_t)); - } - - if (!_impl_kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER, id)) { - return (u32_t)((id - _impl_kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER)) / sizeof(timer_context_t)); - } - - return 0u; -} - -/** - * @brief Initialize a new timer. - * - * @param pCallFun The timer entry function pointer. - * @param isCycle It indicates the timer if it's cycle repeat. - * @param timeout_ms The expired time. - * @param pName The timer's name, it supported NULL pointer. - * - * @return The value of the timer unique id. 
- */ -os_id_t _impl_timer_init(pTimer_callbackFunc_t pCallFun, b_t isCycle, u32_t timeout_ms, const char_t *pName) -{ - arguments_t arguments[] = { - [0] = {.ptr_val = (const void *)pCallFun}, - [1] = {.b_val = (b_t)isCycle}, - [2] = {.u32_val = (u32_t)timeout_ms}, - [3] = {.pch_val = (const void *)pName}, - }; - - return _impl_kernel_privilege_invoke((const void *)_timer_init_privilege_routine, arguments); -} - -/** - * @brief Initialize a timer for internal thread context use. + * @brief It's sub-routine running at privilege mode. * - * @param id The thread unique id is same as timer. - */ -void _impl_thread_timer_init(os_id_t id) -{ - ENTER_CRITICAL_SECTION(); - - timer_context_t *pCurTimer = _timer_object_contextGet(id); - - _memset((char_t *)pCurTimer, 0x0u, sizeof(timer_context_t)); - pCurTimer->head.id = id; - pCurTimer->head.pName = "TH"; - - pCurTimer->isCycle = FALSE; - pCurTimer->timeout_ms = 0u; - pCurTimer->duration_us = 0u; - pCurTimer->call.pThread = NULL; - - _timer_list_transfer_toStopList((linker_head_t *)&pCurTimer->head); - - EXIT_CRITICAL_SECTION(); -} - -/** - * @brief Timer start for internal thread context use. + * @param pArgs The function argument packages. * - * @param id The thread unique id is same as timer. - * @param timeout_ms The thread timer timeout time. - * @param pCallback The thread timeout callback function. + * @return The result of privilege routine. */ -void _impl_thread_timer_start(os_id_t id, u32_t timeout_ms, void (*pCallback)(os_id_t)) +static u32_t _timer_schedule_request_privilege_routine(arguments_t *pArgs) { ENTER_CRITICAL_SECTION(); - timer_context_t *pCurTimer = _timer_object_contextGet(id); // Only for internal thread use - - pCurTimer->call.pThread = pCallback; - pCurTimer->timeout_ms = OS_TIME_FOREVER_VAL; - pCurTimer->isCycle = FALSE; - - if (pCurTimer->head.linker.pList == _timer_list_waitingHeadGet()) { - _timer_list_remove_fromWaitList((linker_head_t *)&pCurTimer->head); - } - - if (timeout_ms == OS_TIME_FOREVER_VAL) { - _timer_list_transfer_toEndList((linker_head_t *)&pCurTimer->head); + timer_context_t *pCurTimer = (timer_context_t *)_timer_linker_head_fromWaiting(); + if (pCurTimer) { + clock_time_interval_set(pCurTimer->duration_us); } else { - pCurTimer->duration_us = (timeout_ms * 1000u); - _timer_list_transfer_toWaitList((linker_head_t *)&pCurTimer->head); - } - - EXIT_CRITICAL_SECTION(); -} - -/** - * @brief Timer starts operation, be careful if the timer's last time isn't expired or be handled, - * the newer start will override it. - * - * @param id The timer unique id. - * @param isCycle It indicates the timer if it's cycle repeat. - * @param timeout_ms The timer expired time. - * - * @return The result of timer start operation. - */ -u32p_t _impl_timer_start(os_id_t id, b_t isCycle, u32_t timeout_ms) -{ - if (_timer_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_timer_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - if (!timeout_ms) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - [1] = {.b_val = (u32_t)isCycle}, - [2] = {.u32_val = (u32_t)timeout_ms}, - }; - - return _impl_kernel_privilege_invoke((const void *)_timer_start_privilege_routine, arguments); -} - -/** - * @brief timer stops operation. - * - * @param id The timer unique id. - * - * @return The result of timer stop operation. 
- */ -u32p_t _impl_timer_stop(os_id_t id) -{ - if (_timer_id_isInvalid(id)) { - return _PC_CMPT_FAILED; - } - - if (!_timer_object_isInit(id)) { - return _PC_CMPT_FAILED; - } - - arguments_t arguments[] = { - [0] = {.u32_val = (u32_t)id}, - }; - - return _impl_kernel_privilege_invoke((const void *)_timer_stop_privilege_routine, arguments); -} - -/** - * @brief Check the timer to confirm if it's already scheduled in the waiting list. - * - * @param id The timer unique id. - * - * @return The true result indicates time busy, otherwise is free status. - */ -b_t _impl_timer_status_isBusy(os_id_t id) -{ - if (_timer_id_isInvalid(id)) { - return FALSE; + clock_time_interval_set(OS_TIME_FOREVER_VAL); } - if (!_timer_object_isInit(id)) { - return FALSE; - } - - ENTER_CRITICAL_SECTION(); - - linker_head_t *pCurHead = (linker_head_t *)_timer_object_contextGet(id); - b_t isBusy = (pCurHead->linker.pList == (list_t *)_timer_list_waitingHeadGet()); - isBusy |= (pCurHead->linker.pList == (list_t *)_timer_list_endingHeadGet()); - EXIT_CRITICAL_SECTION(); - return isBusy; -} - -/** - * @brief Get the kernel RTOS system time (ms). - * - * @return The value of the total system time (ms). - */ -u32_t _impl_timer_total_system_ms_get(void) -{ - return _impl_kernel_privilege_invoke((const void *)_timer_total_system_ms_get_privilege_routine, NULL); -} - -/** - * @brief Get the kernel RTOS system time (us). - * - * @return The value of the total system time (us). - */ -u32_t _impl_timer_total_system_us_get(void) -{ - return _impl_kernel_privilege_invoke((const void *)_timer_total_system_us_get_privilege_routine, NULL); + return PC_SC_SUCCESS; } /** @@ -492,9 +302,9 @@ u32_t _impl_timer_total_system_us_get(void) * * @return The result of timer schedule request. */ -u32p_t _impl_kernel_timer_schedule_request(void) +static u32p_t _timer_schedule(void) { - return _impl_kernel_privilege_invoke((const void *)_kernel_timer_schedule_request_privilege_routine, NULL); + return kernel_privilege_invoke((const void *)_timer_schedule_request_privilege_routine, NULL); } /** @@ -515,10 +325,10 @@ static u32_t _timer_init_privilege_routine(arguments_t *pArgs) u32_t endAddr = 0u; timer_context_t *pCurTimer = NULL; - pCurTimer = (timer_context_t *)_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_TIMER); - endAddr = (u32_t)_impl_kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_TIMER); + pCurTimer = (timer_context_t *)kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_TIMER); + endAddr = (u32_t)kernel_member_id_toContainerEndAddress(KERNEL_MEMBER_TIMER); do { - os_id_t id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurTimer); + os_id_t id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurTimer); if (_timer_id_isInvalid(id)) { break; } @@ -576,7 +386,7 @@ static u32_t _timer_start_privilege_routine(arguments_t *pArgs) pCurTimer->duration_us = timeout_ms * 1000u; _timer_list_transfer_toWaitList((linker_head_t *)&pCurTimer->head); } - _impl_kernel_timer_schedule_request(); + _timer_schedule(); EXIT_CRITICAL_SECTION(); return PC_SC_SUCCESS; @@ -603,7 +413,7 @@ static u32_t _timer_stop_privilege_routine(arguments_t *pArgs) } _timer_list_transfer_toStopList((linker_head_t *)&pCurTimer->head); - _impl_kernel_timer_schedule_request(); + _timer_schedule(); EXIT_CRITICAL_SECTION(); return PC_SC_SUCCESS; @@ -622,7 +432,7 @@ static u32_t _timer_total_system_ms_get_privilege_routine(arguments_t *pArgs) UNUSED_MSG(pArgs); - u64_t us = (!g_timer_resource.remaining_us) ? 
(_impl_clock_time_elapsed_get()) : (0u); + u64_t us = (!g_timer_resource.remaining_us) ? (clock_time_elapsed_get()) : (0u); us += g_timer_resource.system_us; @@ -645,7 +455,7 @@ static u32_t _timer_total_system_us_get_privilege_routine(arguments_t *pArgs) UNUSED_MSG(pArgs); - u32_t us = (u32_t)((!g_timer_resource.remaining_us) ? (_impl_clock_time_elapsed_get()) : (0u)); + u32_t us = (u32_t)((!g_timer_resource.remaining_us) ? (clock_time_elapsed_get()) : (0u)); us += g_timer_resource.system_us; @@ -654,31 +464,211 @@ static u32_t _timer_total_system_us_get_privilege_routine(arguments_t *pArgs) } /** - * @brief It's sub-routine running at privilege mode. + * @brief Convert the internal os id to kernel member number. * - * @param pArgs The function argument packages. + * @param id The provided unique id. * - * @return The result of privilege routine. + * @return The value of member number. + */ +u32_t _impl_timer_os_id_to_number(u32_t id) +{ + if (!kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER_INTERNAL, id)) { + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER_INTERNAL)) / sizeof(timer_context_t)); + } + + if (!kernel_member_unified_id_isInvalid(KERNEL_MEMBER_TIMER, id)) { + return (u32_t)((id - kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER)) / sizeof(timer_context_t)); + } + + return 0u; +} + +/** + * @brief Initialize a new timer. + * + * @param pCallFun The timer entry function pointer. + * @param isCycle It indicates the timer if it's cycle repeat. + * @param timeout_ms The expired time. + * @param pName The timer's name, it supported NULL pointer. + * + * @return The value of the timer unique id. + */ +os_id_t _impl_timer_init(pTimer_callbackFunc_t pCallFun, b_t isCycle, u32_t timeout_ms, const char_t *pName) +{ + arguments_t arguments[] = { + [0] = {.ptr_val = (const void *)pCallFun}, + [1] = {.b_val = (b_t)isCycle}, + [2] = {.u32_val = (u32_t)timeout_ms}, + [3] = {.pch_val = (const void *)pName}, + }; + + return kernel_privilege_invoke((const void *)_timer_init_privilege_routine, arguments); +} + +/** + * @brief Timer starts operation, be careful if the timer's last time isn't expired or be handled, + * the newer start will override it. + * + * @param id The timer unique id. + * @param isCycle It indicates the timer if it's cycle repeat. + * @param timeout_ms The timer expired time. + * + * @return The result of timer start operation. + */ +u32p_t _impl_timer_start(os_id_t id, b_t isCycle, u32_t timeout_ms) +{ + if (_timer_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_timer_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + if (!timeout_ms) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + [1] = {.b_val = (u32_t)isCycle}, + [2] = {.u32_val = (u32_t)timeout_ms}, + }; + + return kernel_privilege_invoke((const void *)_timer_start_privilege_routine, arguments); +} + +/** + * @brief timer stops operation. + * + * @param id The timer unique id. + * + * @return The result of timer stop operation. + */ +u32p_t _impl_timer_stop(os_id_t id) +{ + if (_timer_id_isInvalid(id)) { + return _PC_CMPT_FAILED; + } + + if (!_timer_object_isInit(id)) { + return _PC_CMPT_FAILED; + } + + arguments_t arguments[] = { + [0] = {.u32_val = (u32_t)id}, + }; + + return kernel_privilege_invoke((const void *)_timer_stop_privilege_routine, arguments); +} + +/** + * @brief Check the timer to confirm if it's already scheduled in the waiting list. + * + * @param id The timer unique id. 
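The member-number conversion above is pure offset arithmetic: the unified id is a byte offset from the container start, so dividing by the context size yields the slot index. A sketch with hypothetical figures (the real start id and sizeof(timer_context_t) depend on the build; only the formula comes from _impl_timer_os_id_to_number()):

#include <stdint.h>

#define DEMO_TIMER_ID_START 0x0200u /* stands in for kernel_member_id_toUnifiedIdStart(KERNEL_MEMBER_TIMER) */
#define DEMO_TIMER_CTX_SIZE 48u     /* stands in for sizeof(timer_context_t) */

static uint32_t demo_timer_id_to_number(uint32_t id)
{
    return (id - DEMO_TIMER_ID_START) / DEMO_TIMER_CTX_SIZE; /* e.g. id 0x0260 -> 0x60 / 48 = 2 */
}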
+ * + * @return The true result indicates time busy, otherwise is free status. */ -static u32_t _kernel_timer_schedule_request_privilege_routine(arguments_t *pArgs) +b_t _impl_timer_busy(os_id_t id) { + if (_timer_id_isInvalid(id)) { + return FALSE; + } + + if (!_timer_object_isInit(id)) { + return FALSE; + } + ENTER_CRITICAL_SECTION(); - timer_context_t *pCurTimer = (timer_context_t *)_timer_linker_head_fromWaiting(); - if (pCurTimer) { - _impl_clock_time_interval_set(pCurTimer->duration_us); + linker_head_t *pCurHead = (linker_head_t *)_timer_object_contextGet(id); + b_t isBusy = (pCurHead->linker.pList == (list_t *)_timer_list_waitingHeadGet()); + isBusy |= (pCurHead->linker.pList == (list_t *)_timer_list_endingHeadGet()); + + EXIT_CRITICAL_SECTION(); + return isBusy; +} + +/** + * @brief Get the kernel RTOS system time (ms). + * + * @return The value of the total system time (ms). + */ +u32_t _impl_timer_total_system_ms_get(void) +{ + return kernel_privilege_invoke((const void *)_timer_total_system_ms_get_privilege_routine, NULL); +} + +/** + * @brief Get the kernel RTOS system time (us). + * + * @return The value of the total system time (us). + */ +u32_t _impl_timer_total_system_us_get(void) +{ + return kernel_privilege_invoke((const void *)_timer_total_system_us_get_privilege_routine, NULL); +} + +/** + * @brief Initialize a timer for internal thread context use. + * + * @param id The thread unique id is same as timer. + */ +void timer_init_for_thread(os_id_t id) +{ + ENTER_CRITICAL_SECTION(); + + timer_context_t *pCurTimer = _timer_object_contextGet(id); + + _memset((char_t *)pCurTimer, 0x0u, sizeof(timer_context_t)); + pCurTimer->head.id = id; + pCurTimer->head.pName = "TH"; + + pCurTimer->isCycle = FALSE; + pCurTimer->timeout_ms = 0u; + pCurTimer->duration_us = 0u; + pCurTimer->call.pThread = NULL; + + _timer_list_transfer_toStopList((linker_head_t *)&pCurTimer->head); + + EXIT_CRITICAL_SECTION(); +} + +/** + * @brief Timer start for internal thread context use. + * + * @param id The thread unique id is same as timer. + * @param timeout_ms The thread timer timeout time. + * @param pCallback The thread timeout callback function. + */ +void timer_start_for_thread(os_id_t id, u32_t timeout_ms, void (*pCallback)(os_id_t)) +{ + ENTER_CRITICAL_SECTION(); + + timer_context_t *pCurTimer = _timer_object_contextGet(id); // Only for internal thread use + + pCurTimer->call.pThread = pCallback; + pCurTimer->timeout_ms = OS_TIME_FOREVER_VAL; + pCurTimer->isCycle = FALSE; + + if (pCurTimer->head.linker.pList == _timer_list_waitingHeadGet()) { + _timer_list_remove_fromWaitList((linker_head_t *)&pCurTimer->head); + } + + if (timeout_ms == OS_TIME_FOREVER_VAL) { + _timer_list_transfer_toEndList((linker_head_t *)&pCurTimer->head); } else { - _impl_clock_time_interval_set(OS_TIME_FOREVER_VAL); + pCurTimer->duration_us = (timeout_ms * 1000u); + _timer_list_transfer_toWaitList((linker_head_t *)&pCurTimer->head); } EXIT_CRITICAL_SECTION(); - return PC_SC_SUCCESS; } /** * @brief Timer callback function handle in the kernel thread. */ -void _impl_timer_reamining_elapsed_handler(void) +void timer_reamining_elapsed_handler(void) { list_t *pListRunning = (list_t *)_timer_list_runningHeadGet(); @@ -702,7 +692,7 @@ void _impl_timer_reamining_elapsed_handler(void) * * @param elapsed_us Clock time reported elapsed time. 
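The two *_for_thread helpers above give every thread a dedicated one-shot timer addressed by the thread's companion internal timer id. A usage sketch follows; the _demo_* names are invented, the thread-to-timer id mapping is done elsewhere in kernel.c (not shown in this hunk), and the real kernel-side caller is kernel_thread_exit_trigger(), whose body is also outside this hunk.

static void _demo_wait_timeout(os_id_t id)
{
    /* invoked from the timer expiry handling below once the timeout elapses;
     * a real callback would wake the thread blocked under this id. */
    (void)id;
}

static void _demo_arm_thread_timeout(os_id_t thread_timer_id, u32_t timeout_ms)
{
    timer_init_for_thread(thread_timer_id);  /* normally done once, when the thread object is created */

    /* OS_TIME_FOREVER_VAL parks the timer on the ending list (it never expires);
     * any other value arms it for timeout_ms milliseconds on the waiting list. */
    timer_start_for_thread(thread_timer_id, timeout_ms, _demo_wait_timeout);
}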
*/ -void _impl_timer_elapsed_handler(u32_t elapsed_us) +void timer_elapsed_handler(u32_t elapsed_us) { ENTER_CRITICAL_SECTION(); @@ -718,7 +708,7 @@ void _impl_timer_elapsed_handler(u32_t elapsed_us) g_timer_resource.system_us += pCurTimer->duration_us; pCurTimer->duration_us = 0u; - if (_impl_kernel_member_unified_id_toId(pCurTimer->head.id) == KERNEL_MEMBER_TIMER_INTERNAL) { + if (kernel_member_unified_id_toId(pCurTimer->head.id) == KERNEL_MEMBER_TIMER_INTERNAL) { _timer_list_transfer_toStopList((linker_head_t *)&pCurTimer->head); if (pCurTimer->call.pThread) { @@ -763,9 +753,9 @@ void _impl_timer_elapsed_handler(u32_t elapsed_us) } if (request) { - _impl_kernel_message_notification(); + kernel_message_notification(); } - _impl_kernel_timer_schedule_request(); + _timer_schedule(); EXIT_CRITICAL_SECTION(); } @@ -778,7 +768,7 @@ void _impl_timer_elapsed_handler(u32_t elapsed_us) * * @return TRUE: Operation pass, FALSE: Operation failed. */ -b_t _impl_trace_timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) +b_t timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) { #if defined KTRACE timer_context_t *pCurTimer = NULL; @@ -788,8 +778,8 @@ b_t _impl_trace_timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) ENTER_CRITICAL_SECTION(); offset = sizeof(timer_context_t) * instance; - pCurTimer = (timer_context_t *)(_impl_kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_TIMER) + offset); - id = _impl_kernel_member_containerAddress_toUnifiedid((u32_t)pCurTimer); + pCurTimer = (timer_context_t *)(kernel_member_id_toContainerStartAddress(KERNEL_MEMBER_TIMER) + offset); + id = kernel_member_containerAddress_toUnifiedid((u32_t)pCurTimer); _memset((u8_t *)pMsgs, 0x0u, sizeof(kernel_snapshot_t)); if (_timer_id_isInvalid(id)) { @@ -829,6 +819,64 @@ b_t _impl_trace_timer_snapshot(u32_t instance, kernel_snapshot_t *pMsgs) #endif } +/** + * @brief timer stops operation. + * + * @param id The timer unique id. + * + * @return The result of timer stop operation. + */ +u32p_t timer_stop_for_thread(os_id_t id) +{ + if (kernel_member_unified_id_toId(id) != KERNEL_MEMBER_TIMER_INTERNAL) { + return _PC_CMPT_FAILED; + } + + return _impl_timer_stop(id); +} + +/** + * @brief Check the timer to confirm if it's already scheduled in the waiting list. + * + * @param id The timer unique id. + * + * @return The true result indicates time busy, otherwise is free status. + */ +b_t timer_busy(os_id_t id) +{ + return _impl_timer_busy(id); +} + +/** + * @brief Get the kernel RTOS system time (ms). + * + * @return The value of the total system time (ms). + */ +u32_t timer_total_system_ms_get(void) +{ + return _impl_timer_total_system_ms_get(); +} + +/** + * @brief Get the kernel RTOS system time (us). + * + * @return The value of the total system time (us). + */ +u32_t timer_total_system_us_get(void) +{ + return _impl_timer_total_system_us_get(); +} + +/** + * @brief kernel RTOS request to update new schedule. + * + * @return The result of timer schedule request. 
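The system-time getters above compose kernel time from two pieces: the microseconds timer_elapsed_handler() has already folded into g_timer_resource.system_us, plus whatever the hardware tick has accumulated since the last report (skipped while a remaining_us correction is pending). A self-contained rework with stand-in fields and hypothetical numbers:

#include <stdint.h>

static uint64_t demo_system_us = 1250000u; /* stand-in for g_timer_resource.system_us    */
static uint32_t demo_remaining_us = 0u;    /* stand-in for g_timer_resource.remaining_us */

static uint32_t demo_clock_elapsed_us(void)
{
    return 730u;                           /* stands in for clock_time_elapsed_get()     */
}

static uint64_t demo_total_system_us(void)
{
    uint64_t us = (!demo_remaining_us) ? demo_clock_elapsed_us() : 0u;
    return us + demo_system_us;            /* 1,250,000 + 730 = 1,250,730 us (~1250 ms)  */
}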
+ */ +u32p_t timer_schedule(void) +{ + return _timer_schedule(); +} + #ifdef __cplusplus } #endif diff --git a/kernel/trace.c b/kernel/trace.c index 412781d..af1ef3b 100644 --- a/kernel/trace.c +++ b/kernel/trace.c @@ -67,7 +67,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-3s %-10s %-7s %-9s %-10s\n", "Thread", "Name", "ID", "STATE", "PRI", "PSP_ADDR", "RAM(1%)", "CPU(0.1%)", "W/P/R(ms)"); for (u32_t i = 0u; i < THREAD_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_thread_snapshot(i, &snapshot_data)) { + if (thread_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s %-3d 0x%-8x %-7d %-9d %-10d\n", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.thread.priority, snapshot_data.thread.current_psp, snapshot_data.thread.ram, snapshot_data.thread.cpu, snapshot_data.thread.delay); @@ -79,7 +79,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-5s %-5s %-1s %-11s %-5s\n", "Sem", "Name", "ID", "STATE", "Init", "Limit", "P", "Timeout(ms)", "Block(ID)"); for (u32_t i = 0u; i < SEMAPHORE_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_semaphore_snapshot(i, &snapshot_data)) { + if (semaphore_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s %-5d %-5d %-1d %-11d", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.semaphore.initial_count, snapshot_data.semaphore.limit_count, snapshot_data.semaphore.permit, snapshot_data.semaphore.timeout_ms); @@ -99,7 +99,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-5s %-3s %-5s\n", "Mutex", "Name", "ID", "STATE", "Hold", "Ori", "Block(ID)"); for (u32_t i = 0u; i < MUTEX_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_mutex_snapshot(i, &snapshot_data)) { + if (mutex_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s %-5d %-3d", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.mutex.holdThreadId, snapshot_data.mutex.originalPriority); @@ -118,7 +118,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-10s %-10s %-5s\n", "Event", "Name", "ID", "State", "Set", "Edge", "Block(ID)"); for (u32_t i = 0u; i < EVENT_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_event_snapshot(i, &snapshot_data)) { + if (event_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s 0x%-8x 0x%-8x", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.event.set, snapshot_data.event.edge); @@ -137,7 +137,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-5s %-20s\n", "Queue", "Name", "ID", "State", "Has", "Block(ID)"); for (u32_t i = 0u; i < QUEUE_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_queue_snapshot(i, &snapshot_data)) { + if (queue_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s %-5d", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.queue.cacheSize); @@ -163,7 +163,7 @@ void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-1s %-11s\n", "Timer", "Name", "ID", "State", "C", "Timeout(ms)"); for (u32_t i = 0u; i < TIMER_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_timer_snapshot(i, &snapshot_data)) { + if (timer_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s %-1d %-11d\n", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.timer.is_cycle, snapshot_data.timer.timeout_ms); } else { @@ -173,7 +173,7 @@ 
void _impl_trace_kernel_snapshot_print(void) KTRACE(">> %-6s %-15s %-5s %-7s %-13s %-5s\n", "Pool", "Name", "ID", "State", "FreeBits", "Block(ID)"); for (u32_t i = 0u; i < POOL_INSTANCE_SUPPORTED_NUMBER; i++) { - if (_impl_trace_pool_snapshot(i, &snapshot_data)) { + if (pool_snapshot(i, &snapshot_data)) { KTRACE(" %-6d %-15s %-5d %-7s 0x%-11x", (i + 1u), snapshot_data.pName, snapshot_data.id, snapshot_data.pState, snapshot_data.pool.free); diff --git a/package.json b/package.json index a296099..f1eb160 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "At-RTOS", "homepage": "https://github.com/At-EC/At-RTOS", - "version": "1.4.4", - "timestamp": "2024-04-20,15:06", - "commit_id": "c07f1f47f92be3dfd394de63e781c9c24fee2510" + "version": "1.4.5", + "timestamp": "2024-04-29,19:55", + "commit_id": "15ae539784cbd1d3a83e2f08c4cf5c4d7e6a71e8" } diff --git a/port/port_common.c b/port/port_common.c index 8d27995..54a1d6e 100644 --- a/port/port_common.c +++ b/port/port_common.c @@ -4,7 +4,7 @@ * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. **/ - + #include "clock_tick.h" #include "port.h" @@ -17,7 +17,7 @@ extern "C" { */ void SysTick_Handler(void) { - _impl_clock_isr(); + clock_isr(); } /** @@ -31,7 +31,7 @@ void HardFault_Handler(void) /** * @brief To check if it's in interrupt content. */ -b_t _impl_port_isInInterruptContent(void) +b_t port_isInInterruptContent(void) { if (__get_IPSR()) { return TRUE; @@ -47,7 +47,7 @@ b_t _impl_port_isInInterruptContent(void) /** * @brief To check if it's in kernel thread content. */ -b_t _impl_port_isInThreadMode(void) +b_t port_isInThreadMode(void) { if (__get_IPSR()) { return FALSE; @@ -58,7 +58,7 @@ b_t _impl_port_isInThreadMode(void) /** * @brief ARM core trigger the pendsv interrupt. */ -void _impl_port_setPendSV(void) +void port_setPendSV(void) { SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; } @@ -66,7 +66,7 @@ void _impl_port_setPendSV(void) /** * @brief ARM core config kernel thread interrupt priority. */ -void _impl_port_interrupt_init(void) +void port_interrupt_init(void) { NVIC_SetPriority(PendSV_IRQn, 0xFFu); // Set PendSV to lowest possible priority NVIC_SetPriority(SVCall_IRQn, 0u); // Set SV to lowest possible priority @@ -82,7 +82,7 @@ void _impl_port_interrupt_init(void) * * @return The PSP stack address. 
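A short design note on the port hooks above: port_interrupt_init() pins PendSV at the lowest possible priority, so a context switch requested through port_setPendSV() from any context is deferred until every other interrupt has unwound. A usage sketch, assuming port.h (included by port_common.c above) declares these hooks:

#include "port.h"

static void demo_request_context_switch(void)
{
    /* only pends the switch; the actual register swap happens later, when
     * PendSV_Handler runs at the lowest interrupt priority. */
    port_setPendSV();
}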
*/ -u32_t _impl_port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) +u32_t port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) { _memset((uchar_t *)pAddress, STACT_UNUSED_DATA, size); diff --git a/port/port_iar.s b/port/port_iar.s index e871f7d..fa6c321 100644 --- a/port/port_iar.s +++ b/port/port_iar.s @@ -12,8 +12,8 @@ SECTION .text : CODE - EXTERN _impl_kernel_privilege_call_inSVC_c - EXTERN _impl_kernel_scheduler_inPendSV_c + EXTERN kernel_privilege_call_inSVC_c + EXTERN kernel_scheduler_inPendSV_c EXPORT _impl_port_run_theFirstThread EXPORT PendSV_Handler @@ -50,7 +50,7 @@ PendSV_Handler: PUSH {R0, R1, R12, LR} MOV R0, SP ; R0 points to the argument ppCurPsp ADD R1, SP, #4 ; R1 points to the argument ppNextPSP - BL _impl_kernel_scheduler_inPendSV_c ; Call _impl_kernel_scheduler_inPendSV_c + BL kernel_scheduler_inPendSV_c ; Call kernel_scheduler_inPendSV_c POP {R0, R1, R12, LR} ; R0 = ppCurPsp, R1 = ppNextPSP CMP R0, R1 ; if R0 = R1 @@ -58,11 +58,11 @@ PendSV_Handler: MRS R2, PSP ; Get current process stack pointer value */ -#if ( FPU_ENABLED ) ; If the Cortex-M is not supported, the ASM instruction will not support VSTMDBEQ +#if ( FPU_ENABLED ) ; If the Cortex-M is not supported, the ASM instruction will not support VSTMDBEQ TST LR, #0x10 ; Test bit 4 of EXC_RETURN (0: FPU active on exception entry, 1: FPU not active) - IT EQ ; if (LR[4] == 0) */ + IT EQ ; if (LR[4] == 0) VSTMDBEQ R2!, {S16 - S31} ; Save floating point registers, EQ suffix will save FPU registers {s16 - S31} - ; if bit of LR was zero (S0-S15, FPSCR alread saved by MCU) */ + ; if bit of LR was zero (S0-S15, FPSCR alread saved by MCU) MRS R3, CONTROL ; Save CONTROL register in R3 to be pushed on stack - bit 2 (FPCA) indicates floating-point is active STMDB R2!, {R3 - R11} ; Save CONTROL, {R4 - R11} @@ -94,7 +94,7 @@ PendSV_Handler: ; End of Context switching code exit - CPSIE I ; Enable interrupts + CPSIE I ; Enable interrupts ISB BX LR ; return from exception, restoring {R0 - R3, R12, LR, PC, PSR} @@ -107,7 +107,7 @@ SVC_Handler: MRSEQ R0, MSP ; Set R0 = MSP MRSNE R0, PSP ; Set R0 = PSP - B _impl_kernel_privilege_call_inSVC_c ; call _impl_kernel_privilege_call_inSVC_c + B kernel_privilege_call_inSVC_c ; call kernel_privilege_call_inSVC_c ; return from exception, restoring {R0-R3, R12, LR, PC, PSR} END diff --git a/port/port_keil_ac5.c b/port/port_keil_ac5.c index 4cc6181..6d61d4a 100644 --- a/port/port_keil_ac5.c +++ b/port/port_keil_ac5.c @@ -17,8 +17,8 @@ extern "C" { #define FPU_ENABLED 0 #endif -extern void _impl_kernel_privilege_call_inSVC_c(u32_t *svc_args); -extern void _impl_kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP); +extern void kernel_privilege_call_inSVC_c(u32_t *svc_args); +extern void kernel_scheduler_inPendSV_c(u32_t **ppCurPsp, u32_t **ppNextPSP); /** * @brief ARM core SVC interrupt handle function. 
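For context on port_stack_frame_init() above (its body is outside this hunk): on Cortex-M the hardware pops R0-R3, R12, LR, PC and xPSR from the process stack on exception return, so a freshly created thread needs at least that frame pre-built at the top of its stack, with the Thumb bit set in xPSR and PC pointing at the entry function. The sketch below shows only that hardware frame and is not the project's implementation, which also stacks the software-saved registers handled in PendSV_Handler above.

#include <stdint.h>

static uint32_t demo_stack_frame_init(void (*entry)(void), uint32_t *stack_base, uint32_t size_bytes)
{
    /* full-descending stack: start at the top, keep it 8-byte aligned */
    uint32_t *sp = (uint32_t *)(((uint32_t)stack_base + size_bytes) & ~0x7u);

    *(--sp) = 0x01000000u;     /* xPSR: Thumb state bit set                                   */
    *(--sp) = (uint32_t)entry; /* PC: thread entry function                                   */
    *(--sp) = 0u;              /* LR: return address if entry ever returns (ports trap this)  */
    *(--sp) = 0u;              /* R12                                                         */
    *(--sp) = 0u;              /* R3                                                          */
    *(--sp) = 0u;              /* R2                                                          */
    *(--sp) = 0u;              /* R1                                                          */
    *(--sp) = 0u;              /* R0: could carry an argument to the entry function           */
    return (uint32_t)sp;       /* candidate initial PSP value                                 */
}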
@@ -31,7 +31,7 @@ __asm void SVC_Handler(void) MRSEQ R0, MSP /* Set R0 = MSP */ MRSNE R0, PSP /* Set R0 = PSP */ - B __cpp(_impl_kernel_privilege_call_inSVC_c) /* call _impl_kernel_privilege_call_inSVC_c */ + B __cpp(kernel_privilege_call_inSVC_c) /* call kernel_privilege_call_inSVC_c */ /** * return from exception, restoring {R0-R3, R12, LR, PC, PSR} @@ -58,7 +58,7 @@ __asm void PendSV_Handler(void) PUSH {R0, R1, R12, LR} MOV R0, SP /* R0 points to the argument ppCurPsp */ ADD R1, SP, #4 /* R1 points to the argument ppNextPSP */ - BL __cpp(_impl_kernel_scheduler_inPendSV_c) /* Call _impl_kernel_scheduler_inPendSV_c */ + BL __cpp(kernel_scheduler_inPendSV_c) /* Call kernel_scheduler_inPendSV_c */ POP {R0, R1, R12, LR} /* R0 = ppCurPsp, R1 = ppNextPSP */ CMP R0, R1 /* if R0 = R1 */ @@ -116,7 +116,7 @@ Exit /** * @brief ARM core trigger the first thread to run. */ -__asm void _impl_port_run_theFirstThread(u32_t sp) +__asm void port_run_theFirstThread(u32_t sp) { /** * initialize R4-R11 from context frame using passed SP diff --git a/port/port_keil_ac6.c b/port/port_keil_ac6.c index d77f187..64360e2 100644 --- a/port/port_keil_ac6.c +++ b/port/port_keil_ac6.c @@ -26,13 +26,13 @@ void SVC_Handler( void ) __asm volatile ( " .syntax unified \n" - " .extern _impl_kernel_privilege_call_inSVC_c \n" + " .extern kernel_privilege_call_inSVC_c \n" " \n" " tst lr, #4 \n" /* call from which stack pointer base on the bit 2 of EXC_RETURN (LR) */ " ite eq \n" " mrseq r0, msp \n" /* Set R0 = MSP */ " mrsne r0, psp \n" /* Set R0 = PSP */ - " b _impl_kernel_privilege_call_inSVC_c \n" /* call _impl_kernel_privilege_call_inSVC_c */ + " b kernel_privilege_call_inSVC_c \n" /* call kernel_privilege_call_inSVC_c */ " \n" " .align 4 \n" ); @@ -53,7 +53,7 @@ void PendSV_Handler(void) __asm volatile ( " .syntax unified \n" - " .extern _impl_kernel_scheduler_inPendSV_c \n" + " .extern kernel_scheduler_inPendSV_c \n" " \n" " cpsid i \n" /* Disable interrupts */ " isb \n" @@ -64,7 +64,7 @@ void PendSV_Handler(void) " push {r0, r1, r12, lr} \n" " mov r0, sp \n" /* R0 points to the argument ppCurPsp */ " add r1, sp, #4 \n" /* R1 points to the argument ppNextPSP */ - " bl _impl_kernel_scheduler_inPendSV_c \n" /* Call _impl_kernel_scheduler_inPendSV_c */ + " bl kernel_scheduler_inPendSV_c \n" /* Call kernel_scheduler_inPendSV_c */ " pop {r0, r1, r12, lr} \n" /* R0 = ppCurPsp, R1 = ppNextPSP */ " \n" " cmp r0, r1 \n" /* if R0 = R1 */ @@ -89,7 +89,7 @@ void PendSV_Handler(void) */ " str r2, [r0] \n" /* *ppCurPSP = CurPSP */ " ldr r2, [r1] \n" /* NextPSP = *pNextPSP */ - " \n" + " \n" #if ( FPU_ENABLED ) /* If the Cortex-M is not supported, the ASM instruction will not support VSTMDBEQ */ " ldmia r2!, {lr} \n" /* restore LR */ " ldmia r2!, {r3 - r11} \n" /* restore {R3 - R11} */ @@ -104,11 +104,11 @@ void PendSV_Handler(void) " ldmia r2!, {r4 - r11} \n" /* no FPU present - context is {R4 - R11} */ #endif " \n" - " msr psp, r2 \n" /* Set PSP to next thread */ + " msr psp, r2 \n" /* Set PSP to next thread */ /** * End of Context switching code */ - "exit: \n" + "exit: \n" " cpsie I \n" /* Enable interrupts */ " isb \n" " \n" @@ -121,7 +121,7 @@ void PendSV_Handler(void) /** * @brief ARM core trigger the first thread to run. 
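The contract between the handlers above and kernel_scheduler_inPendSV_c() can be read off the assembly comments: the handler passes the addresses of two stack slots, the C scheduler fills them with the location where the outgoing thread's PSP must be stored and the location the incoming thread's PSP must be loaded from, and the handler skips the save/restore when both point at the same slot. A plausible shape with invented names, inferred from the assembly rather than taken from kernel.c (which is outside this hunk):

#include <stdint.h>

static uint32_t demo_saved_psp[2]; /* stand-ins for two threads' saved-PSP fields */

static void demo_scheduler_inPendSV_c(uint32_t **ppCurPsp, uint32_t **ppNextPSP)
{
    /* a real scheduler would pick the next runnable thread here */
    *ppCurPsp = &demo_saved_psp[0];  /* PendSV stores the outgoing PSP through this pointer */
    *ppNextPSP = &demo_saved_psp[1]; /* PendSV loads the incoming PSP through this pointer  */
}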
*/ -void _impl_port_run_theFirstThread(u32_t sp) +void port_run_theFirstThread(u32_t sp) { /** * initialize R4-R11 from context frame using passed SP diff --git a/port/port_native_gcc.c b/port/port_native_gcc.c index f77b20d..d089f59 100644 --- a/port/port_native_gcc.c +++ b/port/port_native_gcc.c @@ -15,7 +15,7 @@ extern "C" { /** * @brief ARM core trigger the svc call interrupt. */ -u32_t _impl_kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3) +u32_t kernel_svc_call(u32_t args_0, u32_t args_1, u32_t args_2, u32_t args_3) { /* TODO */ } @@ -39,7 +39,7 @@ void HardFault_Handler(void) /** * @brief To check if it's in interrupt content. */ -b_t _impl_port_isInInterruptContent(void) +b_t port_isInInterruptContent(void) { /* TODO */ @@ -49,7 +49,7 @@ b_t _impl_port_isInInterruptContent(void) /** * @brief To check if it's in kernel thread content. */ -b_t _impl_port_isInThreadMode(void) +b_t port_isInThreadMode(void) { /* TODO */ return TRUE; @@ -58,7 +58,7 @@ b_t _impl_port_isInThreadMode(void) /** * @brief ARM core trigger the pendsv interrupt. */ -void _impl_port_setPendSV(void) +void port_setPendSV(void) { /* TODO */ } @@ -66,7 +66,7 @@ void _impl_port_setPendSV(void) /** * @brief ARM core config kernel thread interrupt priority. */ -void _impl_port_interrupt_init(void) +void port_interrupt_init(void) { /* TODO */ } @@ -90,7 +90,7 @@ void PendSV_Handler(void) /** * @brief ARM core trigger the first thread to run. */ -void _impl_port_run_theFirstThread(u32_t sp) +void port_run_theFirstThread(u32_t sp) { /* TODO */ } @@ -104,7 +104,7 @@ void _impl_port_run_theFirstThread(u32_t sp) * * @return The PSP stack address. */ -u32_t _impl_port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) +u32_t port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size) { /* TODO */ }
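Taken together, the port files in this patch define the surface a target port has to supply after the _impl_ prefix removal; the native GCC port above intentionally leaves them as TODO stubs for the host build. For reference, the entry points as they appear in this patch:

/* context queries */
b_t port_isInInterruptContent(void);
b_t port_isInThreadMode(void);

/* scheduling hooks */
void port_setPendSV(void);
void port_interrupt_init(void);
void port_run_theFirstThread(u32_t sp);
u32_t port_stack_frame_init(void (*pEntryFunction)(void), u32_t *pAddress, u32_t size);

/* plus SysTick_Handler/SVC_Handler/PendSV_Handler and the clock hooks
 * (clock_isr, clock_time_interval_set, clock_time_elapsed_get, ...). */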