The following usage is based on Zephyr v2.7.
Stack Usage

A thread's stack usage can be read with `k_thread_stack_space_get()`, which requires `CONFIG_INIT_STACKS` and `CONFIG_THREAD_STACK_INFO`:

```c
#include <zephyr.h>
#include <kernel.h>
#include <kernel_structs.h>

/* Zephyr fills unused stack space with 0xAA when CONFIG_INIT_STACKS is enabled */
#define STACK_GUARD_VALUE 0xAA

void check_stack_usage(k_tid_t tid, const char *thread_name, size_t stack_size)
{
    size_t unused_stack_space;
    size_t total_stack_space = stack_size;
    struct k_thread *thread = (struct k_thread *)tid;
    uint8_t *stack_ptr = (uint8_t *)thread->stack_info.start;

    k_thread_stack_space_get(tid, &unused_stack_space);

    size_t used_stack_space = total_stack_space - unused_stack_space;
    double stack_usage = (double)used_stack_space / total_stack_space;

    if (stack_usage > 0.8) {
        /* More than 80 % of the stack is in use: consider enlarging it */
    }

    if (stack_ptr[0] != STACK_GUARD_VALUE) {
        /* The lowest stack byte has been overwritten: likely stack overflow */
    }
}
```
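For reference, a minimal sketch of how such a check might be wired up. The stack object `my_stack`, the entry function `my_thread_entry`, the priority, and the 10 s polling interval are illustrative assumptions, not part of the original code.

```c
#include <zephyr.h>

/* Hypothetical usage sketch: my_stack, my_thread_entry, priority 5 and the
 * 10 s polling interval are assumptions for illustration only. */
#define MY_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread_data;

static void my_thread_entry(void *p1, void *p2, void *p3)
{
    while (1) {
        k_sleep(K_MSEC(100)); /* placeholder workload */
    }
}

void stack_monitor(void)
{
    k_tid_t my_tid = k_thread_create(&my_thread_data, my_stack,
                                     K_THREAD_STACK_SIZEOF(my_stack),
                                     my_thread_entry, NULL, NULL, NULL,
                                     5, 0, K_NO_WAIT);

    while (1) {
        check_stack_usage(my_tid, "my_thread",
                          K_THREAD_STACK_SIZEOF(my_stack));
        k_sleep(K_SECONDS(10));
    }
}
```

Passing `K_THREAD_STACK_SIZEOF(my_stack)` keeps the size argument consistent with the stack actually handed to `k_thread_create()`.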
CPU Load

CPU load can be estimated from the thread runtime statistics (`CONFIG_THREAD_RUNTIME_STATS`) by sampling `execution_cycles` over a fixed window of `LOOP_CYCLE` milliseconds:

```c
#include <zephyr.h>
#include <kernel.h>

#define LOOP_CYCLE 669/100 /* measurement period in ms */

/* Load of the current thread over the last measurement window */
static uint64_t last_runtimes;

float get_thread_cpu_load(void)
{
    struct k_thread_runtime_stats stats;

    k_thread_runtime_stats_get(k_current_get(), &stats);
    float cpu_load = (float)100 * (stats.execution_cycles - last_runtimes) /
                     (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000 * LOOP_CYCLE);
    last_runtimes = stats.execution_cycles;

    return cpu_load;
}

/* Total load of all threads; k_thread_foreach() needs CONFIG_THREAD_MONITOR */
static uint64_t last_total_runtime;

void thread_runtime_cb(const struct k_thread *thread, void *user_data)
{
    uint64_t *total_runtime = (uint64_t *)user_data;
    struct k_thread_runtime_stats stats;

    k_thread_runtime_stats_get((k_tid_t)thread, &stats);
    *total_runtime += stats.execution_cycles;
}

int get_cpu_load(void)
{
    uint64_t total_runtime = 0;
    float cpu_load;

    /* Sum the execution cycles of every thread in the system */
    k_thread_foreach(thread_runtime_cb, &total_runtime);

    cpu_load = (float)100 * (total_runtime - last_total_runtime) /
               (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000 * LOOP_CYCLE);
    /* %f in printk requires CONFIG_CBPRINTF_FP_SUPPORT */
    printk("cpu load = %.2f%%\n", cpu_load);

    last_total_runtime = total_runtime;
    return (int)cpu_load;
}
```
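A minimal sketch of how `get_cpu_load()` might be invoked periodically; the monitor thread, its stack size, and its priority are assumptions for illustration, not part of the original code.

```c
#include <zephyr.h>

/* Hypothetical usage sketch: thread name, 1 kB stack and lowest application
 * priority are assumptions. */
static void cpu_load_monitor(void *p1, void *p2, void *p3)
{
    while (1) {
        get_cpu_load();
        /* Sample once per measurement window so the percentage matches
         * the LOOP_CYCLE used in the formula. */
        k_msleep(LOOP_CYCLE);
    }
}

K_THREAD_DEFINE(cpu_load_tid, 1024, cpu_load_monitor, NULL, NULL, NULL,
                K_LOWEST_APPLICATION_THREAD_PRIO, 0, 0);
```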