@@ -19,7 +19,7 @@ void _timer_tick_handler(void);
 /* Kernel-wide control block (KCB) */
 static kcb_t kernel_state = {
     .tasks = NULL,
-    .task_current = NULL,
+    .task_current = {},
     .rt_sched = noop_rtsched,
     .timer_list = NULL, /* Managed by timer.c, but stored here. */
     .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */
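The initializer change from `NULL` to `{}` suggests `task_current` is no longer a bare pointer, and every direct access below is routed through `get_task_current()`/`set_task_current()`. The accessors themselves are not shown in this diff; a minimal sketch of one plausible shape, assuming the field became a per-CPU slot (which would explain the aggregate initializer) and using a hypothetical `hal_cpu_id()` helper:

```c
/* Sketch only: the real accessors are defined elsewhere in this patch.
 * Assumes task_current became a per-CPU array; hal_cpu_id() is a
 * hypothetical helper, not part of this diff. */
static inline list_node_t *get_task_current(void)
{
    return kcb->task_current[hal_cpu_id()];
}

static inline void set_task_current(list_node_t *node)
{
    kcb->task_current[hal_cpu_id()] = node;
}
```

Whatever the actual shape, centralizing the access means a later change (atomics, per-CPU state) touches one definition instead of every call site below.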
@@ -145,10 +145,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -366,10 +366,10 @@ void sched_dequeue_task(tcb_t *task)
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         return;
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Decrement time slice */
     if (current_task->time_slice > 0)
@@ -414,17 +414,17 @@ void sched_wakeup_task(tcb_t *task)
  */
 uint16_t sched_select_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Mark current task as ready if it was running */
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
     /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
+    list_node_t *start_node = get_task_current();
     list_node_t *node = start_node;
     int iterations = 0; /* Safety counter to prevent infinite loops */
 
@@ -441,7 +441,7 @@ uint16_t sched_select_next_task(void)
             continue;
 
         /* Found a ready task */
-        kcb->task_current = node;
+        set_task_current(node);
         task->state = TASK_RUNNING;
         task->time_slice = get_priority_timeslice(task->prio_level);
 
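Taken together, the two hunks above implement a bounded round-robin walk: start at the current node, advance circularly, and promote the first READY task found. A condensed sketch of that pattern, where the circular-successor helper `list_cnext()` and the bound `SCHED_IMAX` are illustrative names, not identifiers from this patch:

```c
/* Condensed sketch of the bounded round-robin selection above. */
#define SCHED_IMAX 1024 /* safety bound against a corrupted list */

static list_node_t *rr_pick_next(list_node_t *start)
{
    list_node_t *node = start;
    for (int i = 0; i < SCHED_IMAX; i++) {
        node = list_cnext(node); /* circular: wraps past the tail */
        tcb_t *task = node->data;
        if (task && task->state == TASK_READY)
            return node; /* first ready task after the current one */
        if (node == start)
            break; /* completed a full lap: nothing is ready */
    }
    return NULL; /* caller panics with ERR_NO_TASKS */
}
```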
@@ -478,14 +478,14 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Save current context using dedicated HAL routine that handles both
      * execution context and processor state for context switching.
      * Returns immediately if this is the restore path.
      */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -505,20 +505,20 @@ void dispatch(void)
     hal_interrupt_tick();
 
     /* Restore next task context */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
     /* Process deferred timer work during yield */
     process_deferred_timer_work();
 
     /* HAL context switching is used for preemptive scheduling. */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -530,7 +530,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
 
     sched_select_next_task(); /* Use O(1) priority scheduler */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
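Both `dispatch()` and `yield()` rely on the same contract spelled out in the comment above: `hal_context_save()` returns 0 on the save path and nonzero when control re-enters through `hal_context_restore()`. That is the `setjmp()`/`longjmp()` shape from standard C. A standalone illustration of the control flow, not the HAL implementation:

```c
#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

int main(void)
{
    if (setjmp(ctx) != 0) { /* nonzero: we arrived via longjmp */
        puts("restore path: resumed here");
        return 0;
    }
    puts("save path: context captured, continuing");
    longjmp(ctx, 1); /* analogous to hal_context_restore(ctx, 1) */
}
```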
@@ -618,8 +618,8 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    if (!get_task_current())
+        set_task_current(node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -691,12 +691,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) {
         spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
@@ -726,7 +726,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (get_task_current() == node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -813,9 +813,9 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
 
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return ((tcb_t *) get_task_current()->data)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -860,14 +860,14 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
     process_deferred_timer_work();
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
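`_sched_block()` is the primitive the IPC layer builds on, which is why its failure mode is `ERR_SEM_OPERATION`. A hypothetical caller to show where it fits; `sem_t` and its fields are illustrative and not part of this patch:

```c
/* Hypothetical semaphore "take" built on _sched_block(). Note that
 * real code must close the window between the unlock and the block,
 * or a wakeup arriving in between is lost. */
typedef struct {
    spinlock_t lock;
    int32_t count;
    queue_t *wait_q;
} sem_t;

void sem_take(sem_t *sem)
{
    uint32_t flags;
    spin_lock_irqsave(&sem->lock, &flags);
    if (sem->count > 0) {
        sem->count--;
        spin_unlock_irqrestore(&sem->lock, flags);
        return;
    }
    spin_unlock_irqrestore(&sem->lock, flags);
    _sched_block(sem->wait_q); /* enqueue self, mark blocked, reschedule */
}
```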