refactor(freertos/idf): Move xTaskIncrementTickOtherCores() to IDF addition headers

This commit moves xTaskIncrementTickOtherCores() to freertos_tasks_c_additions.h
and freertos_idf_additions_priv.h (as the API is private). This reduces the code
differences compared to upstream FreeRTOS.
Darian Leung
2023-08-28 21:55:27 +08:00
parent 8825c8dda9
commit db9d9658f9
5 changed files with 106 additions and 98 deletions
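
As a rough illustration of the move: the prototype below is the one removed from tasks.c in the diff, and a declaration along these lines would now live in freertos_idf_additions_priv.h. Only the prototype itself is taken from the diff; the include-guard style and layout shown here are assumptions for the sketch.

/* freertos_idf_additions_priv.h -- sketch only, guard and layout are assumed */
#if ( configNUM_CORES > 1 )

    /* Private IDF addition: tick handling for cores other than core 0.
     * Returns pdTRUE if the tick means a context switch should be
     * requested on the calling core. */
    BaseType_t xTaskIncrementTickOtherCores( void );

#endif /* configNUM_CORES > 1 */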


@@ -3369,90 +3369,6 @@ BaseType_t xTaskIncrementTick( void )
    return xSwitchRequired;
}

#if ( configNUM_CORES > 1 )

BaseType_t xTaskIncrementTickOtherCores( void )
{
    /* Minor optimization. This function can never switch cores mid
     * execution */
    BaseType_t xCoreID = xPortGetCoreID();
    BaseType_t xSwitchRequired = pdFALSE;

    /* This function should never be called by Core 0. */
    configASSERT( xCoreID != 0 );

    /* Called by the portable layer each time a tick interrupt occurs.
     * Increments the tick then checks to see if the new tick value will cause any
     * tasks to be unblocked. */
    traceTASK_INCREMENT_TICK( xTickCount );

    if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) 0U )
    {
        /* We need take the kernel lock here as we are about to access
         * kernel data structures. */
        taskENTER_CRITICAL_ISR( &xKernelLock );

        /* A task being unblocked cannot cause an immediate context switch
         * if preemption is turned off. */
        #if ( configUSE_PREEMPTION == 1 )
        {
            /* Check if core 0 calling xTaskIncrementTick() has
             * unblocked a task that can be run. */
            if( uxTopReadyPriority > pxCurrentTCB[ xCoreID ]->uxPriority )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* if ( configUSE_PREEMPTION == 1 ) */

        /* Tasks of equal priority to the currently running task will share
         * processing time (time slice) if preemption is on, and the application
         * writer has not explicitly turned time slicing off. */
        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
        {
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xCoreID ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */

        /* Release the previously taken kernel lock as we have finished
         * accessing the kernel data structures. */
        taskEXIT_CRITICAL_ISR( &xKernelLock );

        #if ( configUSE_PREEMPTION == 1 )
        {
            if( xYieldPending[ xCoreID ] != pdFALSE )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_PREEMPTION */
    }

    #if ( configUSE_TICK_HOOK == 1 )
    {
        vApplicationTickHook();
    }
    #endif

    return xSwitchRequired;
}
#endif /* ( configNUM_CORES > 1 ) */
/*-----------------------------------------------------------*/
#if ( configUSE_APPLICATION_TASK_TAG == 1 )
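
For context on how this private API is driven: the comments in the removed code state that it is called by the portable layer each time a tick interrupt occurs, and only on cores other than core 0 (core 0 calls xTaskIncrementTick() instead). A minimal sketch of such a per-core tick handler follows; the handler name prvTickIsrSketch and the bare portYIELD_FROM_ISR() invocation are illustrative assumptions, not the actual ESP-IDF port code.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
/* xTaskIncrementTickOtherCores() is declared in the private
 * freertos_idf_additions_priv.h header after this commit. */

/* Hypothetical per-core tick interrupt handler (sketch). Core 0 advances
 * the tick count via xTaskIncrementTick(); the other cores only check
 * whether the new tick value means they should switch tasks. */
static void prvTickIsrSketch( void )
{
    BaseType_t xSwitchRequired;

    if( xPortGetCoreID() == 0 )
    {
        xSwitchRequired = xTaskIncrementTick();
    }
    else
    {
        xSwitchRequired = xTaskIncrementTickOtherCores();
    }

    if( xSwitchRequired != pdFALSE )
    {
        portYIELD_FROM_ISR(); /* request a context switch on this core */
    }
}

Keeping the xTickCount increment on core 0 only, with the other cores just re-evaluating their ready lists, appears to be what lets xTaskIncrementTickOtherCores() skip the tick-count bookkeeping while still honouring preemption and time slicing on every core.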