freertos: Fix SMP round robin scheduling
The previous SMP FreeRTOS round robin would skip over tasks when time slicing. This commit implements a best effort round robin in which the selected task is moved to the back of the ready list, which makes the time slicing fairer.

- Documentation has been updated accordingly.
- Tidied up vTaskSwitchContext() to more closely match upstream v10.4.3.
- Increased the esp_ipc task stack size to avoid overflow.

Closes https://github.com/espressif/esp-idf/issues/7256
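For context before the diff: the core idea of the fix is that, at a given priority, the scheduler walks the ready list from the head, skips tasks it cannot run on this core, and moves the task it finally selects to the back of the list, so the skipped tasks keep their place near the head and are picked as soon as they become runnable. The standalone C sketch below models only that rotation; it is not code from this commit, and the task names, the fixed-size array standing in for the FreeRTOS ready list, and the pinned_to_other_core flag (a stand-in for the affinity and already-running-on-the-other-core checks) are illustrative assumptions.

    #include <stdio.h>
    #include <stdbool.h>

    /* Standalone model of the ready list at one priority level. */
    #define NUM_TASKS 4

    typedef struct {
        const char *name;
        bool pinned_to_other_core;   /* stand-in for "already running elsewhere / wrong affinity" */
    } Task;

    static Task *ready_list[NUM_TASKS];

    /* Pick the first runnable entry counting from the head, then move it to the
     * back of the list so the entries it was picked ahead of keep their position
     * near the head and are not skipped indefinitely. */
    static Task *schedule_best_effort_rr(void)
    {
        for (int i = 0; i < NUM_TASKS; i++) {
            Task *candidate = ready_list[i];
            if (candidate->pinned_to_other_core) {
                continue;                            /* not runnable on this core, try the next entry */
            }
            for (int j = i; j < NUM_TASKS - 1; j++) {
                ready_list[j] = ready_list[j + 1];   /* close the gap left by the chosen entry */
            }
            ready_list[NUM_TASKS - 1] = candidate;   /* append the chosen task at the tail */
            return candidate;
        }
        return NULL;                                 /* nothing runnable at this priority */
    }

    int main(void)
    {
        Task tasks[NUM_TASKS] = {
            { "A", false }, { "B", true }, { "C", false }, { "D", false },
        };
        for (int i = 0; i < NUM_TASKS; i++) {
            ready_list[i] = &tasks[i];
        }
        for (int tick = 0; tick < 6; tick++) {
            Task *picked = schedule_best_effort_rr();
            printf("tick %d -> %s\n", tick, picked ? picked->name : "idle");
        }
        return 0;
    }

Run as-is, the loop prints A, C, D repeating: B is skipped while it stands in for a task this core cannot run, but it stays at the head of the list and would be chosen first the moment it becomes eligible. That is the fairness property the best effort round robin is after; the actual change, shown in the diff below, does the same rotation with the FreeRTOS list API.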
@@ -143,6 +143,9 @@

/*-----------------------------------------------------------*/

#ifdef ESP_PLATFORM
#define taskSELECT_HIGHEST_PRIORITY_TASK() taskSelectHighestPriorityTaskSMP()
#else //ESP_PLATFORM
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
    { \
        UBaseType_t uxTopPriority = uxTopReadyPriority; \

@@ -159,6 +162,7 @@
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
        uxTopReadyPriority = uxTopPriority; \
    } /* taskSELECT_HIGHEST_PRIORITY_TASK */
#endif //ESP_PLATFORM

/*-----------------------------------------------------------*/

@@ -3358,12 +3362,102 @@ BaseType_t xTaskIncrementTick( void )
#endif /* configUSE_APPLICATION_TASK_TAG */
/*-----------------------------------------------------------*/

#ifdef ESP_PLATFORM
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
static void taskSelectHighestPriorityTaskSMP( void )
{
    /* This function is called from a critical section. So some optimizations are made */
    BaseType_t uxCurPriority;
    BaseType_t xTaskScheduled = pdFALSE;
    BaseType_t xNewTopPrioritySet = pdFALSE;
    BaseType_t xCoreID = xPortGetCoreID(); /* Optimization: Read once */

    /* Search for tasks, starting from the highest ready priority. If nothing is
     * found, we eventually default to the IDLE tasks at priority 0 */
    for ( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- )
    {
        /* Check if current priority has one or more ready tasks. Skip if none */
        if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurPriority ] ) ) )
        {
            continue;
        }

        /* Save a copy of highest priority that has a ready state task */
        if( xNewTopPrioritySet == pdFALSE )
        {
            xNewTopPrioritySet = pdTRUE;
            uxTopReadyPriority = uxCurPriority;
        }

        /* We now search this priority's ready task list for a runnable task.
         * We always start searching from the head of the list, so we reset
         * pxIndex to point to the tail so that we start walking the list from
         * the first item */
        pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );

        /* Get the first item on the list */
        TCB_t * pxTCBCur;
        TCB_t * pxTCBFirst;
        listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
        pxTCBFirst = pxTCBCur;
        do
        {
            /* Check if the current task is currently being executed. However, if
             * it's being executed by the current core, we can still schedule it.
             * Todo: Each task can store a xTaskRunState, instead of needing to
             * check each core */
            UBaseType_t ux;
            for( ux = 0; ux < ( UBaseType_t )configNUM_CORES; ux++ )
            {
                if ( ux == xCoreID )
                {
                    continue;
                }
                else if ( pxCurrentTCB[ux] == pxTCBCur )
                {
                    /* Current task is already being executed. Get the next task */
                    goto get_next_task;
                }
            }

            /* Check if the current task has a compatible affinity */
            if ( ( pxTCBCur->xCoreID != xCoreID ) && ( pxTCBCur->xCoreID != tskNO_AFFINITY ) )
            {
                goto get_next_task;
            }

            /* The current task is runnable. Schedule it */
            pxCurrentTCB[ xCoreID ] = pxTCBCur;
            xTaskScheduled = pdTRUE;

            /* Move the current task's list item to the back of the list in order
             * to implement best effort round robin. To do this, we need to reset
             * the pxIndex to point to the tail again. */
            pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );
            uxListRemove( &( pxTCBCur->xStateListItem ) );
            vListInsertEnd( &( pxReadyTasksLists[ uxCurPriority ] ), &( pxTCBCur->xStateListItem ) );
            break;

get_next_task:
            /* The current task cannot be scheduled. Get the next task in the list */
            listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
        } while( pxTCBCur != pxTCBFirst ); /* Check to see if we've walked the entire list */
    }

    assert( xTaskScheduled == pdTRUE ); /* At this point, a task MUST have been scheduled */
}
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#endif //ESP_PLATFORM

void vTaskSwitchContext( void )
{
    //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
    //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
    int irqstate = portSET_INTERRUPT_MASK_FROM_ISR();

#ifdef ESP_PLATFORM
    /* vTaskSwitchContext is called either from:
     * - the ISR dispatcher when returning from an ISR (interrupts will already be disabled)
     * - vTaskSuspend() which is not in a critical section
     * Therefore, we enter a critical section ISR version to ensure safety */
    taskENTER_CRITICAL_ISR();
#endif // ESP_PLATFORM
    if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
    {
        /* The scheduler is currently suspended - do not allow a context
@@ -3373,7 +3467,9 @@ void vTaskSwitchContext( void )
    else
    {
        xYieldPending[ xPortGetCoreID() ] = pdFALSE;
#ifdef ESP_PLATFORM
        xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
#endif // ESP_PLATFORM
        traceTASK_SWITCHED_OUT();

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
@@ -3391,7 +3487,6 @@ void vTaskSwitchContext( void )
             * overflows. The guard against negative values is to protect
             * against suspect run time stat counter implementations - which
             * are provided by the application, not the kernel. */
            taskENTER_CRITICAL_ISR();
            if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
            {
                pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
@@ -3400,134 +3495,59 @@ void vTaskSwitchContext( void )
            {
                mtCOVERAGE_TEST_MARKER();
            }
            taskEXIT_CRITICAL_ISR();

            ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
        }
        #endif /* configGENERATE_RUN_TIME_STATS */

        /* Check for stack overflow, if configured. */
#ifdef ESP_PLATFORM
        taskFIRST_CHECK_FOR_STACK_OVERFLOW();
        taskSECOND_CHECK_FOR_STACK_OVERFLOW();
#else
        taskCHECK_FOR_STACK_OVERFLOW();

        /* Select a new task to run */

        /*
         We cannot do taskENTER_CRITICAL_ISR(); here because it saves the interrupt context to the task tcb, and we're
         swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
         need to acquire the mutex.
        */
        vPortCPUAcquireMutex( &xTaskQueueMutex );

#if !configUSE_PORT_OPTIMISED_TASK_SELECTION
        unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
        unsigned portBASE_TYPE holdTop=pdFALSE;
        tskTCB * pxTCB;

        portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
        /*
         * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
         * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
         * first one. ToDo: fix this.
         * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
         * core, potentially not giving the skipped-over processes any time.)
         */

        while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
        {
            resetListHead = pdFALSE;
            // Nothing to do for empty lists
            if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {

                ableToSchedule = pdFALSE;
                tskTCB * pxRefTCB;

                /* Remember the current list item so that we
                   can detect if all items have been inspected.
                   Once this happens, we move on to a lower
                   priority list (assuming nothing is suitable
                   for scheduling). Note: This can return NULL if
                   the list index is at the listItem */
                pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;

                if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
                    //pxIndex points to the list end marker. Skip that and just get the next item.
                    listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                }

                do {
                    listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                    /* Find out if the next task in the list is
                       already being executed by another core */
                    foundNonExecutingWaiter = pdTRUE;
                    portBASE_TYPE i = 0;
                    for ( i=0; i<configNUM_CORES; i++ ) {
                        if (i == xPortGetCoreID()) {
                            continue;
                        } else if (pxCurrentTCB[i] == pxTCB) {
                            holdTop=pdTRUE; //keep this as the top prio, for the other CPU
                            foundNonExecutingWaiter = pdFALSE;
                            break;
                        }
                    }

                    if (foundNonExecutingWaiter == pdTRUE) {
                        /* If the task is not being executed
                           by another core and its affinity is
                           compatible with the current one,
                           prepare it to be swapped in */
                        if (pxTCB->xCoreID == tskNO_AFFINITY) {
                            pxCurrentTCB[xPortGetCoreID()] = pxTCB;
                            ableToSchedule = pdTRUE;
                        } else if (pxTCB->xCoreID == xPortGetCoreID()) {
                            pxCurrentTCB[xPortGetCoreID()] = pxTCB;
                            ableToSchedule = pdTRUE;
                        } else {
                            ableToSchedule = pdFALSE;
                            holdTop=pdTRUE; //keep this as the top prio, for the other CPU
                        }
                    } else {
                        ableToSchedule = pdFALSE;
                    }

                    if (ableToSchedule == pdFALSE) {
                        resetListHead = pdTRUE;
                    } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
                        tskTCB * pxResetTCB;
                        do {
                            listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                        } while(pxResetTCB != pxRefTCB);
                    }
                } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
            } else {
                if (!holdTop) --uxTopReadyPriority;
        /* Before the currently running task is switched out, save its errno. */
        #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
            }
                --uxDynamicTopReady;
            }

#else
            //For Unicore targets we can keep the current FreeRTOS O(1)
            //Scheduler. I hope to optimize better the scheduler for
            //Multicore settings -- This will involve to create a per
            //affinity ready task list which will impact hugely on
            //tasks module
            taskSELECT_HIGHEST_PRIORITY_TASK();
#endif

            traceTASK_SWITCHED_IN();
            xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;

#ifdef ESP_PLATFORM
            //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
            //exit the function.
            vPortCPUReleaseMutex( &xTaskQueueMutex );
#endif // ESP_PLATFORM

        /* Select a new task to run using either the generic C or port
         * optimised asm code. */
        taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        traceTASK_SWITCHED_IN();

#ifdef ESP_PLATFORM
        xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
#if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
        vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
#endif
#else
        /* After the new task is switched in, update the global errno. */
        #if ( configUSE_POSIX_ERRNO == 1 )
        {
            FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
        }
        #endif

        #if ( configUSE_NEWLIB_REENTRANT == 1 )
        {
            /* Switch Newlib's _impure_ptr variable to point to the _reent
             * structure specific to this task.
             * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
             * for additional information. */
            _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
        }
        #endif /* configUSE_NEWLIB_REENTRANT */
#endif // ESP_PLATFORM
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(irqstate);
#ifdef ESP_PLATFORM
    /* Exit the critical section previously entered */
    taskEXIT_CRITICAL_ISR();
#endif // ESP_PLATFORM
}
/*-----------------------------------------------------------*/