esp_ringbuf: Replace internal semaphores with task event lists

This commit replaces the existing TX/RX semaphores with FreeRTOS task event
lists (similar to how FreeRTOS queues implement task blocking). Using task
event lists allows the ring buffer object to be smaller and various ring buffer
functions to be faster. The ring buffer queue set implementation was also
updated as a result.
Darian Leung
2023-02-23 12:54:06 +08:00
parent df13918c80
commit 443d87c2ff
4 changed files with 295 additions and 383 deletions
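
For readers less familiar with the task event list mechanism referenced in the commit message, the following is a minimal, stand-alone sketch of the wait/wake pattern the new implementation uses. It is not part of the commit: EventListExample_t, prvWaitOnCondition() and prvSignalCondition() are hypothetical names, and the sketch assumes the ESP-IDF FreeRTOS port (where portENTER_CRITICAL()/portEXIT_CRITICAL() take a spinlock). It relies only on kernel calls that also appear in the diff below: vTaskPlaceOnEventList(), xTaskRemoveFromEventList(), vTaskInternalSetTimeOutState() and xTaskCheckForTimeOut().

#include "freertos/FreeRTOS.h"
#include "freertos/list.h"
#include "freertos/task.h"

//Hypothetical object, for illustration only
typedef struct {
    List_t xTasksWaiting;      //Tasks blocked on this object, kept in priority order
    BaseType_t xConditionMet;  //Condition the waiting task is blocking on
    portMUX_TYPE mux;          //Spinlock protecting this object on SMP targets
} EventListExample_t;

static void prvInitExample(EventListExample_t *pxObj)
{
    vListInitialise(&pxObj->xTasksWaiting);
    pxObj->xConditionMet = pdFALSE;
    portMUX_INITIALIZE(&pxObj->mux);
}

//Waiter side: same shape as the blocking loops in prvSendAcquireGeneric()/prvReceiveGeneric() below
static BaseType_t prvWaitOnCondition(EventListExample_t *pxObj, TickType_t xTicksToWait)
{
    BaseType_t xReturn = pdFALSE;
    BaseType_t xExitLoop = pdFALSE;
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;

    while (xExitLoop == pdFALSE) {
        portENTER_CRITICAL(&pxObj->mux);
        if (pxObj->xConditionMet == pdTRUE) {
            //Condition already satisfied. Succeed without blocking
            xReturn = pdTRUE;
            xExitLoop = pdTRUE;
        } else if (xTicksToWait == (TickType_t) 0) {
            //No block time. Return immediately
            xExitLoop = pdTRUE;
        } else {
            if (xEntryTimeSet == pdFALSE) {
                //First block. Record the entry time so the timeout spans multiple wake-ups
                vTaskInternalSetTimeOutState(&xTimeOut);
                xEntryTimeSet = pdTRUE;
            }
            if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
                //Not timed out yet. Park this task on the event list and yield
                vTaskPlaceOnEventList(&pxObj->xTasksWaiting, xTicksToWait);
                portYIELD_WITHIN_API();
            } else {
                //Timed out
                xExitLoop = pdTRUE;
            }
        }
        portEXIT_CRITICAL(&pxObj->mux);
    }
    return xReturn;
}

//Signaler side: same shape as the unblock paths in xRingbufferSendComplete()/vRingbufferReturnItem() below
static void prvSignalCondition(EventListExample_t *pxObj)
{
    portENTER_CRITICAL(&pxObj->mux);
    pxObj->xConditionMet = pdTRUE;
    //If a task is blocked on the event list, unblock the highest-priority waiter
    if (listLIST_IS_EMPTY(&pxObj->xTasksWaiting) == pdFALSE) {
        if (xTaskRemoveFromEventList(&pxObj->xTasksWaiting) == pdTRUE) {
            //The unblocked task will preempt us. Trigger a yield
            portYIELD_WITHIN_API();
        }
    }
    portEXIT_CRITICAL(&pxObj->mux);
}

Because the event list lives directly inside the protected structure, no separate semaphore object is needed, which is where the size and speed savings described above come from.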


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -7,10 +7,13 @@
#include <stdlib.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/list.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
// ------------------------------------------------- Macros and Types --------------------------------------------------
//32-bit alignment macros
#define rbALIGN_MASK (0x03)
#define rbALIGN_SIZE( xSize ) ( ( xSize + rbALIGN_MASK ) & ~rbALIGN_MASK )
@@ -21,6 +24,7 @@
#define rbBYTE_BUFFER_FLAG ( ( UBaseType_t ) 2 ) //The ring buffer is a byte buffer
#define rbBUFFER_FULL_FLAG ( ( UBaseType_t ) 4 ) //The ring buffer is currently full (write pointer == free pointer)
#define rbBUFFER_STATIC_FLAG ( ( UBaseType_t ) 8 ) //The ring buffer is statically allocated
#define rbUSING_QUEUE_SET ( ( UBaseType_t ) 16 ) //The ring buffer has been added to a queue set
//Item flags
#define rbITEM_FREE_FLAG ( ( UBaseType_t ) 1 ) //Item has been retrieved and returned by application, free to overwrite
@@ -28,15 +32,6 @@
#define rbITEM_SPLIT_FLAG ( ( UBaseType_t ) 4 ) //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that rest of the data is wrapped around
#define rbITEM_WRITTEN_FLAG ( ( UBaseType_t ) 8 ) //Item has been written to by the application, thus can be read
//Static allocation related
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xTransSemStatic) )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xRecvSemStatic) )
#else
#define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xTransSemHandle )
#define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xRecvSemHandle )
#endif
typedef struct {
//The size of this structure must be 32-bit aligned
size_t xItemLen;
@@ -71,52 +66,25 @@ typedef struct RingbufferDefinition {
uint8_t *pucTail; //Pointer to the end of the ring buffer storage area
BaseType_t xItemsWaiting; //Number of items/bytes(for byte buffers) currently in ring buffer that have not yet been read
/*
* TransSem: Binary semaphore used to indicate to a blocked transmitting task
* that more free space has become available or that the block has
* timed out.
*
* RecvSem: Binary semaphore used to indicate to a blocked receiving task that
* new data/item has been written to the ring buffer.
*
* Note - When static allocation is enabled, the two semaphores are always
* statically stored in the ring buffer's control structure
* regardless of whether the ring buffer is allocated dynamically or
* statically. When static allocation is disabled, the two semaphores
* are allocated dynamically and their handles stored instead, thus
* making the ring buffer's control structure slightly smaller when
* static allocation is disabled.
*/
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
StaticSemaphore_t xTransSemStatic;
StaticSemaphore_t xRecvSemStatic;
#else
SemaphoreHandle_t xTransSemHandle;
SemaphoreHandle_t xRecvSemHandle;
#endif
List_t xTasksWaitingToSend; //List of tasks that are blocked waiting to send/acquire onto this ring buffer. Stored in priority order.
List_t xTasksWaitingToReceive; //List of tasks that are blocked waiting to receive from this ring buffer. Stored in priority order.
QueueSetHandle_t xQueueSet; //Ring buffer's read queue set handle.
portMUX_TYPE mux; //Spinlock required for SMP
} Ringbuffer_t;
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
#if __GNUC_PREREQ(4, 6)
_Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
#endif
#endif
/*
Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
FreeRTOS need a maximum count, and allocate more memory the larger the maximum count is. Here, we
would need to set the maximum to the maximum number of times a null-byte unit fits in the buffer,
which is quite high and so would waste a fair amount of memory.
*/
/* --------------------------- Static Declarations -------------------------- */
// ------------------------------------------------ Forward Declares ---------------------------------------------------
/*
* WARNING: All of the following static functions (except generic functions)
* ARE NOT THREAD SAFE. Therefore they should only be called within a critical
* section (using spin locks)
*/
//Initialize a ring buffer after space has been allocated for it
static void prvInitializeNewRingbuffer(size_t xBufferSize,
RingbufferType_t xBufferType,
@@ -201,12 +169,23 @@ static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);
//Get the maximum size an item that can currently have if sent to a byte buffer
static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);
/**
* Generic function used to retrieve an item/data from ring buffers. If called on
* an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
* a split item will be retrieved. xMaxSize will only take effect if called on
* byte buffers.
*/
/*
Generic function used to send or acquire an item/buffer.
- If sending, set ppvItem to NULL. pvItem remains unchanged on failure.
- If acquiring, set pvItem to NULL. ppvItem remains unchanged on failure.
*/
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
const void *pvItem,
void **ppvItem,
size_t xItemSize,
TickType_t xTicksToWait);
/*
Generic function used to retrieve an item/data from ring buffers. If called on
an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
a split item will be retrieved. xMaxSize will only take effect if called on
byte buffers. xItemSize must remain unchanged if no item is retrieved.
*/
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
void **pvItem1,
void **pvItem2,
@@ -215,7 +194,7 @@ static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
size_t xMaxSize,
TickType_t xTicksToWait);
//Generic function used to retrieve an item/data from ring buffers in an ISR
//From ISR version of prvReceiveGeneric()
static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
void **pvItem1,
void **pvItem2,
@@ -223,7 +202,7 @@ static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
size_t *xItemSize2,
size_t xMaxSize);
/* --------------------------- Static Definitions --------------------------- */
// ------------------------------------------------ Static Functions ---------------------------------------------------
static void prvInitializeNewRingbuffer(size_t xBufferSize,
RingbufferType_t xBufferType,
@@ -272,7 +251,11 @@ static void prvInitializeNewRingbuffer(size_t xBufferSize,
pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
}
xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxNewRingbuffer));
vListInitialise(&pxNewRingbuffer->xTasksWaitingToSend);
vListInitialise(&pxNewRingbuffer->xTasksWaitingToReceive);
pxNewRingbuffer->xQueueSet = NULL;
portMUX_INITIALIZE(&pxNewRingbuffer->mux);
}
@@ -759,6 +742,73 @@ static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
return xFreeSize;
}
static BaseType_t prvSendAcquireGeneric(Ringbuffer_t *pxRingbuffer,
const void *pvItem,
void **ppvItem,
size_t xItemSize,
TickType_t xTicksToWait)
{
BaseType_t xReturn = pdFALSE;
BaseType_t xExitLoop = pdFALSE;
BaseType_t xEntryTimeSet = pdFALSE;
BaseType_t xNotifyQueueSet = pdFALSE;
TimeOut_t xTimeOut;
while (xExitLoop == pdFALSE) {
portENTER_CRITICAL(&pxRingbuffer->mux);
if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
//xItemSize will fit. Copy or acquire the buffer immediately
if (ppvItem) {
//Acquire the buffer
*ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
} else {
//Copy item into buffer
pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
if (pxRingbuffer->xQueueSet) {
//If ring buffer was added to a queue set, notify the queue set
xNotifyQueueSet = pdTRUE;
} else {
//If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
//The unblocked task will preempt us. Trigger a yield here.
portYIELD_WITHIN_API();
}
}
}
}
xReturn = pdTRUE;
xExitLoop = pdTRUE;
goto loop_end;
} else if (xTicksToWait == (TickType_t) 0) {
//No block time. Return immediately.
xExitLoop = pdTRUE;
goto loop_end;
} else if (xEntryTimeSet == pdFALSE) {
//This is our first block. Set entry time
vTaskInternalSetTimeOutState(&xTimeOut);
xEntryTimeSet = pdTRUE;
}
if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
//Not timed out yet. Block the current task
vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToSend, xTicksToWait);
portYIELD_WITHIN_API();
} else {
//We have timed out
xExitLoop = pdTRUE;
}
loop_end:
portEXIT_CRITICAL(&pxRingbuffer->mux);
}
//Defer notifying the queue set until we are outside the loop and critical section.
if (xNotifyQueueSet == pdTRUE) {
xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
}
return xReturn;
}
static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
void **pvItem1,
void **pvItem2,
@@ -768,30 +818,31 @@ static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
TickType_t xTicksToWait)
{
BaseType_t xReturn = pdFALSE;
BaseType_t xReturnSemaphore = pdFALSE;
TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
TickType_t xTicksRemaining = xTicksToWait;
while (xTicksRemaining <= xTicksToWait) { //xTicksToWait will underflow once xTaskGetTickCount() > ticks_end
//Block until more free space becomes available or timeout
if (xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
xReturn = pdFALSE; //Timed out attempting to get semaphore
break;
}
BaseType_t xExitLoop = pdFALSE;
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
//Semaphore obtained, check if item can be retrieved
#ifdef __clang_analyzer__
// Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
return pdFALSE;
}
#endif /*__clang_analyzer__ */
while (xExitLoop == pdFALSE) {
portENTER_CRITICAL(&pxRingbuffer->mux);
if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
//Item is available for retrieval
//Item/data is available for retrieval
BaseType_t xIsSplit = pdFALSE;
if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
//Second argument (pxIsSplit) is unused for byte buffers
//Read up to xMaxSize bytes from byte buffer
*pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
} else {
//Third argument (xMaxSize) is unused for no-split/allow-split buffers
//Get (first) item from no-split/allow-split buffers
*pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
}
//Check for item split if configured to do so
if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && (pvItem2 != NULL) && (xItemSize2 != NULL)) {
//If split buffer, check for split items
if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
if (xIsSplit == pdTRUE) {
*pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
@@ -801,26 +852,30 @@ static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
}
}
xReturn = pdTRUE;
if (pxRingbuffer->xItemsWaiting > 0) {
xReturnSemaphore = pdTRUE;
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
break;
xExitLoop = pdTRUE;
goto loop_end;
} else if (xTicksToWait == (TickType_t) 0) {
//No block time. Return immediately.
xExitLoop = pdTRUE;
goto loop_end;
} else if (xEntryTimeSet == pdFALSE) {
//This is our first block. Set entry time
vTaskInternalSetTimeOutState(&xTimeOut);
xEntryTimeSet = pdTRUE;
}
//No item available for retrieval, adjust ticks and take the semaphore again
if (xTicksToWait != portMAX_DELAY) {
xTicksRemaining = xTicksEnd - xTaskGetTickCount();
if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
//Not timed out yet. Block the current task
vTaskPlaceOnEventList(&pxRingbuffer->xTasksWaitingToReceive, xTicksToWait);
portYIELD_WITHIN_API();
} else {
//We have timed out.
xExitLoop = pdTRUE;
}
loop_end:
portEXIT_CRITICAL(&pxRingbuffer->mux);
/*
* Gap between critical section and re-acquiring of the semaphore. If
* semaphore is given now, priority inversion might occur (see docs)
*/
}
if (xReturnSemaphore == pdTRUE) {
xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)); //Give semaphore back so other tasks can retrieve
}
return xReturn;
}
@@ -832,20 +887,26 @@ static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
size_t xMaxSize)
{
BaseType_t xReturn = pdFALSE;
BaseType_t xReturnSemaphore = pdFALSE;
#ifdef __clang_analyzer__
// Teach clang-tidy that if NULL pointers are provided, this function will never dereference them
if (!pvItem1 || !pvItem2 || !xItemSize1 || !xItemSize2) {
return pdFALSE;
}
#endif /*__clang_analyzer__ */
portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
if(prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
BaseType_t xIsSplit = pdFALSE;
if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
//Second argument (pxIsSplit) is unused for byte buffers
//Read up to xMaxSize bytes from byte buffer
*pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
} else {
//Third argument (xMaxSize) is unused for no-split/allow-split buffers
//Get (first) item from no-split/allow-split buffers
*pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
}
//Check for item split if configured to do so
if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && pvItem2 != NULL && xItemSize2 != NULL) {
//If split buffer, check for split items
if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
if (xIsSplit == pdTRUE) {
*pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
@@ -855,19 +916,15 @@ static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
}
}
xReturn = pdTRUE;
if (pxRingbuffer->xItemsWaiting > 0) {
xReturnSemaphore = pdTRUE;
}
} else {
xReturn = pdFALSE;
}
portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
if (xReturnSemaphore == pdTRUE) {
xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), NULL); //Give semaphore back so other tasks can retrieve
}
return xReturn;
}
/* --------------------------- Public Definitions --------------------------- */
// ------------------------------------------------ Public Functions ---------------------------------------------------
RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
{
@@ -884,25 +941,6 @@ RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferTy
goto err;
}
//Initialize Semaphores
#if ( configSUPPORT_STATIC_ALLOCATION == 1)
//We don't use the handles for static semaphores, and xSemaphoreCreateBinaryStatic will never fail thus no need to check static case
xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
#else
pxNewRingbuffer->xTransSemHandle = xSemaphoreCreateBinary();
pxNewRingbuffer->xRecvSemHandle = xSemaphoreCreateBinary();
if (pxNewRingbuffer->xTransSemHandle == NULL || pxNewRingbuffer->xRecvSemHandle == NULL) {
if (pxNewRingbuffer->xTransSemHandle != NULL) {
vSemaphoreDelete(pxNewRingbuffer->xTransSemHandle);
}
if (pxNewRingbuffer->xRecvSemHandle != NULL) {
vSemaphoreDelete(pxNewRingbuffer->xRecvSemHandle);
}
goto err;
}
#endif
prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
return (RingbufHandle_t)pxNewRingbuffer;
@@ -918,7 +956,6 @@ RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
}
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
RingbufferType_t xBufferType,
uint8_t *pucRingbufferStorage,
@@ -934,22 +971,19 @@ RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
}
Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
return (RingbufHandle_t)pxNewRingbuffer;
}
#endif
BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
//Check arguments
configASSERT(pxRingbuffer);
configASSERT(ppvItem != NULL || xItemSize == 0);
//currently only supported in NoSplit buffers
configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);
configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0); //Send acquire currently only supported in NoSplit buffers
*ppvItem = NULL;
if (xItemSize > pxRingbuffer->xMaxItemSize) {
@@ -959,61 +993,38 @@ BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, s
return pdTRUE; //Sending 0 bytes to byte buffer has no effect
}
//Attempt to send an item
BaseType_t xReturn = pdFALSE;
BaseType_t xReturnSemaphore = pdFALSE;
TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
TickType_t xTicksRemaining = xTicksToWait;
while (xTicksRemaining <= xTicksToWait) { //xTicksToWait will underflow once xTaskGetTickCount() > ticks_end
//Block until more free space becomes available or timeout
if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
xReturn = pdFALSE;
break;
}
//Semaphore obtained, check if item can fit
portENTER_CRITICAL(&pxRingbuffer->mux);
if(pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
//Item will fit, copy item
*ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
xReturn = pdTRUE;
//Check if the free semaphore should be returned to allow other tasks to send
if (prvGetFreeSize(pxRingbuffer) > 0) {
xReturnSemaphore = pdTRUE;
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
break;
}
//Item doesn't fit, adjust ticks and take the semaphore again
if (xTicksToWait != portMAX_DELAY) {
xTicksRemaining = xTicksEnd - xTaskGetTickCount();
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
/*
* Gap between critical section and re-acquiring of the semaphore. If
* semaphore is given now, priority inversion might occur (see docs)
*/
}
if (xReturnSemaphore == pdTRUE) {
xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer)); //Give back semaphore so other tasks can acquire
}
return xReturn;
return prvSendAcquireGeneric(pxRingbuffer, NULL, ppvItem, xItemSize, xTicksToWait);
}
BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
BaseType_t xNotifyQueueSet = pdFALSE;
//Check arguments
configASSERT(pxRingbuffer);
configASSERT(pvItem != NULL);
configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);
portENTER_CRITICAL(&pxRingbuffer->mux);
prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
if (pxRingbuffer->xQueueSet) {
//If ring buffer was added to a queue set, notify the queue set
xNotifyQueueSet = pdTRUE;
} else {
//If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
//The unblocked task will preempt us. Trigger a yield here.
portYIELD_WITHIN_API();
}
}
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
if (xNotifyQueueSet == pdTRUE) {
xQueueSend((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, 0);
}
return pdTRUE;
}
@@ -1022,8 +1033,9 @@ BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
size_t xItemSize,
TickType_t xTicksToWait)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
//Check arguments
configASSERT(pxRingbuffer);
configASSERT(pvItem != NULL || xItemSize == 0);
if (xItemSize > pxRingbuffer->xMaxItemSize) {
@@ -1033,49 +1045,7 @@ BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
return pdTRUE; //Sending 0 bytes to byte buffer has no effect
}
//Attempt to send an item
BaseType_t xReturn = pdFALSE;
BaseType_t xReturnSemaphore = pdFALSE;
TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
TickType_t xTicksRemaining = xTicksToWait;
while (xTicksRemaining <= xTicksToWait) { //xTicksToWait will underflow once xTaskGetTickCount() > ticks_end
//Block until more free space becomes available or timeout
if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
xReturn = pdFALSE;
break;
}
//Semaphore obtained, check if item can fit
portENTER_CRITICAL(&pxRingbuffer->mux);
if(pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
//Item will fit, copy item
pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
xReturn = pdTRUE;
//Check if the free semaphore should be returned to allow other tasks to send
if (prvGetFreeSize(pxRingbuffer) > 0) {
xReturnSemaphore = pdTRUE;
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
break;
}
//Item doesn't fit, adjust ticks and take the semaphore again
if (xTicksToWait != portMAX_DELAY) {
xTicksRemaining = xTicksEnd - xTaskGetTickCount();
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
/*
* Gap between critical section and re-acquiring of the semaphore. If
* semaphore is given now, priority inversion might occur (see docs)
*/
}
if (xReturnSemaphore == pdTRUE) {
xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer)); //Give back semaphore so other tasks can send
}
if (xReturn == pdTRUE) {
//Indicate item was successfully sent
xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
}
return xReturn;
return prvSendAcquireGeneric(pxRingbuffer, pvItem, NULL, xItemSize, xTicksToWait);
}
BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
@@ -1083,8 +1053,11 @@ BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
size_t xItemSize,
BaseType_t *pxHigherPriorityTaskWoken)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
BaseType_t xNotifyQueueSet = pdFALSE;
BaseType_t xReturn;
//Check arguments
configASSERT(pxRingbuffer);
configASSERT(pvItem != NULL || xItemSize == 0);
if (xItemSize > pxRingbuffer->xMaxItemSize) {
@@ -1094,45 +1067,45 @@ BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
return pdTRUE; //Sending 0 bytes to byte buffer has no effect
}
//Attempt to send an item
BaseType_t xReturn;
BaseType_t xReturnSemaphore = pdFALSE;
portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
xReturn = pdTRUE;
//Check if the free semaphore should be returned to allow other tasks to send
if (prvGetFreeSize(pxRingbuffer) > 0) {
xReturnSemaphore = pdTRUE;
if (pxRingbuffer->xQueueSet) {
//If ring buffer was added to a queue set, notify the queue set
xNotifyQueueSet = pdTRUE;
} else {
//If a task was waiting for data to arrive on the ring buffer, unblock it immediately.
if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToReceive) == pdFALSE) {
if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToReceive) == pdTRUE) {
//The unblocked task will preempt us. Record that a context switch is required.
if (pxHigherPriorityTaskWoken != NULL) {
*pxHigherPriorityTaskWoken = pdTRUE;
}
}
}
}
xReturn = pdTRUE;
} else {
xReturn = pdFALSE;
}
portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
if (xReturnSemaphore == pdTRUE) {
xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken); //Give back semaphore so other tasks can send
}
if (xReturn == pdTRUE) {
//Indicate item was successfully sent
xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
if (xNotifyQueueSet == pdTRUE) {
xQueueSendFromISR((QueueHandle_t)pxRingbuffer->xQueueSet, (QueueSetMemberHandle_t *)&pxRingbuffer, pxHigherPriorityTaskWoken);
}
return xReturn;
}
void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
//Check arguments
configASSERT(pxRingbuffer && pxItemSize);
//Attempt to retrieve an item
void *pvTempItem;
size_t xTempSize;
if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0, xTicksToWait) == pdTRUE) {
if (pxItemSize != NULL) {
*pxItemSize = xTempSize;
}
if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0, xTicksToWait) == pdTRUE) {
return pvTempItem;
} else {
return NULL;
@@ -1141,17 +1114,14 @@ void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickTy
void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
//Check arguments
configASSERT(pxRingbuffer && pxItemSize);
//Attempt to retrieve an item
void *pvTempItem;
size_t xTempSize;
if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0) == pdTRUE) {
if (pxItemSize != NULL) {
*pxItemSize = xTempSize;
}
if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, 0) == pdTRUE) {
return pvTempItem;
} else {
return NULL;
@@ -1165,37 +1135,13 @@ BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
size_t *pxTailItemSize,
TickType_t xTicksToWait)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
//Attempt to retrieve multiple items
void *pvTempHeadItem, *pvTempTailItem;
size_t xTempHeadSize, xTempTailSize;
if (prvReceiveGeneric(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0, xTicksToWait) == pdTRUE) {
//At least one item was retrieved
*ppvHeadItem = pvTempHeadItem;
if(pxHeadItemSize != NULL){
*pxHeadItemSize = xTempHeadSize;
}
//Check to see if a second item was also retrieved
if (pvTempTailItem != NULL) {
*ppvTailItem = pvTempTailItem;
if (pxTailItemSize != NULL) {
*pxTailItemSize = xTempTailSize;
}
} else {
*ppvTailItem = NULL;
}
return pdTRUE;
} else {
//No items retrieved
*ppvHeadItem = NULL;
*ppvTailItem = NULL;
return pdFALSE;
}
//Check arguments
configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
return prvReceiveGeneric(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0, xTicksToWait);
}
BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
@@ -1204,36 +1150,13 @@ BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
size_t *pxHeadItemSize,
size_t *pxTailItemSize)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
//Attempt to retrieve multiple items
void *pvTempHeadItem = NULL, *pvTempTailItem = NULL;
size_t xTempHeadSize, xTempTailSize;
if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0) == pdTRUE) {
//At least one item was received
*ppvHeadItem = pvTempHeadItem;
if (pxHeadItemSize != NULL) {
*pxHeadItemSize = xTempHeadSize;
}
//Check to see if a second item was also retrieved
if (pvTempTailItem != NULL) {
*ppvTailItem = pvTempTailItem;
if (pxTailItemSize != NULL) {
*pxTailItemSize = xTempTailSize;
}
} else {
*ppvTailItem = NULL;
}
return pdTRUE;
} else {
*ppvHeadItem = NULL;
*ppvTailItem = NULL;
return pdFALSE;
}
//Check arguments
configASSERT(pxRingbuffer && ppvHeadItem && ppvTailItem && pxHeadItemSize && pxTailItemSize);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
return prvReceiveGenericFromISR(pxRingbuffer, ppvHeadItem, ppvTailItem, pxHeadItemSize, pxTailItemSize, 0);
}
void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
@@ -1241,21 +1164,18 @@ void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
TickType_t xTicksToWait,
size_t xMaxSize)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
//Check arguments
configASSERT(pxRingbuffer && pxItemSize);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
if (xMaxSize == 0) {
return NULL;
}
//Attempt to retrieve up to xMaxSize bytes
void *pvTempItem;
size_t xTempSize;
if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
if (pxItemSize != NULL) {
*pxItemSize = xTempSize;
}
if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
return pvTempItem;
} else {
return NULL;
@@ -1264,21 +1184,18 @@ void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
{
//Check arguments
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
//Check arguments
configASSERT(pxRingbuffer && pxItemSize);
configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
if (xMaxSize == 0) {
return NULL;
}
//Attempt to retrieve up to xMaxSize bytes
void *pvTempItem;
size_t xTempSize;
if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize) == pdTRUE) {
if (pxItemSize != NULL) {
*pxItemSize = xTempSize;
}
if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, pxItemSize, NULL, xMaxSize) == pdTRUE) {
return pvTempItem;
} else {
return NULL;
@@ -1293,8 +1210,14 @@ void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
portENTER_CRITICAL(&pxRingbuffer->mux);
pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
//If a task was waiting for space to send, unblock it immediately.
if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
//The unblocked task will preempt us. Trigger a yield here.
portYIELD_WITHIN_API();
}
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));
}
void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
@@ -1305,8 +1228,16 @@ void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, Bas
portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
//If a task was waiting for space to send, unblock it immediately.
if (listLIST_IS_EMPTY(&pxRingbuffer->xTasksWaitingToSend) == pdFALSE) {
if (xTaskRemoveFromEventList(&pxRingbuffer->xTasksWaitingToSend) == pdTRUE) {
//The unblocked task will preempt us. Record that a context switch is required.
if (pxHigherPriorityTaskWoken != NULL) {
*pxHigherPriorityTaskWoken = pdTRUE;
}
}
}
portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
}
void vRingbufferDelete(RingbufHandle_t xRingbuffer)
@@ -1314,9 +1245,6 @@ void vRingbufferDelete(RingbufHandle_t xRingbuffer)
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
vSemaphoreDelete(rbGET_TX_SEM_HANDLE(pxRingbuffer));
vSemaphoreDelete(rbGET_RX_SEM_HANDLE(pxRingbuffer));
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
//Ring buffer was statically allocated, no need to free
@@ -1349,46 +1277,48 @@ size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
BaseType_t xReturn;
configASSERT(pxRingbuffer && xQueueSet);
portENTER_CRITICAL(&pxRingbuffer->mux);
//Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
BaseType_t result = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
xReturn = xQueueAddToSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
if (result == pdTRUE) {
//Return semaphore if temporarily held
result = xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
configASSERT(result == pdTRUE);
if (pxRingbuffer->xQueueSet != NULL || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
/*
- Cannot add ring buffer to more than one queue set
- It is dangerous to add a ring buffer to a queue set if the ring buffer currently has data to be read.
*/
xReturn = pdFALSE;
} else {
//Add ring buffer to queue set
pxRingbuffer->xQueueSet = xQueueSet;
xReturn = pdTRUE;
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
return xReturn;
}
BaseType_t xRingbufferCanRead(RingbufHandle_t xRingbuffer, QueueSetMemberHandle_t xMember)
{
//Check if the selected queue set member is the ring buffer's read semaphore
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
return (rbGET_RX_SEM_HANDLE(pxRingbuffer) == xMember) ? pdTRUE : pdFALSE;
return xReturn;
}
BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
configASSERT(pxRingbuffer);
BaseType_t xReturn;
configASSERT(pxRingbuffer && xQueueSet);
portENTER_CRITICAL(&pxRingbuffer->mux);
//Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
BaseType_t result = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
xReturn = xQueueRemoveFromSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
if (result == pdTRUE) {
//Return semaphore if temporarily held
result = xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
configASSERT(result == pdTRUE);
if (pxRingbuffer->xQueueSet != xQueueSet || prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
/*
- Ring buffer was never added to this queue set
- It is dangerous to remove a ring buffer from a queue set if the ring buffer currently has data to be read.
*/
xReturn = pdFALSE;
} else {
//Remove ring buffer from queue set
pxRingbuffer->xQueueSet = NULL;
xReturn = pdTRUE;
}
portEXIT_CRITICAL(&pxRingbuffer->mux);
return xReturn;
}
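
With the queue set support reworked as above (the ring buffer handle itself is posted to the set instead of an internal read semaphore), typical application usage stays the same. The following is a minimal usage sketch, not part of the commit; the buffer size, set length and task structure are arbitrary choices, and it assumes configUSE_QUEUE_SETS is enabled in the FreeRTOS configuration.

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"

//Hypothetical reader task, for illustration only
static void vReaderTask(void *pvArg)
{
    RingbufHandle_t xRingbuffer = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
    QueueSetHandle_t xQueueSet = xQueueCreateSet(4);
    configASSERT(xRingbuffer != NULL && xQueueSet != NULL);

    //Must be called while the ring buffer is empty (see xRingbufferAddToQueueSetRead() above)
    configASSERT(xRingbufferAddToQueueSetRead(xRingbuffer, xQueueSet) == pdTRUE);

    while (1) {
        //Block until any member of the set becomes ready
        QueueSetMemberHandle_t xMember = xQueueSelectFromSet(xQueueSet, portMAX_DELAY);
        if (xRingbufferCanRead(xRingbuffer, xMember) == pdTRUE) {
            //The ring buffer is the member that became ready
            size_t xItemSize;
            void *pvItem = xRingbufferReceive(xRingbuffer, &xItemSize, 0);
            if (pvItem != NULL) {
                //... process xItemSize bytes at pvItem ...
                vRingbufferReturnItem(xRingbuffer, pvItem);
            }
        }
    }
}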