Merge branch 'refactor/parlio_rx_use_dma_link' into 'master'

refactor(parlio_rx): use gdma_link for better gdma link management

Closes IDF-13369, IDF-13627, and IDFGH-16449

See merge request espressif/esp-idf!40743
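The heart of this refactor is dropping the driver's hand-rolled `parlio_dma_desc_t` chains in favor of the shared `gdma_link` API, and widening `mark_final` from a boolean into the `gdma_final_node_link_type_t` enum so each mount can state explicitly where the final node points. A minimal sketch of the resulting pattern, assuming a GDMA channel already exists (`s_tx_chan`, `s_tx_buf`, and `mount_and_start_once` are illustrative names, not from this patch):

    #include "esp_check.h"
    #include "esp_private/gdma.h"
    #include "esp_private/gdma_link.h"

    extern gdma_channel_handle_t s_tx_chan; // assumption: allocated elsewhere
    static uint8_t s_tx_buf[256];

    static void mount_and_start_once(void)
    {
        gdma_link_list_config_t link_cfg = {
            .num_items = 1,      // one node carries the whole buffer
            .item_alignment = 4, // AHB DMA descriptors are 4-byte aligned
        };
        gdma_link_list_handle_t link = NULL;
        ESP_ERROR_CHECK(gdma_new_link_list(&link_cfg, &link));

        gdma_buffer_mount_config_t mount_cfg = {
            .buffer = s_tx_buf,
            .length = sizeof(s_tx_buf),
            .flags = {
                .mark_eof = true,                      // raise EOF when this node finishes
                .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly linked chain: stop here
            },
        };
        ESP_ERROR_CHECK(gdma_link_mount_buffers(link, 0, &mount_cfg, 1, NULL));
        gdma_start(s_tx_chan, gdma_link_get_head_addr(link));
    }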
@@ -232,7 +232,7 @@ esp_err_t bitscrambler_loopback_run(bitscrambler_handle_t bs, void *buffer_in, s
         .length = length_bytes_in,
         .flags = {
             .mark_eof = true,
-            .mark_final = true,
+            .mark_final = GDMA_FINAL_LINK_TO_NULL,
         }
     };
     gdma_link_mount_buffers(bsl->tx_link_list, 0, &in_buf_mount_config, 1, NULL);

@@ -242,7 +242,7 @@ esp_err_t bitscrambler_loopback_run(bitscrambler_handle_t bs, void *buffer_in, s
         .length = length_bytes_out,
         .flags = {
             .mark_eof = false,
-            .mark_final = true,
+            .mark_final = GDMA_FINAL_LINK_TO_NULL,
         }
     };
     gdma_link_mount_buffers(bsl->rx_link_list, 0, &out_buf_mount_config, 1, NULL);
@@ -465,7 +465,7 @@ static esp_err_t do_dma_transaction_handler(i3c_master_bus_handle_t bus_handle,
         .length = dma_aligned_size,
         .flags = {
             .mark_eof = true,
-            .mark_final = true,
+            .mark_final = GDMA_FINAL_LINK_TO_NULL,
         }
     };

@@ -491,7 +491,7 @@ static esp_err_t do_dma_transaction_handler(i3c_master_bus_handle_t bus_handle,
         .length = dma_aligned_size,
         .flags = {
             .mark_eof = true,
-            .mark_final = true,
+            .mark_final = GDMA_FINAL_LINK_TO_NULL,
         }
     };
@@ -22,6 +22,7 @@ extern "C" {
 typedef struct {
     size_t trans_queue_depth;      /*!< Depth of internal transaction queue */
     size_t max_recv_size;          /*!< Maximum receive size in one transaction, in bytes. This decides the number of DMA nodes will be used for each transaction */
+    size_t dma_burst_size;         /*!< DMA burst size, in bytes */
     size_t data_width;             /*!< Parallel IO data width, can set to 1/2/4/8/..., but can't be greater than PARLIO_RX_UNIT_MAX_DATA_WIDTH */
     parlio_clock_source_t clk_src; /*!< Parallel IO clock source */
     uint32_t ext_clk_freq_hz;      /*!< The external source clock frequency. Only be valid when select PARLIO_CLK_SRC_EXTERNAL as clock source */
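A hedged sketch of how the new `dma_burst_size` field would be filled in when creating an RX unit; everything besides the field names shown in the hunk above is made up:

    // Sketch only: GPIO and delimiter plumbing omitted.
    parlio_rx_unit_config_t rx_cfg = {
        .trans_queue_depth = 4,
        .max_recv_size = 1024,             // largest single transaction, in bytes
        .dma_burst_size = 32,              // new field; 0 falls back to the driver default of 16
        .data_width = 4,
        .clk_src = PARLIO_CLK_SRC_DEFAULT,
    };
    parlio_rx_unit_handle_t rx_unit = NULL;
    ESP_ERROR_CHECK(parlio_new_rx_unit(&rx_cfg, &rx_unit));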
@@ -26,6 +26,12 @@ entries:

     if SOC_PARLIO_TX_SUPPORT_LOOP_TRANSMISSION = y:
         gdma_link: gdma_link_get_buffer (noflash)
+    if PARLIO_RX_ISR_HANDLER_IN_IRAM = y:
+        gdma_link: gdma_link_mount_buffers (noflash)
+        gdma_link: gdma_link_get_buffer (noflash)
+        gdma_link: gdma_link_get_length (noflash)
+        esp_dma_utils: esp_dma_split_rx_buffer_to_cache_aligned (noflash)
+        esp_dma_utils: esp_dma_merge_aligned_rx_buffers (noflash)

 [mapping:parlio_driver_soc_periph]
 archive: libsoc.a
@@ -19,12 +19,6 @@
 #define PARLIO_MAX_ALIGNED_DMA_BUF_SIZE  DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED
 #endif

-#if defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AHB)
-typedef dma_descriptor_align4_t parlio_dma_desc_t;
-#elif defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AXI)
-typedef dma_descriptor_align8_t parlio_dma_desc_t;
-#endif
-
 #define PARLIO_DMA_MEM_ALLOC_CAPS    (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT | MALLOC_CAP_DMA)

 /**
@@ -32,13 +26,14 @@ typedef dma_descriptor_align8_t parlio_dma_desc_t;
  */
 typedef struct {
     parlio_rx_delimiter_handle_t delimiter;   /*!< Delimiter of this transaction */
-    void *payload;                            /*!< The payload of this transaction, will be mounted to DMA descriptor */
-    size_t size;                              /*!< The payload size in byte */
+    dma_buffer_split_array_t aligned_payload; /*!< The aligned payload of this transaction, will be mounted to DMA link list node */
+    size_t tot_trans_size;                    /*!< The total size of the transaction */
     size_t recv_bytes;                        /*!< The received bytes of this transaction
                                                    will be reset when all data filled in the infinite transaction */
+    size_t alignment;                         /*!< The alignment of the payload buffer */
     struct {
         uint32_t infinite : 1;                /*!< Whether this is an infinite transaction */
-        uint32_t indirect_mount : 1;          /*!< Whether the user payload mount to the descriptor indirectly via an internal DMA buffer */
+        uint32_t indirect_mount : 1;          /*!< Whether the user payload mount to the link list node indirectly via an internal DMA buffer */
     } flags;
 } parlio_rx_transaction_t;
@@ -69,13 +64,17 @@ typedef struct parlio_rx_unit_t {
     /* DMA Resources */
     gdma_channel_handle_t dma_chan;   /*!< DMA channel */
     size_t max_recv_size;             /*!< Maximum receive size for a normal transaction */
-    size_t desc_num;                  /*!< DMA descriptor number */
-    size_t desc_size;                 /*!< DMA descriptors total size */
-    parlio_dma_desc_t **dma_descs;    /*!< DMA descriptor array pointer */
-    parlio_dma_desc_t *curr_desc;     /*!< The pointer of the current descriptor */
+    size_t dma_burst_size;            /*!< DMA burst size, in bytes */
+    gdma_link_list_handle_t dma_link; /*!< DMA link list handle */
+    uint32_t node_num;                /*!< The number of nodes in the DMA link list */
+    size_t dma_mem_align;             /*!< Alignment for DMA memory */
+    uint32_t curr_node_id;            /*!< The index of the current node in the DMA link list */
     void *usr_recv_buf;               /*!< The point to the user's receiving buffer */
     /* Infinite transaction specific */
     void *dma_buf;                    /*!< Additional internal DMA buffer only for infinite transactions */
+    /* Unaligned DMA buffer management */
+    uint8_t *stash_buf[2];            /*!< The ping-pong stash buffer for unaligned DMA buffer */
+    uint8_t stash_buf_idx;            /*!< The index of the current stash buffer */

     /* Callback */
     parlio_rx_event_callbacks_t cbs;  /*!< The group of callback function pointers */
@@ -122,7 +121,6 @@ static portMUX_TYPE s_rx_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;

 size_t parlio_rx_mount_transaction_buffer(parlio_rx_unit_handle_t rx_unit, parlio_rx_transaction_t *trans)
 {
-    parlio_dma_desc_t **p_desc = rx_unit->dma_descs;
     /* Update the current transaction to the next one, and declare the delimiter is under using of the rx unit */
     memcpy(&rx_unit->curr_trans, trans, sizeof(parlio_rx_transaction_t));
     portENTER_CRITICAL_SAFE(&s_rx_spinlock);
@@ -131,50 +129,65 @@ size_t parlio_rx_mount_transaction_buffer(parlio_rx_unit_handle_t rx_unit, parli
     }
     portEXIT_CRITICAL_SAFE(&s_rx_spinlock);

-    uint32_t desc_num = trans->size / PARLIO_MAX_ALIGNED_DMA_BUF_SIZE;
-    uint32_t remain_num = trans->size % PARLIO_MAX_ALIGNED_DMA_BUF_SIZE;
-    /* If there are still data remained, need one more descriptor */
-    desc_num += remain_num ? 1 : 0;
-    if (trans->flags.infinite && desc_num < 2) {
-        /* At least 2 descriptors needed */
-        desc_num = 2;
+    /* Calculate the number of nodes needed for the transaction */
+    uint32_t body_node_num = trans->aligned_payload.buf.body.length / PARLIO_MAX_ALIGNED_DMA_BUF_SIZE;
+    uint32_t body_remain_size = trans->aligned_payload.buf.body.length % PARLIO_MAX_ALIGNED_DMA_BUF_SIZE;
+    /* If there are still data remained, need one more node */
+    body_node_num += body_remain_size ? 1 : 0;
+
+    uint32_t head_node_num = trans->aligned_payload.buf.head.length ? 1 : 0;
+    uint32_t tail_node_num = trans->aligned_payload.buf.tail.length ? 1 : 0;
+    uint32_t required_node_num = body_node_num + head_node_num + tail_node_num;
+
+    if (trans->flags.infinite && required_node_num < 2) {
+        /* At least 2 nodes needed */
+        required_node_num = 2;
     }
+    rx_unit->node_num = required_node_num;

+    gdma_buffer_mount_config_t mount_config[required_node_num] = {};
+    /* Mount head buffer */
+    if (head_node_num) {
+        mount_config[0].buffer = trans->aligned_payload.buf.head.aligned_buffer;
+        mount_config[0].buffer_alignment = trans->alignment;
+        mount_config[0].length = trans->aligned_payload.buf.head.length;
+        mount_config[0].flags.bypass_buffer_align_check = false;
+        mount_config[0].flags.mark_eof = false;
+        mount_config[0].flags.mark_final = GDMA_FINAL_LINK_TO_DEFAULT;
+    }
+    /* Mount body buffer */
     size_t mount_size = 0;
     size_t offset = 0;
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-    uint32_t alignment = rx_unit->base.group->dma_align;
-#else
-    uint32_t alignment = 4;
-#endif
-    /* Loop the descriptors to assign the data */
-    for (int i = 0; i < desc_num; i++) {
-        size_t rest_size = trans->size - offset;
+    for (int i = head_node_num; i < required_node_num - tail_node_num; i++) {
+        size_t rest_size = trans->aligned_payload.buf.body.length - offset;
         if (rest_size >= 2 * PARLIO_MAX_ALIGNED_DMA_BUF_SIZE) {
-            mount_size = PARLIO_RX_MOUNT_SIZE_CALC(trans->size, desc_num, alignment);
+            mount_size = PARLIO_RX_MOUNT_SIZE_CALC(trans->aligned_payload.buf.body.length, body_node_num, trans->alignment);
         } else if (rest_size <= PARLIO_MAX_ALIGNED_DMA_BUF_SIZE) {
-            mount_size = (desc_num == 2) && (i == 0) ? PARLIO_RX_MOUNT_SIZE_CALC(rest_size, 2, alignment) : rest_size;
+            mount_size = (required_node_num == 2) && (i == 0) ? PARLIO_RX_MOUNT_SIZE_CALC(rest_size, 2, trans->alignment) : rest_size;
         } else {
-            mount_size = PARLIO_RX_MOUNT_SIZE_CALC(rest_size, 2, alignment);
-        }
-        p_desc[i]->buffer = (void *)((uint8_t *)trans->payload + offset);
-        p_desc[i]->dw0.size = mount_size;
-        p_desc[i]->dw0.length = mount_size;
-        p_desc[i]->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
-        // Link the descriptor
-        if (i < desc_num - 1) {
-            p_desc[i]->next = p_desc[i + 1];
-        } else {
-            /* For infinite transaction, link the descriptor as a ring */
-            p_desc[i]->next = trans->flags.infinite ? p_desc[0] : NULL;
+            mount_size = PARLIO_RX_MOUNT_SIZE_CALC(rest_size, 2, trans->alignment);
         }
+        mount_config[i].buffer = (void *)((uint8_t *)trans->aligned_payload.buf.body.aligned_buffer + offset);
+        mount_config[i].buffer_alignment = trans->alignment;
+        mount_config[i].length = mount_size;
+        mount_config[i].flags.bypass_buffer_align_check = false;
+        mount_config[i].flags.mark_eof = false;
+        mount_config[i].flags.mark_final = GDMA_FINAL_LINK_TO_DEFAULT;
        offset += mount_size;
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-        esp_cache_msync(p_desc[i], rx_unit->desc_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
-#endif
     }
+    /* Mount tail buffer */
+    if (tail_node_num) {
+        mount_config[required_node_num - 1].buffer = trans->aligned_payload.buf.tail.aligned_buffer;
+        mount_config[required_node_num - 1].buffer_alignment = trans->alignment;
+        mount_config[required_node_num - 1].length = trans->aligned_payload.buf.tail.length;
+        mount_config[required_node_num - 1].flags.bypass_buffer_align_check = false;
+    }
+    /* For infinite transaction, link the node as a ring */
+    mount_config[required_node_num - 1].flags.mark_final = !trans->flags.infinite ? GDMA_FINAL_LINK_TO_NULL : GDMA_FINAL_LINK_TO_HEAD;
+    mount_config[required_node_num - 1].flags.mark_eof = true;
+    gdma_link_mount_buffers(rx_unit->dma_link, 0, mount_config, required_node_num, NULL);
     /* Reset the current DMA node */
-    rx_unit->curr_desc = p_desc[0];
+    rx_unit->curr_node_id = 0;

     return offset;
 }
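The node-count math above splits a cache-unaligned user buffer into at most three regions. A worked example with made-up numbers (64-byte cache line, buffer starting 16 bytes past an aligned address, 200 bytes long):

    // head: copied through the stash buffer to reach alignment
    size_t head = 64 - 16;           // 48 bytes
    // tail: the unaligned leftover at the end, also via the stash buffer
    size_t tail = (16 + 200) % 64;   // 24 bytes
    // body: the aligned middle, DMA'd in place
    size_t body = 200 - head - tail; // 128 bytes, one node as long as it stays
                                     // below PARLIO_MAX_ALIGNED_DMA_BUF_SIZE
    uint32_t required_node_num = 1 + 1 + 1; // head + body + tail = 3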
@@ -300,6 +313,7 @@ static bool parlio_rx_default_eof_callback(gdma_channel_handle_t dma_chan, gdma_
             need_yield |= rx_unit->cbs.on_timeout(rx_unit, &evt_data, rx_unit->user_data);
         }
     } else {
+        esp_dma_merge_aligned_rx_buffers(&rx_unit->curr_trans.aligned_payload);
         /* If received a normal EOF, it's a receive done event on parlio RX */
         if (rx_unit->cbs.on_receive_done) {
             evt_data.data = rx_unit->usr_recv_buf;
@@ -326,7 +340,7 @@ static bool parlio_rx_default_eof_callback(gdma_channel_handle_t dma_chan, gdma_
     }
     /* Mount the new transaction buffer and start the new transaction */
     parlio_rx_mount_transaction_buffer(rx_unit, &next_trans);
-    gdma_start(rx_unit->dma_chan, (intptr_t)rx_unit->dma_descs[0]);
+    gdma_start(rx_unit->dma_chan, gdma_link_get_head_addr(rx_unit->dma_link));
     if (rx_unit->cfg.flags.free_clk) {
         parlio_ll_rx_start(rx_unit->base.group->hal.regs, true);
         PARLIO_CLOCK_SRC_ATOMIC() {
@@ -357,20 +371,20 @@ static bool parlio_rx_default_desc_done_callback(gdma_channel_handle_t dma_chan,
         return false;
     }

-    /* Get the finished descriptor from the current descriptor */
-    parlio_dma_desc_t *finished_desc = rx_unit->curr_desc;
+    /* Get the finished node from the current node */
+    void *finished_buffer = gdma_link_get_buffer(rx_unit->dma_link, rx_unit->curr_node_id);
+    size_t finished_length = gdma_link_get_length(rx_unit->dma_link, rx_unit->curr_node_id);
 #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
     esp_err_t ret = ESP_OK;
-    ret |= esp_cache_msync((void *)finished_desc, rx_unit->desc_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
-    ret |= esp_cache_msync((void *)(finished_desc->buffer), finished_desc->dw0.size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
+    ret = esp_cache_msync(finished_buffer, finished_length, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
     if (ret != ESP_OK) {
         ESP_EARLY_LOGW(TAG, "failed to sync dma buffer from memory to cache");
     }
 #endif
     parlio_rx_event_data_t evt_data = {
         .delimiter = rx_unit->curr_trans.delimiter,
-        .data = finished_desc->buffer,
-        .recv_bytes = finished_desc->dw0.length,
+        .data = finished_buffer,
+        .recv_bytes = finished_length,
     };
     if (rx_unit->cbs.on_partial_receive) {
         need_yield |= rx_unit->cbs.on_partial_receive(rx_unit, &evt_data, rx_unit->user_data);
@@ -384,57 +398,37 @@ static bool parlio_rx_default_desc_done_callback(gdma_channel_handle_t dma_chan,
         portEXIT_CRITICAL_ISR(&s_rx_spinlock);
     }
     /* Update received bytes */
-    if (rx_unit->curr_trans.recv_bytes >= rx_unit->curr_trans.size) {
+    if (rx_unit->curr_trans.recv_bytes >= rx_unit->curr_trans.tot_trans_size) {
         rx_unit->curr_trans.recv_bytes = 0;
     }
     rx_unit->curr_trans.recv_bytes += evt_data.recv_bytes;
-    /* Move to the next DMA descriptor */
-    rx_unit->curr_desc = rx_unit->curr_desc->next;
+    /* Move to the next DMA node */
+    rx_unit->curr_node_id++;
+    rx_unit->curr_node_id %= rx_unit->node_num;

     return need_yield;
 }

-static esp_err_t parlio_rx_create_dma_descriptors(parlio_rx_unit_handle_t rx_unit, uint32_t max_recv_size)
+static esp_err_t parlio_rx_create_dma_link(parlio_rx_unit_handle_t rx_unit, uint32_t max_recv_size)
 {
     ESP_RETURN_ON_FALSE(rx_unit, ESP_ERR_INVALID_ARG, TAG, "invalid param");
     esp_err_t ret = ESP_OK;
-    uint32_t desc_num = max_recv_size / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED + 1;
-    /* set at least 2 descriptors */
-    if (desc_num < 2) {
-        desc_num = 2;
-    }
-    rx_unit->desc_num = desc_num;
-
-    /* Allocated and link the descriptor nodes */
-    rx_unit->dma_descs = heap_caps_calloc(desc_num, sizeof(parlio_dma_desc_t *), MALLOC_CAP_DMA);
-    ESP_RETURN_ON_FALSE(rx_unit->dma_descs, ESP_ERR_NO_MEM, TAG, "no memory for DMA descriptor array");
-    uint32_t cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-    size_t alignment = MAX(cache_line_size, PARLIO_DMA_DESC_ALIGNMENT);
-    rx_unit->desc_size = ALIGN_UP(sizeof(parlio_dma_desc_t), alignment);
-    for (int i = 0; i < desc_num; i++) {
-        rx_unit->dma_descs[i] = heap_caps_aligned_calloc(alignment, 1, rx_unit->desc_size, PARLIO_DMA_MEM_ALLOC_CAPS);
-        ESP_GOTO_ON_FALSE(rx_unit->dma_descs[i], ESP_ERR_NO_MEM, err, TAG, "no memory for DMA descriptors");
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-        esp_cache_msync(rx_unit->dma_descs[i], rx_unit->desc_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
-#endif
-    }
+    // calculated the total node number, add 2 for the aligned stash buffer
+    size_t tot_node_num = esp_dma_calculate_node_count(max_recv_size, rx_unit->dma_mem_align, PARLIO_DMA_DESCRIPTOR_BUFFER_MAX_SIZE) + 2;
+    gdma_link_list_config_t dma_link_config = {
+        .num_items = tot_node_num,
+        .item_alignment = PARLIO_DMA_DESC_ALIGNMENT,
+    };
+
+    // create DMA link list, throw the error to the caller if failed
+    ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &rx_unit->dma_link), TAG, "create DMA link list failed");

     rx_unit->max_recv_size = max_recv_size;

     return ret;
-err:
-    for (int i = 0; i < desc_num; i++) {
-        if (rx_unit->dma_descs[i]) {
-            free(rx_unit->dma_descs[i]);
-            rx_unit->dma_descs[i] = NULL;
-        }
-    }
-    free(rx_unit->dma_descs);
-    rx_unit->dma_descs = NULL;
-    return ret;
 }

-static esp_err_t parlio_rx_unit_init_dma(parlio_rx_unit_handle_t rx_unit)
+static esp_err_t parlio_rx_unit_init_dma(parlio_rx_unit_handle_t rx_unit, size_t dma_burst_size)
 {
     /* Allocate and connect the GDMA channel */
     gdma_channel_alloc_config_t dma_chan_config = {
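The `+ 2` in `parlio_rx_create_dma_link()` reserves one node each for the head and tail stash pieces that `esp_dma_split_rx_buffer_to_cache_aligned()` may produce. The same sizing logic in isolation, as a sketch (`max_recv_size` and `dma_mem_align` stand in for the values the driver holds at runtime):

    size_t node_count = esp_dma_calculate_node_count(max_recv_size,
                                                     dma_mem_align, // from gdma_get_alignment_constraints()
                                                     PARLIO_DMA_DESCRIPTOR_BUFFER_MAX_SIZE) + 2;
    gdma_link_list_config_t cfg = {
        .num_items = node_count,
        .item_alignment = PARLIO_DMA_DESC_ALIGNMENT,
    };
    gdma_link_list_handle_t link = NULL;
    ESP_ERROR_CHECK(gdma_new_link_list(&cfg, &link));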
@@ -453,6 +447,14 @@ static esp_err_t parlio_rx_unit_init_dma(parlio_rx_unit_handle_t rx_unit)
     };
     gdma_apply_strategy(rx_unit->dma_chan, &gdma_strategy_conf);

+    // configure DMA transfer parameters
+    rx_unit->dma_burst_size = dma_burst_size ? dma_burst_size : 16;
+    gdma_transfer_config_t trans_cfg = {
+        .max_data_burst_size = rx_unit->dma_burst_size, // Enable DMA burst transfer for better performance,
+    };
+    ESP_RETURN_ON_ERROR(gdma_config_transfer(rx_unit->dma_chan, &trans_cfg), TAG, "config DMA transfer failed");
+    ESP_RETURN_ON_ERROR(gdma_get_alignment_constraints(rx_unit->dma_chan, &rx_unit->dma_mem_align, NULL), TAG, "get alignment constraints failed");
+
     /* Register callbacks */
     gdma_rx_event_callbacks_t cbs = {
         .on_recv_eof = parlio_rx_default_eof_callback,
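The burst size now comes from the user config (0 selects the 16-byte default), and the DMA buffer alignment is queried back from the channel rather than derived from the cache line. The pairing, reduced to a sketch (`chan` is assumed to be a live RX channel handle):

    gdma_transfer_config_t trans_cfg = {
        .max_data_burst_size = 16, // bytes; larger bursts generally improve throughput
    };
    ESP_ERROR_CHECK(gdma_config_transfer(chan, &trans_cfg));

    size_t rx_mem_align = 0;
    ESP_ERROR_CHECK(gdma_get_alignment_constraints(chan, &rx_mem_align, NULL));
    // rx_mem_align then drives every DMA buffer allocation (e.g. the stash buffers)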
@@ -552,21 +554,22 @@ static esp_err_t parlio_destroy_rx_unit(parlio_rx_unit_handle_t rx_unit)
         ESP_RETURN_ON_ERROR(gdma_disconnect(rx_unit->dma_chan), TAG, "disconnect dma channel failed");
         ESP_RETURN_ON_ERROR(gdma_del_channel(rx_unit->dma_chan), TAG, "delete dma channel failed");
     }
-    /* Free the DMA descriptors */
-    if (rx_unit->dma_descs) {
-        for (int i = 0; i < rx_unit->desc_num; i++) {
-            if (rx_unit->dma_descs[i]) {
-                free(rx_unit->dma_descs[i]);
-                rx_unit->dma_descs[i] = NULL;
-            }
-        }
-        free(rx_unit->dma_descs);
-        rx_unit->dma_descs = NULL;
+    /* Free the DMA link list */
+    if (rx_unit->dma_link) {
+        gdma_del_link_list(rx_unit->dma_link);
+        rx_unit->dma_link = NULL;
     }
     /* Free the internal DMA buffer */
     if (rx_unit->dma_buf) {
         free(rx_unit->dma_buf);
     }
+    /* Free the stash buffer */
+    for (uint8_t i = 0; i < 2; i++) {
+        if (rx_unit->stash_buf[i]) {
+            free(rx_unit->stash_buf[i]);
+            rx_unit->stash_buf[i] = NULL;
+        }
+    }
     /* Unregister the RX unit from the PARLIO group */
     if (rx_unit->base.group) {
         parlio_unregister_unit_from_group(&rx_unit->base);
@@ -615,7 +618,6 @@ esp_err_t parlio_new_rx_unit(const parlio_rx_unit_config_t *config, parlio_rx_un
     unit->trans_que = xQueueCreateWithCaps(config->trans_queue_depth, sizeof(parlio_rx_transaction_t), PARLIO_MEM_ALLOC_CAPS);
     ESP_GOTO_ON_FALSE(unit->trans_que, ESP_ERR_NO_MEM, err, TAG, "no memory for transaction queue");

-    ESP_GOTO_ON_ERROR(parlio_rx_create_dma_descriptors(unit, config->max_recv_size), err, TAG, "create dma descriptor failed");
     /* Register and attach the rx unit to the group */
     ESP_GOTO_ON_ERROR(parlio_register_unit_to_group(&unit->base), err, TAG, "failed to register the rx unit to the group");
     memcpy(&unit->cfg, config, sizeof(parlio_rx_unit_config_t));
@@ -629,7 +631,14 @@ esp_err_t parlio_new_rx_unit(const parlio_rx_unit_config_t *config, parlio_rx_un
     /* Initialize GPIO */
     ESP_GOTO_ON_ERROR(parlio_rx_unit_set_gpio(unit, config), err, TAG, "failed to set GPIO");
     /* Install DMA service */
-    ESP_GOTO_ON_ERROR(parlio_rx_unit_init_dma(unit), err, TAG, "install rx DMA failed");
+    ESP_GOTO_ON_ERROR(parlio_rx_unit_init_dma(unit, config->dma_burst_size), err, TAG, "install rx DMA failed");
+    ESP_GOTO_ON_ERROR(parlio_rx_create_dma_link(unit, config->max_recv_size), err, TAG, "create dma link list failed");
+
+    for (uint8_t i = 0; i < 2; i++) {
+        unit->stash_buf[i] = heap_caps_aligned_calloc(unit->dma_mem_align, 2, unit->dma_mem_align, PARLIO_MEM_ALLOC_CAPS | MALLOC_CAP_DMA);
+        ESP_GOTO_ON_FALSE(unit->stash_buf[i], ESP_ERR_NO_MEM, err, TAG, "no memory for stash buffer");
+    }
+
     /* Reset RX module */
     PARLIO_RCC_ATOMIC() {
         parlio_ll_rx_reset_clock(hal->regs);
@@ -721,8 +730,10 @@ esp_err_t parlio_rx_unit_enable(parlio_rx_unit_handle_t rx_unit, bool reset_queu
         assert(res == pdTRUE);

         if (trans.flags.indirect_mount && trans.flags.infinite && rx_unit->dma_buf == NULL) {
-            rx_unit->dma_buf = heap_caps_aligned_calloc(rx_unit->base.group->dma_align, 1, trans.size, PARLIO_DMA_MEM_ALLOC_CAPS);
+            rx_unit->dma_buf = heap_caps_aligned_calloc(rx_unit->dma_mem_align, 1, trans.aligned_payload.buf.body.length, PARLIO_DMA_MEM_ALLOC_CAPS);
             ESP_GOTO_ON_FALSE(rx_unit->dma_buf, ESP_ERR_NO_MEM, err, TAG, "No memory for the internal DMA buffer");
+            trans.aligned_payload.buf.body.aligned_buffer = rx_unit->dma_buf;
+            trans.aligned_payload.buf.body.recovery_address = rx_unit->dma_buf;
         }
         if (rx_unit->cfg.flags.free_clk) {
             PARLIO_CLOCK_SRC_ATOMIC() {
@@ -732,7 +743,7 @@ esp_err_t parlio_rx_unit_enable(parlio_rx_unit_handle_t rx_unit, bool reset_queu
         assert(trans.delimiter);
         parlio_rx_set_delimiter_config(rx_unit, trans.delimiter);
         parlio_rx_mount_transaction_buffer(rx_unit, &trans);
-        gdma_start(rx_unit->dma_chan, (intptr_t)rx_unit->curr_desc);
+        gdma_start(rx_unit->dma_chan, gdma_link_get_head_addr(rx_unit->dma_link));
         if (rx_unit->cfg.flags.free_clk) {
             parlio_ll_rx_start(hal->regs, true);
             PARLIO_CLOCK_SRC_ATOMIC() {
@@ -923,7 +934,7 @@ static esp_err_t parlio_rx_unit_do_transaction(parlio_rx_unit_handle_t rx_unit,
     parlio_rx_mount_transaction_buffer(rx_unit, trans);
     // Take semaphore without block time here, only indicate there are transactions on receiving
     xSemaphoreTake(rx_unit->trans_sem, 0);
-    gdma_start(rx_unit->dma_chan, (intptr_t)rx_unit->curr_desc);
+    gdma_start(rx_unit->dma_chan, gdma_link_get_head_addr(rx_unit->dma_link));
     if (rx_unit->cfg.flags.free_clk) {
         parlio_ll_rx_start(rx_unit->base.group->hal.regs, true);
         PARLIO_CLOCK_SRC_ATOMIC() {
@@ -946,13 +957,11 @@ esp_err_t parlio_rx_unit_receive(parlio_rx_unit_handle_t rx_unit,
     ESP_RETURN_ON_FALSE(rx_unit && payload && recv_cfg, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
     ESP_RETURN_ON_FALSE(recv_cfg->delimiter, ESP_ERR_INVALID_ARG, TAG, "no delimiter specified");
     ESP_RETURN_ON_FALSE(payload_size <= rx_unit->max_recv_size, ESP_ERR_INVALID_ARG, TAG, "trans length too large");
-    uint32_t alignment = rx_unit->base.group->dma_align;
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-    ESP_RETURN_ON_FALSE(payload_size % alignment == 0, ESP_ERR_INVALID_ARG, TAG, "The payload size should align with %"PRIu32, alignment);
+    size_t alignment = rx_unit->dma_mem_align;
     if (recv_cfg->flags.partial_rx_en) {
         ESP_RETURN_ON_FALSE(payload_size >= 2 * alignment, ESP_ERR_INVALID_ARG, TAG, "The payload size should greater than %"PRIu32, 2 * alignment);
     }
-#endif

 #if CONFIG_PARLIO_RX_ISR_CACHE_SAFE
     ESP_RETURN_ON_FALSE(esp_ptr_internal(payload), ESP_ERR_INVALID_ARG, TAG, "payload not in internal RAM");
 #else
@@ -973,8 +982,8 @@ esp_err_t parlio_rx_unit_receive(parlio_rx_unit_handle_t rx_unit,
                                  rx_units[rx_unit->base.unit_id].
                                  data_sigs[recv_cfg->delimiter->valid_sig_line_id];
     }
-    void *p_buffer = payload;

+    dma_buffer_split_array_t dma_buf_array = {0};
     /* Create the internal DMA buffer for the infinite transaction if indirect_mount is set */
     if (recv_cfg->flags.partial_rx_en && recv_cfg->flags.indirect_mount) {
         ESP_RETURN_ON_FALSE(!rx_unit->dma_buf, ESP_ERR_INVALID_STATE, TAG, "infinite transaction is using the internal DMA buffer");
@@ -982,14 +991,21 @@ esp_err_t parlio_rx_unit_receive(parlio_rx_unit_handle_t rx_unit,
         rx_unit->dma_buf = heap_caps_aligned_calloc(alignment, 1, payload_size, PARLIO_DMA_MEM_ALLOC_CAPS);
         ESP_RETURN_ON_FALSE(rx_unit->dma_buf, ESP_ERR_NO_MEM, TAG, "No memory for the internal DMA buffer");
         /* Use the internal DMA buffer so that the user buffer can always be available */
-        p_buffer = rx_unit->dma_buf;
+        dma_buf_array.buf.body.aligned_buffer = rx_unit->dma_buf;
+        dma_buf_array.buf.body.recovery_address = rx_unit->dma_buf;
+        dma_buf_array.buf.body.length = payload_size;
+    } else {
+        ESP_RETURN_ON_ERROR(esp_dma_split_rx_buffer_to_cache_aligned(payload, payload_size, &dma_buf_array, &rx_unit->stash_buf[rx_unit->stash_buf_idx]),
+                            TAG, "failed to split the unaligned DMA buffer");
+        rx_unit->stash_buf_idx = !rx_unit->stash_buf_idx;
     }

     /* Create the transaction */
     parlio_rx_transaction_t transaction = {
         .delimiter = recv_cfg->delimiter,
-        .payload = p_buffer,
-        .size = payload_size,
+        .aligned_payload = dma_buf_array,
+        .tot_trans_size = payload_size,
+        .alignment = alignment,
         .recv_bytes = 0,
         .flags.infinite = recv_cfg->flags.partial_rx_en,
         .flags.indirect_mount = recv_cfg->flags.indirect_mount,
@@ -1013,13 +1029,10 @@ esp_err_t parlio_rx_unit_receive_from_isr(parlio_rx_unit_handle_t rx_unit,
     PARLIO_RX_CHECK_ISR(payload_size <= rx_unit->max_recv_size, ESP_ERR_INVALID_ARG);
     // Can only be called from ISR
     PARLIO_RX_CHECK_ISR(xPortInIsrContext() == pdTRUE, ESP_ERR_INVALID_STATE);
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-    uint32_t alignment = rx_unit->base.group->dma_align;
-    PARLIO_RX_CHECK_ISR(payload_size % alignment == 0, ESP_ERR_INVALID_ARG);
+    size_t alignment = rx_unit->dma_mem_align;
     if (recv_cfg->flags.partial_rx_en) {
         PARLIO_RX_CHECK_ISR(payload_size >= 2 * alignment, ESP_ERR_INVALID_ARG);
     }
-#endif
 #if CONFIG_PARLIO_RX_ISR_CACHE_SAFE
     PARLIO_RX_CHECK_ISR(esp_ptr_internal(payload), ESP_ERR_INVALID_ARG);
 #else
@@ -1038,19 +1051,27 @@ esp_err_t parlio_rx_unit_receive_from_isr(parlio_rx_unit_handle_t rx_unit,
                                  rx_units[rx_unit->base.unit_id].
                                  data_sigs[recv_cfg->delimiter->valid_sig_line_id];
     }
-    void *p_buffer = payload;

+    dma_buffer_split_array_t dma_buf_array = {0};
     if (recv_cfg->flags.partial_rx_en && recv_cfg->flags.indirect_mount) {
         /* The internal DMA buffer should be allocated before calling this function */
         PARLIO_RX_CHECK_ISR(rx_unit->dma_buf, ESP_ERR_INVALID_STATE);
-        p_buffer = rx_unit->dma_buf;
+        dma_buf_array.buf.body.aligned_buffer = rx_unit->dma_buf;
+        dma_buf_array.buf.body.recovery_address = rx_unit->dma_buf;
+        dma_buf_array.buf.body.length = payload_size;
+    } else {
+        /* Create the internal DMA buffer for the infinite transaction if indirect_mount is set */
+        esp_err_t esp_ret = esp_dma_split_rx_buffer_to_cache_aligned(payload, payload_size, &dma_buf_array, &rx_unit->stash_buf[rx_unit->stash_buf_idx]);
+        PARLIO_RX_CHECK_ISR(esp_ret == ESP_OK, esp_ret);
+        rx_unit->stash_buf_idx = !rx_unit->stash_buf_idx;
     }

     /* Create the transaction */
     parlio_rx_transaction_t transaction = {
         .delimiter = recv_cfg->delimiter,
-        .payload = p_buffer,
-        .size = payload_size,
+        .aligned_payload = dma_buf_array,
+        .tot_trans_size = payload_size,
+        .alignment = alignment,
         .recv_bytes = 0,
         .flags.infinite = recv_cfg->flags.partial_rx_en,
         .flags.indirect_mount = recv_cfg->flags.indirect_mount,
@@ -460,7 +460,7 @@ static void parlio_mount_buffer(parlio_tx_unit_t *tx_unit, parlio_tx_trans_desc_
         .flags = {
             // if transmission is loop, we don't need to generate the EOF for 1-bit data width, DIG-559
             .mark_eof = tx_unit->data_width == 1 ? !t->flags.loop_transmission : true,
-            .mark_final = !t->flags.loop_transmission,
+            .mark_final = t->flags.loop_transmission ? GDMA_FINAL_LINK_TO_START : GDMA_FINAL_LINK_TO_NULL,
         }
     };
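`GDMA_FINAL_LINK_TO_START` closes the ring back to the first item of this mount (not necessarily item 0 of the list), which is what loop transmission needs when a transaction is mounted at a non-zero start index. A hedged sketch; the start index, buffers, and lengths are illustrative:

    gdma_buffer_mount_config_t cfgs[2] = {
        { .buffer = buf_a, .length = len_a,
          .flags = { .mark_eof = true, .mark_final = GDMA_FINAL_LINK_TO_DEFAULT } },
        { .buffer = buf_b, .length = len_b,
          // the final item points back at item 2, the start of this mount -> a ring
          .flags = { .mark_eof = true, .mark_final = GDMA_FINAL_LINK_TO_START } },
    };
    ESP_ERROR_CHECK(gdma_link_mount_buffers(link, 2, cfgs, 2, NULL));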
@@ -476,7 +476,7 @@ TEST_CASE("parallel_rx_unit_receive_transaction_test", "[parlio_rx]")
     TEST_ESP_OK(parlio_rx_unit_receive(rx_unit, payload, TEST_PAYLOAD_SIZE, &recv_config));
     TEST_ESP_OK(parlio_rx_unit_wait_all_done(rx_unit, 5000));
     TEST_ESP_OK(parlio_rx_soft_delimiter_start_stop(rx_unit, deli, false));
-    TEST_ASSERT_EQUAL_UINT32(2, test_data.partial_recv_cnt);
+    TEST_ASSERT_GREATER_OR_EQUAL_UINT32(2, test_data.partial_recv_cnt);
     TEST_ASSERT_EQUAL_UINT32(1, test_data.recv_done_cnt);
     memset(&test_data, 0, sizeof(test_data_t));

@@ -488,7 +488,7 @@ TEST_CASE("parallel_rx_unit_receive_transaction_test", "[parlio_rx]")
     }
     TEST_ESP_OK(parlio_rx_unit_wait_all_done(rx_unit, 5000));
     TEST_ESP_OK(parlio_rx_soft_delimiter_start_stop(rx_unit, deli, false));
-    TEST_ASSERT_EQUAL_UINT32(10, test_data.partial_recv_cnt);
+    TEST_ASSERT_GREATER_OR_EQUAL_UINT32(10, test_data.partial_recv_cnt);
     TEST_ASSERT_EQUAL_UINT32(5, test_data.recv_done_cnt);
     memset(&test_data, 0, sizeof(test_data_t));
@@ -33,7 +33,7 @@ static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void
         .length = per_block_size,
         .buffer_alignment = mem_alignment,
         .flags = {
-            .mark_final = false,
+            .mark_final = GDMA_FINAL_LINK_TO_DEFAULT,
         }
     };
 }

@@ -105,7 +105,7 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
             // each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback
             .mark_eof = true,
             // chain the descriptors into a ring, and will break it in `rmt_encode_eof()`
-            .mark_final = false,
+            .mark_final = GDMA_FINAL_LINK_TO_DEFAULT,
         }
     };
 }
@@ -273,7 +273,7 @@ static void uhci_do_transmit(uhci_controller_handle_t uhci_ctrl, uhci_transactio
         .length = trans->buffer_size,
         .flags = {
             .mark_eof = true,
-            .mark_final = true,
+            .mark_final = GDMA_FINAL_LINK_TO_NULL,
         }
     };

@@ -338,7 +338,7 @@ esp_err_t uhci_receive(uhci_controller_handle_t uhci_ctrl, uint8_t *read_buffer,
         .buffer_alignment = buffer_alignment,
         .length = uhci_ctrl->rx_dir.buffer_size_per_desc_node[i],
         .flags = {
-            .mark_final = false,
+            .mark_final = GDMA_FINAL_LINK_TO_DEFAULT,
         }
     };
     ESP_LOGD(TAG, "The DMA node %d has %d byte", i, uhci_ctrl->rx_dir.buffer_size_per_desc_node[i]);
@@ -233,7 +233,7 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
             .length = n,
             .flags = {
                 .mark_eof = true,  // mark the last item as EOF, so the RX channel can also received an EOF list item
-                .mark_final = true, // using singly list, so terminate the link here
+                .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
             }
         }
     };

@@ -257,7 +257,7 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
             .length = n,
             .flags = {
                 .mark_eof = false, // EOF is set by TX side
-                .mark_final = true, // using singly list, so terminate the link here
+                .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
             }
         }
     };

@@ -372,7 +372,7 @@ static esp_err_t mcp_gdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *s
             .length = n,
             .flags = {
                 .mark_eof = true,  // mark the last item as EOF, so the RX channel can also received an EOF list item
-                .mark_final = true, // using singly list, so terminate the link here
+                .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
             }
         }
     };
@@ -29,7 +29,7 @@ esp_err_t esp_dma_split_rx_buffer_to_cache_aligned(void *rx_buffer, size_t buffe
 {
     esp_err_t ret = ESP_OK;
     uint8_t* stash_buffer = NULL;
-    ESP_RETURN_ON_FALSE(rx_buffer && buffer_len && align_buf_array, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    ESP_RETURN_ON_FALSE_ISR(rx_buffer && buffer_len && align_buf_array, ESP_ERR_INVALID_ARG, TAG, "invalid argument");

     // read the cache line size of internal and external memory, we also use this information to check if a given memory is behind the cache
     size_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
@@ -41,80 +41,83 @@ esp_err_t esp_dma_split_rx_buffer_to_cache_aligned(void *rx_buffer, size_t buffe
     } else if (esp_ptr_internal(rx_buffer)) {
         split_line_size = int_mem_cache_line_size;
     }
-    ESP_LOGV(TAG, "split_line_size:%zu", split_line_size);
+    bool align_required = split_line_size > 0;
+    ESP_EARLY_LOGV(TAG, "split_line_size:%zu", split_line_size);

-    // allocate the stash buffer from internal RAM
-    // Note, the split_line_size can be 0, in this case, the stash_buffer is also NULL, which is fine
-    stash_buffer = heap_caps_calloc(2, split_line_size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
-    ESP_RETURN_ON_FALSE(!(split_line_size && !stash_buffer), ESP_ERR_NO_MEM, TAG, "no mem for stash buffer");
+    if (*ret_stash_buffer == NULL) {
+        // If the stash buffer is not offered by the caller, allocate the stash buffer from internal RAM
+        // Note, the split_line_size can be 0, in this case, the stash_buffer is also NULL, which is fine
+        stash_buffer = heap_caps_calloc(2, split_line_size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+        ESP_RETURN_ON_FALSE_ISR(!(split_line_size && !stash_buffer), ESP_ERR_NO_MEM, TAG, "no mem for stash buffer");
+    } else {
+        // If the stash buffer is offered by the caller, check if it is aligned
+        ESP_RETURN_ON_FALSE_ISR(split_line_size == 0 || (uintptr_t)(*ret_stash_buffer) % split_line_size == 0,
+                                ESP_ERR_INVALID_ARG, TAG, "the offered stash buffer is not aligned");
+        // If the stash buffer is offered by the caller, use it
+        stash_buffer = *ret_stash_buffer;
+    }

     // clear align_array to avoid garbage data
     memset(align_buf_array, 0, sizeof(dma_buffer_split_array_t));
     bool need_cache_sync[3] = {false};

-    // if split_line_size is non-zero, split the buffer into head, body and tail
-    if (split_line_size > 0) {
+    // if align_required, split the buffer into head, body and tail
+    if (align_required) {
         // calculate head_overflow_len
         size_t head_overflow_len = (uintptr_t)rx_buffer % split_line_size;
         head_overflow_len = head_overflow_len ? split_line_size - head_overflow_len : 0;
-        ESP_LOGV(TAG, "head_addr:%p head_overflow_len:%zu", rx_buffer, head_overflow_len);
+        ESP_EARLY_LOGV(TAG, "head_addr:%p head_overflow_len:%zu", rx_buffer, head_overflow_len);
         // calculate tail_overflow_len
         size_t tail_overflow_len = ((uintptr_t)rx_buffer + buffer_len) % split_line_size;
-        ESP_LOGV(TAG, "tail_addr:%p tail_overflow_len:%zu", rx_buffer + buffer_len - tail_overflow_len, tail_overflow_len);
-
-        uint8_t extra_buf_count = 0;
-        uint8_t* input_buffer = (uint8_t*)rx_buffer;
-        align_buf_array->buf.head.recovery_address = input_buffer;
-        align_buf_array->buf.head.aligned_buffer = stash_buffer + split_line_size * extra_buf_count++;
-        align_buf_array->buf.head.length = head_overflow_len;
-        need_cache_sync[0] = int_mem_cache_line_size > 0;
-        align_buf_array->buf.body.recovery_address = input_buffer + head_overflow_len;
-        align_buf_array->buf.body.aligned_buffer = input_buffer + head_overflow_len;
-        align_buf_array->buf.body.length = buffer_len - head_overflow_len - tail_overflow_len;
-        need_cache_sync[1] = true;
-        align_buf_array->buf.tail.recovery_address = input_buffer + buffer_len - tail_overflow_len;
-        align_buf_array->buf.tail.aligned_buffer = stash_buffer + split_line_size * extra_buf_count++;
-        align_buf_array->buf.tail.length = tail_overflow_len;
-        need_cache_sync[2] = int_mem_cache_line_size > 0;
+        ESP_EARLY_LOGV(TAG, "tail_addr:%p tail_overflow_len:%zu", rx_buffer + buffer_len - tail_overflow_len, tail_overflow_len);

         // special handling when input_buffer length is no more than buffer alignment
-        if (head_overflow_len >= buffer_len || tail_overflow_len >= buffer_len) {
-            align_buf_array->buf.head.length = buffer_len ;
-            align_buf_array->buf.body.length = 0 ;
-            align_buf_array->buf.tail.length = 0 ;
+        bool is_small_buf = head_overflow_len >= buffer_len || tail_overflow_len >= buffer_len;
+        uint8_t extra_buf_count = 0;
+        uint8_t* input_buffer = (uint8_t*)rx_buffer;
+        if (head_overflow_len || is_small_buf) {
+            align_buf_array->buf.head.recovery_address = input_buffer;
+            align_buf_array->buf.head.aligned_buffer = stash_buffer + split_line_size * extra_buf_count++;
+            align_buf_array->buf.head.length = is_small_buf ? buffer_len : head_overflow_len;
+            need_cache_sync[0] = int_mem_cache_line_size > 0;
+        }
+        int body_len = (int)buffer_len - (int)head_overflow_len - (int)tail_overflow_len;
+        if (body_len > 0) {
+            align_buf_array->buf.body.recovery_address = input_buffer + head_overflow_len;
+            align_buf_array->buf.body.aligned_buffer = input_buffer + head_overflow_len;
+            align_buf_array->buf.body.length = body_len;
+            need_cache_sync[1] = true;
+        }
+        if (tail_overflow_len && !is_small_buf) {
+            align_buf_array->buf.tail.recovery_address = input_buffer + buffer_len - tail_overflow_len;
+            align_buf_array->buf.tail.aligned_buffer = stash_buffer + split_line_size * extra_buf_count++;
+            align_buf_array->buf.tail.length = tail_overflow_len;
+            need_cache_sync[2] = int_mem_cache_line_size > 0;
         }
     } else {
         align_buf_array->buf.body.aligned_buffer = rx_buffer;
         align_buf_array->buf.body.recovery_address = rx_buffer;
         align_buf_array->buf.body.length = buffer_len;
         need_cache_sync[1] = false;
     }

+    for (int i = 0; i < 3; i++) {
+        if (align_buf_array->aligned_buffer[i].length == 0) {
+            align_buf_array->aligned_buffer[i].aligned_buffer = NULL;
+            align_buf_array->aligned_buffer[i].recovery_address = NULL;
+            need_cache_sync[i] = false;
+        }
+    }

     // invalidate the aligned buffer if necessary
     for (int i = 0; i < 3; i++) {
-        if (need_cache_sync[i]) {
-            size_t sync_size = align_buf_array->aligned_buffer[i].length;
+        size_t sync_size = align_buf_array->aligned_buffer[i].length;
+        if (need_cache_sync[i] && sync_size > 0) {
             if (sync_size < split_line_size) {
                 // If the buffer is smaller than the cache line size, we need to sync the whole buffer
                 sync_size = split_line_size;
             }
             esp_err_t res = esp_cache_msync(align_buf_array->aligned_buffer[i].aligned_buffer, sync_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
-            ESP_GOTO_ON_ERROR(res, err, TAG, "failed to do cache sync");
+            ESP_GOTO_ON_ERROR_ISR(res, err, TAG, "failed to do cache sync");
         }
     }

     *ret_stash_buffer = stash_buffer;
     return ESP_OK;
 err:
-    if (stash_buffer) {
+    // Only free the stash buffer if it is not offered by the caller
+    if (stash_buffer && *ret_stash_buffer == NULL) {
         free(stash_buffer);
     }
     return ret;
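This split/merge pair is what lets parlio_rx accept arbitrary user buffers: the unaligned head and tail land in a stash buffer, the DMA runs over the aligned pieces, and the stash contents are copied back once the transfer ends. A hedged usage sketch (`user_buf` and `len` are made up; the final `free()` assumes the caller keeps ownership of a lazily allocated stash buffer):

    dma_buffer_split_array_t pieces = {0};
    uint8_t *stash = NULL; // NULL lets the function allocate the stash buffer itself
    ESP_ERROR_CHECK(esp_dma_split_rx_buffer_to_cache_aligned(user_buf, len, &pieces, &stash));

    // ... mount pieces.buf.head / .body / .tail on a DMA link list, run the transfer ...

    esp_dma_merge_aligned_rx_buffers(&pieces); // copy head/tail back into user_buf
    free(stash);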
@@ -203,9 +203,22 @@ esp_err_t gdma_link_mount_buffers(gdma_link_list_handle_t list, int start_item_i
         lli_nc->dw0.size = lli_nc->dw0.length;
         // mark the EOF node
         lli_nc->dw0.suc_eof = (config->flags.mark_eof == 1) && (i == num_items_need - 1);
-        // mark the final node
-        if ((config->flags.mark_final == 1) && (i == num_items_need - 1)) {
-            lli_nc->next = NULL;
+        if (i == num_items_need - 1) {
+            // mark the final node
+            switch (config->flags.mark_final) {
+            case GDMA_FINAL_LINK_TO_NULL:
+                lli_nc->next = NULL;
+                break;
+            case GDMA_FINAL_LINK_TO_HEAD:
+                lli_nc->next = (gdma_link_list_item_t *)(list->items);
+                break;
+            case GDMA_FINAL_LINK_TO_START:
+                lli_nc->next = (gdma_link_list_item_t *)(list->items + start_item_index * item_size);
+                break;
+            default:
+                lli_nc->next = (gdma_link_list_item_t *)(list->items + (i + begin_item_idx + 1) % list_item_capacity * item_size);
+                break;
+            }
         } else {
             lli_nc->next = (gdma_link_list_item_t *)(list->items + (i + begin_item_idx + 1) % list_item_capacity * item_size);
         }
@@ -56,6 +56,17 @@ esp_err_t gdma_new_link_list(const gdma_link_list_config_t *config, gdma_link_li
  */
 esp_err_t gdma_del_link_list(gdma_link_list_handle_t list);

+/**
+ * @brief Types for the next node of the final item in the DMA link list
+ *
+ */
+typedef enum {
+    GDMA_FINAL_LINK_TO_DEFAULT = 0, /*!< The next node is linked to the default next item in the link list */
+    GDMA_FINAL_LINK_TO_NULL = 1,    /*!< No next node is linked */
+    GDMA_FINAL_LINK_TO_HEAD = 2,    /*!< The next node is linked to the head item in the link list */
+    GDMA_FINAL_LINK_TO_START = 3,   /*!< The next node is linked to the start item in the link list */
+} gdma_final_node_link_type_t;
+
 /**
  * @brief DMA buffer mount configurations
  */
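Read together with the `switch` added to `gdma_link_mount_buffers()` earlier in this diff, the four values resolve the final mounted item's `next` pointer as follows:

    // GDMA_FINAL_LINK_TO_DEFAULT -> the next item in the list (wrapping at capacity)
    // GDMA_FINAL_LINK_TO_NULL    -> NULL, terminating a singly linked chain
    // GDMA_FINAL_LINK_TO_HEAD    -> item 0 of the list, forming a full ring
    // GDMA_FINAL_LINK_TO_START   -> the item at start_item_index, forming a partial ring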
@@ -66,12 +77,11 @@ typedef struct {
     struct gdma_buffer_mount_flags {
         uint32_t mark_eof: 1;   /*!< Whether to mark the list item as the "EOF" item.
                                      Note, an "EOF" descriptor can be interrupted differently by peripheral.
-                                     But it doesn't mean to terminate a DMA link (use `mark_final` instead).
+                                     But it doesn't mean to terminate a DMA link (set `mark_final` to GDMA_FINAL_LINK_TO_NULL instead).
                                      EOF link list item can also trigger an interrupt. */
-        uint32_t mark_final: 1; /*!< Whether to terminate the DMA link list at this item.
-                                     Note, DMA engine will stop at this item and trigger an interrupt.
-                                     If `mark_final` is not set, this list item will point to the next item, and
-                                     wrap around to the head item if it's the last one in the list. */
+        gdma_final_node_link_type_t mark_final: 2; /*!< Specify the next item of the final item of this mount.
+                                     For the other items that not the final one, it will be linked to the next item automatically and this field takes no effect.
+                                     Note, the final item here does not mean the last item in the link list. It is `start_item_index + num_items - 1` */
         uint32_t bypass_buffer_align_check: 1; /*!< Whether to bypass the buffer alignment check.
                                                     Only enable it when you know what you are doing. */
     } flags; //!< Flags for buffer mount configurations
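The parlio_rx driver above exercises both ring flavors: a finite transaction terminates its chain with `GDMA_FINAL_LINK_TO_NULL`, while an infinite (partial-receive) one closes the ring with `GDMA_FINAL_LINK_TO_HEAD`. Reduced to a sketch with illustrative names:

    gdma_buffer_mount_config_t last = {
        .buffer = chunk_buf,
        .length = chunk_len,
        .flags = {
            .mark_eof = true, // still fire the EOF callback on every pass
            .mark_final = infinite ? GDMA_FINAL_LINK_TO_HEAD : GDMA_FINAL_LINK_TO_NULL,
        },
    };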
@@ -278,7 +278,7 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
 #if !SOC_DMA_CAN_ACCESS_FLASH
             .flags = {
                 .mark_eof = true,
-                .mark_final = true, // using singly list, so terminate the link here
+                .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
             }
 #endif
         },

@@ -289,7 +289,7 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
             .length = src_string_len,
             .flags = {
                 .mark_eof = true,
-                .mark_final = true, // using singly list, so terminate the link here
+                .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
             }
         },
 #endif

@@ -448,7 +448,7 @@ static void test_gdma_m2m_unaligned_buffer_test(uint8_t *dst_data, uint8_t *src_
         .length = data_length,
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // using singly list, so terminate the link here
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
         }
         }
     };

@@ -609,7 +609,7 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
         .length = COPY_SIZE,
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // using singly list, so terminate the link here
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
         }
     };
     TEST_ESP_OK(gdma_link_mount_buffers(tx_link_list, 0, &tx_buf_mount_config, 1, NULL));

@@ -619,7 +619,7 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
         .buffer_alignment = 32,
         .length = COPY_SIZE,
         .flags = {
-            .mark_final = true, // using singly list, so terminate the link here
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // using singly list, so terminate the link here
         }
     };
     TEST_ESP_OK(gdma_link_mount_buffers(rx_link_list, 0, &rx_buf_mount_config, 1, NULL));
@@ -498,7 +498,7 @@ static esp_err_t panel_io_i80_tx_param(esp_lcd_panel_io_t *io, int lcd_cmd, cons
     gdma_buffer_mount_config_t mount_config = {
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };

@@ -578,7 +578,7 @@ static esp_err_t panel_io_i80_tx_color(esp_lcd_panel_io_t *io, int lcd_cmd, cons
     gdma_buffer_mount_config_t mount_config = {
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };

@@ -707,7 +707,7 @@ static void i2s_lcd_trigger_quick_trans_done_event(esp_lcd_i80_bus_handle_t bus)
         .length = 4,
         .flags = {
             .mark_eof = true, // mark the "EOF" flag to trigger I2S EOF interrupt
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };
     gdma_link_mount_buffers(bus->dma_link, 0, &mount_config, 1, NULL);

@@ -796,7 +796,7 @@ static IRAM_ATTR void i2s_lcd_default_isr_handler(void *args)
         .length = trans_desc->data_length,
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };
     gdma_link_mount_buffers(bus->dma_link, 0, &mount_config, 1, NULL);

@@ -473,7 +473,7 @@ static esp_err_t panel_io_i80_tx_param(esp_lcd_panel_io_t *io, int lcd_cmd, cons
         .length = trans_desc->data_length,
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };
     gdma_link_mount_buffers(bus->dma_link, 0, &mount_config, 1, NULL);

@@ -797,7 +797,7 @@ IRAM_ATTR static void i80_lcd_default_isr_handler(void *args)
         .length = trans_desc->data_length,
         .flags = {
             .mark_eof = true,
-            .mark_final = true, // singly link list, mark final descriptor
+            .mark_final = GDMA_FINAL_LINK_TO_NULL, // singly link list, mark final descriptor
         }
     };
     gdma_link_mount_buffers(bus->dma_link, 0, &mount_config, 1, NULL);

@@ -1101,7 +1101,7 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
     gdma_buffer_mount_config_t mount_cfg = {
         .length = rgb_panel->fb_size,
         .flags = {
-            .mark_final = rgb_panel->flags.stream_mode ? false : true,
+            .mark_final = rgb_panel->flags.stream_mode ? GDMA_FINAL_LINK_TO_DEFAULT : GDMA_FINAL_LINK_TO_NULL,
            .mark_eof = true,
         },
     };