refactor(spi): replace dma_ll related in spi by dma driver (part1)

Author: wanlei
Date:   2023-10-18 17:25:20 +08:00
Parent: 35fc493dcc
Commit: d0023b061f

17 changed files with 376 additions and 334 deletions
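
At a glance, the change moves DMA bookkeeping (channel handles, descriptor arrays, descriptor count) out of spicommon_bus_context_t / spi_bus_attr_t and into a dedicated spi_dma_ctx_t that is created and destroyed through the spi_common layer. A minimal sketch of the intended call flow, based only on the signatures visible in this diff (hypothetical helper name, error handling abbreviated):

#include <stdlib.h>
#include "esp_check.h"
#include "driver/spi_common.h"
#include "esp_private/spi_common_internal.h"   // spi_dma_ctx_t and the spicommon_* prototypes below

static const char *TAG = "example";

// Hypothetical helper mirroring the order used by spi_bus_initialize()/spi_bus_free() in this commit.
static esp_err_t example_spi_dma_setup(spi_host_device_t host, int cfg_max_sz)
{
    spi_dma_ctx_t *dma_ctx = NULL;
    int actual_max_sz = 0;

    // 1) Allocate the TX/RX DMA channels (a GDMA pair or a dedicated SPI DMA channel, per target).
    ESP_RETURN_ON_ERROR(spicommon_dma_chan_alloc(host, SPI_DMA_CH_AUTO, &dma_ctx), TAG, "dma chan alloc failed");

    // 2) Allocate descriptor chains sized for the requested maximum transfer.
    esp_err_t err = spicommon_dma_desc_alloc(dma_ctx, cfg_max_sz, &actual_max_sz);
    if (err != ESP_OK) {
        spicommon_dma_chan_free(dma_ctx);       // releases the channels and frees dma_ctx itself
        return err;
    }

    // ... transfers may now use dma_ctx->dmadesc_tx / dma_ctx->dmadesc_rx;
    //     actual_max_sz is the rounded-up per-transfer byte limit ...

    // 3) Teardown: descriptor memory is freed by the caller, the context by spicommon_dma_chan_free().
    free(dma_ctx->dmadesc_tx);
    free(dma_ctx->dmadesc_rx);
    return spicommon_dma_chan_free(dma_ctx);
}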


@@ -10,6 +10,7 @@
#include "esp_types.h"
#include "esp_attr.h"
#include "esp_check.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h"
#include "esp_heap_caps.h"
#include "soc/spi_periph.h"
@@ -18,6 +19,7 @@
#include "esp_private/periph_ctrl.h"
#include "esp_private/spi_common_internal.h"
#include "esp_private/spi_share_hw_ctrl.h"
#include "esp_private/esp_cache_private.h"
#include "hal/spi_hal.h"
#include "hal/gpio_hal.h"
#if CONFIG_IDF_TARGET_ESP32
@@ -25,8 +27,6 @@
#endif
#if SOC_GDMA_SUPPORTED
#include "esp_private/gdma.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#endif
static const char *SPI_TAG = "spi";
@@ -42,10 +42,7 @@ static const char *SPI_TAG = "spi";
#define SPI_MAIN_BUS_DEFAULT() { \
.host_id = 0, \
.bus_attr = { \
.tx_dma_chan = 0, \
.rx_dma_chan = 0, \
.max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE, \
.dma_desc_num= 0, \
}, \
}
@@ -56,6 +53,7 @@ typedef struct {
spi_destroy_func_t destroy_func;
void* destroy_arg;
spi_bus_attr_t bus_attr;
spi_dma_ctx_t *dma_ctx;
#if SOC_GDMA_SUPPORTED
gdma_channel_handle_t tx_channel;
gdma_channel_handle_t rx_channel;
@@ -75,11 +73,19 @@ static __attribute__((constructor)) void spi_bus_lock_init_main_bus(void)
}
#endif
#if !SOC_GDMA_SUPPORTED
#if SOC_GDMA_SUPPORTED
//NOTE!! If neither A nor B is defined, '#if (A==B)' is true, because the preprocessor substitutes 0 for undefined symbols
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel
#endif
#else
//Each bit stands for one DMA channel; BIT(0) should be used for SPI1
static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif //#if !SOC_GDMA_SUPPORTED
#endif //!SOC_GDMA_SUPPORTED
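// Illustration of the NOTE above, with hypothetical symbols that are not part of this file:
// when neither FOO nor BAR is defined anywhere,
//     #if (FOO == BAR)   // both expand to 0, so the comparison is 0 == 0 and the branch is taken
//     #endif
// which is why the SPI_GDMA_NEW_CHANNEL selection above guards each comparison with defined(...).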
static inline bool is_valid_host(spi_host_device_t host)
{
@@ -157,7 +163,7 @@ static void connect_spi_and_dma(spi_host_device_t host, int dma_chan)
#endif
}
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan)
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{
assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32
@@ -187,60 +193,66 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
}
//On ESP32 and ESP32S2, the TX and RX DMA channels are always the same
*out_actual_tx_dma_chan = actual_dma_chan;
*out_actual_rx_dma_chan = actual_dma_chan;
dma_ctx->tx_dma_chan.chan_id = actual_dma_chan;
dma_ctx->rx_dma_chan.chan_id = actual_dma_chan;
dma_ctx->tx_dma_chan.host_id = host_id;
dma_ctx->rx_dma_chan.host_id = host_id;
dma_ctx->tx_dma_chan.dir = DMA_CHANNEL_DIRECTION_TX;
dma_ctx->rx_dma_chan.dir = DMA_CHANNEL_DIRECTION_RX;
if (!success) {
SPI_CHECK(false, "no available dma channel", ESP_ERR_NOT_FOUND);
}
connect_spi_and_dma(host_id, actual_dma_chan);
connect_spi_and_dma(host_id, *out_actual_tx_dma_chan);
spi_dma_enable_burst(dma_ctx->tx_dma_chan, true, true);
spi_dma_enable_burst(dma_ctx->rx_dma_chan, true, true);
return ret;
}
#else //SOC_GDMA_SUPPORTED
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan)
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{
assert(is_valid_host(host_id));
assert(dma_chan == SPI_DMA_CH_AUTO);
esp_err_t ret = ESP_OK;
spicommon_bus_context_t *ctx = bus_ctx[host_id];
if (dma_chan == SPI_DMA_CH_AUTO) {
gdma_channel_alloc_config_t tx_alloc_config = {
.flags.reserve_sibling = 1,
.direction = GDMA_CHANNEL_DIRECTION_TX,
};
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&tx_alloc_config, &ctx->tx_channel), SPI_TAG, "alloc gdma tx failed");
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&tx_alloc_config, &dma_ctx->tx_dma_chan), SPI_TAG, "alloc gdma tx failed");
gdma_channel_alloc_config_t rx_alloc_config = {
.direction = GDMA_CHANNEL_DIRECTION_RX,
.sibling_chan = ctx->tx_channel,
.sibling_chan = dma_ctx->tx_dma_chan,
};
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&rx_alloc_config, &ctx->rx_channel), SPI_TAG, "alloc gdma rx failed");
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&rx_alloc_config, &dma_ctx->rx_dma_chan), SPI_TAG, "alloc gdma rx failed");
if (host_id == SPI2_HOST) {
gdma_connect(ctx->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
gdma_connect(ctx->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
}
#if (SOC_SPI_PERIPH_NUM >= 3)
else if (host_id == SPI3_HOST) {
gdma_connect(ctx->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
gdma_connect(ctx->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
}
#endif
gdma_get_channel_id(ctx->tx_channel, (int *)out_actual_tx_dma_chan);
gdma_get_channel_id(ctx->rx_channel, (int *)out_actual_rx_dma_chan);
gdma_transfer_ability_t ability = {
.psram_trans_align = 0, // 0 falls back to the PSRAM data cache line size
.sram_trans_align = 4,
};
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->tx_dma_chan, &ability), SPI_TAG, "set gdma tx transfer ability failed");
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->rx_dma_chan, &ability), SPI_TAG, "set gdma rx transfer ability failed");
}
return ret;
}
#endif //#if !SOC_GDMA_SUPPORTED
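// Condensed sketch of the GDMA pairing used above (illustration only): the TX channel is requested
// with .flags.reserve_sibling = 1 so a partner channel on the same pair is held back, then the TX
// handle is passed as .sibling_chan when allocating RX, so both directions land on one channel pair.
//     gdma_channel_alloc_config_t tx_cfg = { .direction = GDMA_CHANNEL_DIRECTION_TX, .flags.reserve_sibling = 1 };
//     SPI_GDMA_NEW_CHANNEL(&tx_cfg, &tx_chan);
//     gdma_channel_alloc_config_t rx_cfg = { .direction = GDMA_CHANNEL_DIRECTION_RX, .sibling_chan = tx_chan };
//     SPI_GDMA_NEW_CHANNEL(&rx_cfg, &rx_chan);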
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan)
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx)
{
assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32
@@ -250,60 +262,56 @@ esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma
#endif
esp_err_t ret = ESP_OK;
uint32_t actual_tx_dma_chan = 0;
uint32_t actual_rx_dma_chan = 0;
spicommon_bus_context_t *ctx = (spicommon_bus_context_t *)calloc(1, sizeof(spicommon_bus_context_t));
if (!ctx) {
spi_dma_ctx_t *dma_ctx = (spi_dma_ctx_t *)calloc(1, sizeof(spi_dma_ctx_t));
if (!dma_ctx) {
ret = ESP_ERR_NO_MEM;
goto cleanup;
}
bus_ctx[host_id] = ctx;
ctx->host_id = host_id;
ret = alloc_dma_chan(host_id, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
ret = alloc_dma_chan(host_id, dma_chan, dma_ctx);
if (ret != ESP_OK) {
goto cleanup;
}
ctx->bus_attr.tx_dma_chan = actual_tx_dma_chan;
ctx->bus_attr.rx_dma_chan = actual_rx_dma_chan;
*out_actual_tx_dma_chan = actual_tx_dma_chan;
*out_actual_rx_dma_chan = actual_rx_dma_chan;
*out_dma_ctx = dma_ctx;
return ret;
cleanup:
free(ctx);
ctx = NULL;
free(dma_ctx);
return ret;
}
#if SOC_GDMA_SUPPORTED
esp_err_t spicommon_gdma_get_handle(spi_host_device_t host_id, gdma_channel_handle_t *gdma_handle, gdma_channel_direction_t gdma_direction)
esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz)
{
assert(is_valid_host(host_id));
ESP_RETURN_ON_FALSE((gdma_direction == GDMA_CHANNEL_DIRECTION_TX) || \
(gdma_direction == GDMA_CHANNEL_DIRECTION_RX), \
ESP_ERR_INVALID_ARG, SPI_TAG, "GDMA Direction not supported!");
int dma_desc_ct = (cfg_max_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
if (dma_desc_ct == 0) {
dma_desc_ct = 1; //default to 4k when max is not given
}
if (gdma_direction == GDMA_CHANNEL_DIRECTION_TX) {
*gdma_handle = bus_ctx[host_id]->tx_channel;
}
if (gdma_direction == GDMA_CHANNEL_DIRECTION_RX) {
*gdma_handle = bus_ctx[host_id]->rx_channel;
dma_ctx->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
dma_ctx->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (dma_ctx->dmadesc_tx == NULL || dma_ctx->dmadesc_rx == NULL) {
if (dma_ctx->dmadesc_tx) {
free(dma_ctx->dmadesc_tx);
dma_ctx->dmadesc_tx = NULL;
}
if (dma_ctx->dmadesc_rx) {
free(dma_ctx->dmadesc_rx);
dma_ctx->dmadesc_rx = NULL;
}
return ESP_ERR_NO_MEM;
}
dma_ctx->dma_desc_num = dma_desc_ct;
*actual_max_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
return ESP_OK;
}
#endif // SOC_GDMA_SUPPORTED
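// Worked example for spicommon_dma_desc_alloc(), assuming the usual 4092-byte
// DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED limit per descriptor:
//     cfg_max_sz = 10000  ->  dma_desc_ct = (10000 + 4091) / 4092 = 3 descriptors
//                             *actual_max_sz = 3 * 4092 = 12276 bytes
//     cfg_max_sz = 0      ->  dma_desc_ct falls back to 1, i.e. roughly one 4 KB transfer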
//----------------------------------------------------------free dma periph-------------------------------------------------------//
static esp_err_t dma_chan_free(spi_host_device_t host_id)
esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
{
assert(is_valid_host(host_id));
assert(dma_ctx);
spicommon_bus_context_t *ctx = bus_ctx[host_id];
#if !SOC_GDMA_SUPPORTED
//On ESP32S2, each SPI controller has its own DMA channel
int dma_chan = ctx->bus_attr.tx_dma_chan;
int dma_chan = dma_ctx->tx_dma_chan.chan_id;
assert(spi_dma_chan_enabled & BIT(dma_chan));
portENTER_CRITICAL(&spi_dma_spinlock);
@@ -311,41 +319,31 @@ static esp_err_t dma_chan_free(spi_host_device_t host_id)
#if SPI_LL_DMA_SHARED
PERIPH_RCC_RELEASE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(host_id, false);
spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
}
}
#else
SPI_COMMON_RCC_CLOCK_ATOMIC() {
spi_dma_ll_enable_bus_clock(host_id, false);
spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
}
#endif
portEXIT_CRITICAL(&spi_dma_spinlock);
#else //SOC_GDMA_SUPPORTED
if (ctx->rx_channel) {
gdma_disconnect(ctx->rx_channel);
gdma_del_channel(ctx->rx_channel);
if (dma_ctx->rx_dma_chan) {
gdma_disconnect(dma_ctx->rx_dma_chan);
gdma_del_channel(dma_ctx->rx_dma_chan);
}
if (ctx->tx_channel) {
gdma_disconnect(ctx->tx_channel);
gdma_del_channel(ctx->tx_channel);
if (dma_ctx->tx_dma_chan) {
gdma_disconnect(dma_ctx->tx_dma_chan);
gdma_del_channel(dma_ctx->tx_dma_chan);
}
#endif
free(dma_ctx);
return ESP_OK;
}
esp_err_t spicommon_dma_chan_free(spi_host_device_t host_id)
{
assert(is_valid_host(host_id));
esp_err_t ret = dma_chan_free(host_id);
free(bus_ctx[host_id]);
bus_ctx[host_id] = NULL;
return ret;
}
//----------------------------------------------------------IO general-------------------------------------------------------//
#if SOC_SPI_SUPPORT_OCT
static bool check_iomux_pins_oct(spi_host_device_t host, const spi_bus_config_t* bus_config)
@@ -757,8 +755,6 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
esp_err_t err = ESP_OK;
spicommon_bus_context_t *ctx = NULL;
spi_bus_attr_t *bus_attr = NULL;
uint32_t actual_tx_dma_chan = 0;
uint32_t actual_rx_dma_chan = 0;
SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_ctx[host_id] == NULL, "SPI bus already initialized.", ESP_ERR_INVALID_STATE);
@@ -791,35 +787,22 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
if (dma_chan != SPI_DMA_DISABLED) {
bus_attr->dma_enabled = 1;
err = alloc_dma_chan(host_id, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
err = spicommon_dma_chan_alloc(host_id, dma_chan, &ctx->dma_ctx);
if (err != ESP_OK) {
goto cleanup;
}
bus_attr->tx_dma_chan = actual_tx_dma_chan;
bus_attr->rx_dma_chan = actual_rx_dma_chan;
int dma_desc_ct = (bus_config->max_transfer_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
if (dma_desc_ct == 0) {
dma_desc_ct = 1; //default to 4k when max is not given
}
bus_attr->max_transfer_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
bus_attr->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
bus_attr->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (bus_attr->dmadesc_tx == NULL || bus_attr->dmadesc_rx == NULL) {
err = ESP_ERR_NO_MEM;
err = spicommon_dma_desc_alloc(ctx->dma_ctx, bus_config->max_transfer_sz, &bus_attr->max_transfer_sz);
if (err != ESP_OK) {
goto cleanup;
}
bus_attr->dma_desc_num = dma_desc_ct;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
bus_attr->internal_mem_align_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, (size_t *)&bus_attr->internal_mem_align_size);
#else
bus_attr->internal_mem_align_size = 4;
#endif
} else {
bus_attr->dma_enabled = 0;
bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
bus_attr->dma_desc_num = 0;
}
spi_bus_lock_config_t lock_config = {
@@ -854,12 +837,11 @@ cleanup:
if (bus_attr->lock) {
spi_bus_deinit_lock(bus_attr->lock);
}
free(bus_attr->dmadesc_tx);
free(bus_attr->dmadesc_rx);
bus_attr->dmadesc_tx = NULL;
bus_attr->dmadesc_rx = NULL;
if (bus_attr->dma_enabled) {
dma_chan_free(host_id);
if (ctx->dma_ctx) {
free(ctx->dma_ctx->dmadesc_tx);
free(ctx->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(ctx->dma_ctx);
ctx->dma_ctx = NULL;
}
}
spicommon_periph_free(host_id);
@@ -877,6 +859,15 @@ const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
return &bus_ctx[host_id]->bus_attr;
}
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id)
{
if (bus_ctx[host_id] == NULL) {
return NULL;
}
return bus_ctx[host_id]->dma_ctx;
}
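// Example use of the new accessor (illustration only):
//     const spi_dma_ctx_t *dctx = spi_bus_get_dma_ctx(SPI2_HOST);
//     if (dctx) {
//         // dctx->tx_dma_chan / rx_dma_chan and dctx->dmadesc_tx / dmadesc_rx describe the bus DMA resources
//     }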
esp_err_t spi_bus_free(spi_host_device_t host_id)
{
if (bus_ctx[host_id] == NULL) {
@@ -890,19 +881,17 @@ esp_err_t spi_bus_free(spi_host_device_t host_id)
if (ctx->destroy_func) {
err = ctx->destroy_func(ctx->destroy_arg);
}
spicommon_bus_free_io_cfg(&bus_attr->bus_cfg);
#ifdef CONFIG_PM_ENABLE
esp_pm_lock_delete(bus_attr->pm_lock);
#endif
spi_bus_deinit_lock(bus_attr->lock);
free(bus_attr->dmadesc_rx);
free(bus_attr->dmadesc_tx);
bus_attr->dmadesc_tx = NULL;
bus_attr->dmadesc_rx = NULL;
if (bus_attr->dma_enabled > 0) {
dma_chan_free(host_id);
if (ctx->dma_ctx) {
free(ctx->dma_ctx->dmadesc_tx);
free(ctx->dma_ctx->dmadesc_rx);
spicommon_dma_chan_free(ctx->dma_ctx);
ctx->dma_ctx = NULL;
}
spicommon_periph_free(host_id);
free(ctx);