/* reflow-oven-control-sw/stm-firmware/uart/dma-ring-buffer.c */
#include <stm-periph/uart/dma-ring-buffer.h>
#include <stm-periph/clock-enable-manager.h>
#include <stdbool.h>
#include <string.h>
/**
 * Enable or disable the peripheral clock of a DMA controller.
 *
 * @param base_dma DMA controller number (1 or 2)
 * @param clk_en   true to enable the clock, false to disable it
 * @return 0 on success, -1000 for an unknown controller number,
 *         otherwise the error code of the RCC manager call
 */
static int dma_ring_buffer_switch_clock_enable(uint8_t base_dma, bool clk_en)
{
	/* Select enable or disable entry point of the clock manager */
	int (*clk_func)(volatile uint32_t *, uint8_t) =
		clk_en ? rcc_manager_enable_clock : rcc_manager_disable_clock;
	uint8_t bit_no;

	switch (base_dma) {
	case 1:
		bit_no = BITMASK_TO_BITNO(RCC_AHB1ENR_DMA1EN);
		break;
	case 2:
		bit_no = BITMASK_TO_BITNO(RCC_AHB1ENR_DMA2EN);
		break;
	default:
		return -1000;
	}

	return clk_func(&RCC->AHB1ENR, bit_no);
}
/**
 * Initialize a circular peripheral-to-memory DMA transfer.
 *
 * Enables the DMA controller clock, fills in the ring-buffer bookkeeping
 * structure and programs the stream for circular, memory-incrementing
 * reception from @p src_reg into @p data_buffer. The stream is enabled
 * immediately.
 *
 * @param dma_buffer           Ring buffer state to initialize
 * @param base_dma_id          DMA controller number (1 or 2)
 * @param dma_stream           DMA stream to use
 * @param buffer_element_count Number of elements in @p data_buffer
 * @param element_size         Size of one element in bytes
 * @param data_buffer          Destination memory buffer
 * @param src_reg              Peripheral source register (e.g. UART DR)
 * @param dma_trigger_channel  DMA request channel (0..7)
 * @return 0 on success, -1000 on NULL pointer, -1007 on invalid channel,
 *         or the clock manager's error code
 */
int dma_ring_buffer_periph_to_mem_initialize(struct dma_ring_buffer_to_mem *dma_buffer, uint8_t base_dma_id,
	DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count, size_t element_size,
	void *data_buffer, void* src_reg, uint8_t dma_trigger_channel)
{
	int ret_val = 0;

	if (!dma_buffer || !dma_stream || !data_buffer || !src_reg)
		return -1000;

	/* Streams only provide request channels 0..7 */
	if (dma_trigger_channel > 7)
		return -1007;

	dma_buffer->base_dma_id = base_dma_id;
	ret_val = dma_ring_buffer_switch_clock_enable(base_dma_id, true);
	if (ret_val)
		return ret_val;

	dma_buffer->dma = dma_stream;
	dma_buffer->get_idx = 0;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->data_ptr = data_buffer;
	dma_buffer->element_size = element_size;

	dma_stream->PAR = (uint32_t)src_reg;
	dma_stream->M0AR = (uint32_t)data_buffer;
	/* Was written twice in the original; once is enough */
	dma_stream->NDTR = buffer_element_count;
	/* Channel select (bits 27:25), memory increment, circular mode, enable.
	 * NOTE(review): |= assumes the stream's CR is in reset state — verify
	 * callers never re-initialize a previously configured stream.
	 */
	dma_stream->CR |= (dma_trigger_channel<<25) | DMA_SxCR_MINC | DMA_SxCR_CIRC | DMA_SxCR_EN;

	return 0;
}
/**
 * Fetch the chunk of newly received data from the ring buffer.
 *
 * Returns at most one contiguous region. If the DMA write position has
 * wrapped past the end of the buffer, only the tail up to the buffer end is
 * handed out and 1 is returned; call again to fetch the rest.
 *
 * @param buff      Ring buffer state
 * @param data_buff Out: start of the new data region
 * @param len       Out: number of new elements (0 if none)
 * @return 0 on success, 1 if a wrap occurred (more data pending),
 *         -1 on NULL argument
 */
int dma_ring_buffer_periph_to_mem_get_data(struct dma_ring_buffer_to_mem *buff, const void **data_buff, size_t *len)
{
	size_t put_idx;
	const char *base;

	if (!buff || !data_buff || !len)
		return -1;

	/* The DMA counts NDTR down; convert it to a write index */
	put_idx = buff->buffer_count - buff->dma->NDTR;
	base = (const char *)buff->data_ptr;

	if (put_idx == buff->get_idx) {
		/* No new data */
		*len = 0;
		return 0;
	}

	*data_buff = base + buff->get_idx * buff->element_size;

	if (put_idx > buff->get_idx) {
		/* Contiguous region between read and write index */
		*len = put_idx - buff->get_idx;
		buff->get_idx += *len;
		return 0;
	}

	/* Writer wrapped around: hand out everything up to the buffer end */
	*len = buff->buffer_count - buff->get_idx;
	buff->get_idx = 0;
	return 1;
}
void dma_ring_buffer_periph_to_mem_stop(struct dma_ring_buffer_to_mem *buff)
{
if (!buff || !buff->dma)
return;
buff->dma->CR = 0;
buff->dma->NDTR = 0;
buff->dma->M1AR = 0;
buff->dma->FCR = 0;
dma_ring_buffer_switch_clock_enable(buff->base_dma_id, false);
}
/**
 * Initialize a memory-to-peripheral DMA ring buffer.
 *
 * Prepares the bookkeeping structure and programs the stream for
 * memory-to-peripheral transfers with transfer-complete interrupt.
 * The stream is NOT enabled; transfers are started on demand by
 * dma_ring_buffer_mem_to_periph_insert_data().
 *
 * @param dma_buffer           Ring buffer state to initialize
 * @param base_dma_id          DMA controller number (1 or 2)
 * @param dma_stream           DMA stream to use
 * @param buffer_element_count Number of elements in @p data_buffer
 * @param element_size         Size of one element in bytes
 * @param data_buffer          Source memory ring buffer
 * @param dma_trigger_channel  DMA request channel (0..7)
 * @param dest_reg             Peripheral destination register
 * @return 0 on success, -1000 on NULL pointer, -1007 on invalid channel,
 *         or the clock manager's error code
 */
int dma_ring_buffer_mem_to_periph_initialize(struct dma_ring_buffer_to_periph *dma_buffer, uint8_t base_dma_id, DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count, size_t element_size, void *data_buffer, uint8_t dma_trigger_channel, void *dest_reg)
{
	int ret_val;

	if (!dma_buffer || !dma_stream || !data_buffer || !dest_reg)
		return -1000;

	/* Validate request channel, consistent with the periph-to-mem init */
	if (dma_trigger_channel > 7)
		return -1007;

	dma_buffer->dma = dma_stream;
	dma_buffer->dma_base_id = base_dma_id;
	dma_buffer->src_buffer = data_buffer;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->element_size = element_size;
	dma_buffer->sw_put_idx = 0U;
	dma_buffer->dma_get_idx_current = 0U;
	dma_buffer->dma_get_idx_future = 0U;

	/* The clock enable result was silently ignored before; propagate it */
	ret_val = dma_ring_buffer_switch_clock_enable(base_dma_id, true);
	if (ret_val)
		return ret_val;

	dma_stream->PAR = (uint32_t)dest_reg;
	/* Memory increment, TC interrupt, channel select, direction mem->periph */
	dma_stream->CR = DMA_SxCR_MINC | DMA_SxCR_TCIE | (dma_trigger_channel<<25) | DMA_SxCR_DIR_0;

	return 0;
}
/**
 * Compute how many elements are currently stored in a ring buffer.
 *
 * @param buffer_size Total capacity of the ring buffer in elements
 * @param get_idx     Read index
 * @param put_idx     Write index
 * @return Number of occupied elements (0 when put_idx == get_idx)
 */
static size_t calculate_ring_buffer_fill_level(size_t buffer_size, size_t get_idx, size_t put_idx)
{
	/* A write index behind the read index means the writer wrapped around */
	return (put_idx >= get_idx)
		? put_idx - get_idx
		: buffer_size - get_idx + put_idx;
}
/* Start a DMA transfer for the pending ring-buffer region, if any.
 *
 * A transfer is "in flight" while dma_get_idx_current != dma_get_idx_future;
 * in that case nothing is done here — the TC interrupt callback advances the
 * current index and calls this function again. Because the DMA stream can only
 * move one contiguous region, data that wraps past the buffer end is split:
 * this call transfers up to the end of the buffer and a follow-up call (from
 * the interrupt) transfers the rest from index 0.
 */
static void queue_or_start_dma_transfer(struct dma_ring_buffer_to_periph *buff)
{
uint32_t dma_transfer_cnt;
if (!buff)
return;
/* Check if DMA is running. Do nothing in this case. Will be started from interrupt */
if (buff->dma_get_idx_current != buff->dma_get_idx_future)
return;
/* No new data to transfer */
if (buff->sw_put_idx == buff->dma_get_idx_current)
return;
/* Calculate future get idx. Stop at end of buffer to prevent impossible wrap around */
if (buff->sw_put_idx < buff->dma_get_idx_current && buff->sw_put_idx != 0) {
/* Data wraps: only transfer the tail up to the buffer end for now */
buff->dma_get_idx_future = 0U;
dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
} else {
buff->dma_get_idx_future = buff->sw_put_idx;
/* put index 0 means the data ends exactly at the buffer end */
if (buff->sw_put_idx == 0)
dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
else
dma_transfer_cnt = buff->sw_put_idx - buff->dma_get_idx_current;
}
/* Program element count and source address, then enable the stream */
buff->dma->NDTR = dma_transfer_cnt;
buff->dma->M0AR = (uint32_t)&((char *)buff->src_buffer)[buff->dma_get_idx_current * buff->element_size];
buff->dma->CR |= DMA_SxCR_EN;
}
/**
 * Insert data into the TX ring buffer and kick off a DMA transfer.
 *
 * Blocks (busy-waits) until enough space is free. The buffer is never
 * allowed to become completely full, because a full buffer would be
 * indistinguishable from an empty one (put == get); requests of
 * buffer_count or more elements are therefore split into chunks of at most
 * buffer_count - 1 elements.
 *
 * @param buff           Ring buffer state
 * @param data_to_insert Elements to copy into the buffer
 * @param count          Number of elements to insert (> 0)
 * @return 0 on success, -1000 on NULL pointer or zero count
 */
int dma_ring_buffer_mem_to_periph_insert_data(struct dma_ring_buffer_to_periph *buff, const void *data_to_insert, size_t count)
{
	int ret = 0;
	size_t free_item_count;
	const char *insert_ptr;
	char *dest_ptr;
	size_t tail_elements;

	if (!buff || !data_to_insert || !count)
		return -1000;

	/* Split oversized requests: (buffer_count - 1) elements now, rest recursively */
	if (count >= buff->buffer_count) {
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff, data_to_insert, buff->buffer_count - 1);
		if (ret)
			goto return_retval;
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff,
			(const char *)data_to_insert + (buff->buffer_count - 1) * buff->element_size,
			count - (buff->buffer_count - 1));
		goto return_retval;
	}

	/* Wait until count elements plus the one mandatory gap are free.
	 * NOTE(review): this relies on the TC interrupt advancing
	 * dma_get_idx_current; if that field is not volatile-qualified the
	 * compiler may hoist the load and spin forever — verify the struct.
	 */
	do {
		free_item_count = buff->buffer_count -
			calculate_ring_buffer_fill_level(buff->buffer_count, buff->dma_get_idx_current, buff->sw_put_idx);
	} while (free_item_count < count + 1);

	insert_ptr = (const char *)data_to_insert;
	dest_ptr = &((char *)buff->src_buffer)[buff->sw_put_idx * buff->element_size];

	if (buff->buffer_count - buff->sw_put_idx >= count) {
		/* Fits without wrapping around the buffer end */
		memcpy(dest_ptr, insert_ptr, buff->element_size * count);
		buff->sw_put_idx += count;
		if (buff->sw_put_idx >= buff->buffer_count)
			buff->sw_put_idx = 0;
	} else {
		/* Wraps: copy the tail up to the buffer end, then the rest to the
		 * start. Bug fix: the second memcpy length and the new put index
		 * previously subtracted a BYTE count from an ELEMENT count, which
		 * corrupted data whenever element_size != 1. Element and byte
		 * quantities are now kept strictly separate.
		 */
		tail_elements = buff->buffer_count - buff->sw_put_idx;
		memcpy(dest_ptr, insert_ptr, tail_elements * buff->element_size);
		insert_ptr += tail_elements * buff->element_size;
		memcpy(buff->src_buffer, insert_ptr, (count - tail_elements) * buff->element_size);
		buff->sw_put_idx = count - tail_elements;
	}

	queue_or_start_dma_transfer(buff);

return_retval:
	return ret;
}
/**
 * Transfer-complete interrupt callback for a mem-to-periph ring buffer.
 *
 * Must be called from the DMA stream's TC interrupt handler. Commits the
 * finished transfer by advancing the get index and starts the next queued
 * chunk, if any.
 *
 * @param buff Ring buffer state (NULL is ignored)
 */
void dma_ring_buffer_mem_to_periph_int_callback(struct dma_ring_buffer_to_periph *buff)
{
	/* Guard added for consistency with the other public entry points */
	if (!buff)
		return;

	/* The queued transfer finished; its end becomes the new get index */
	buff->dma_get_idx_current = buff->dma_get_idx_future;
	queue_or_start_dma_transfer(buff);
}
/**
 * Stop a memory-to-peripheral ring buffer.
 *
 * Disables the DMA stream, releases the controller clock and wipes the
 * bookkeeping structure.
 *
 * @param buff Ring buffer state to stop (NULL is ignored)
 */
void dma_ring_buffer_mem_to_periph_stop(struct dma_ring_buffer_to_periph *buff)
{
	/* Guards added: the periph-to-mem stop already checked these */
	if (!buff || !buff->dma)
		return;

	buff->dma->CR = 0;
	dma_ring_buffer_switch_clock_enable(buff->dma_base_id, false);
	memset(buff, 0, sizeof(*buff));
}