/* reflow-oven-control-sw/stm-firmware/stm-periph/dma-ring-buffer.c */
/* Reflow Oven Controller
*
* Copyright (C) 2020 Mario Hüttel <mario.huettel@gmx.net>
*
* This file is part of the Reflow Oven Controller Project.
*
* The reflow oven controller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The Reflow Oven Control Firmware is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the reflow oven controller project.
* If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file dma-ring-buffer.c
* @brief DMA Ring buffer implemenation
*/
/**
* @addtogroup dma-ring-buffer
* @{
*/
#include <stm-periph/dma-ring-buffer.h>
#include <stm-periph/rcc-manager.h>
#include <stdbool.h>
#include <string.h>
/**
 * @brief Compute the number of occupied elements in a ring buffer.
 *
 * The buffer is considered empty when @p put_idx equals @p get_idx; it
 * therefore can never report a completely full buffer.
 *
 * @param buffer_size Total element capacity of the ring buffer
 * @param get_idx Consumer (read) index
 * @param put_idx Producer (write) index
 * @return Number of elements currently stored in the buffer
 */
static size_t calculate_ring_buffer_fill_level(size_t buffer_size, size_t get_idx, size_t put_idx)
{
	/* Wrapped case: data spans the end of the buffer */
	if (put_idx < get_idx)
		return buffer_size - get_idx + put_idx;

	/* Linear case: data lies between get and put index */
	return put_idx - get_idx;
}
/**
 * @brief Enable or disable the AHB1 clock of a DMA controller.
 * @param base_dma DMA controller number (1 or 2)
 * @param clk_en true to enable the clock, false to disable it
 * @return Result of the RCC manager call, or -1000 for an unknown controller number
 */
static int dma_ring_buffer_switch_clock_enable(uint8_t base_dma, bool clk_en)
{
	uint8_t bit_no;

	/* Map the controller number to its enable bit in RCC->AHB1ENR */
	switch (base_dma) {
	case 1:
		bit_no = BITMASK_TO_BITNO(RCC_AHB1ENR_DMA1EN);
		break;
	case 2:
		bit_no = BITMASK_TO_BITNO(RCC_AHB1ENR_DMA2EN);
		break;
	default:
		return -1000;
	}

	if (clk_en)
		return rcc_manager_enable_clock(&RCC->AHB1ENR, bit_no);

	return rcc_manager_disable_clock(&RCC->AHB1ENR, bit_no);
}
/**
 * @brief Initialize a peripheral-to-memory DMA ring buffer.
 *
 * Enables the DMA controller clock and configures the stream in circular
 * mode with memory increment, reading from @p src_reg into @p data_buffer.
 * The stream is enabled immediately.
 *
 * @param dma_buffer Ring buffer management structure to initialize
 * @param base_dma_id DMA controller number (1 or 2)
 * @param dma_stream DMA stream to use
 * @param buffer_element_count Number of elements in @p data_buffer
 * @param element_size Size of one element in bytes
 * @param data_buffer Destination memory buffer
 * @param src_reg Peripheral source register address
 * @param dma_trigger_channel DMA trigger channel (0..7)
 * @return 0 on success, -1000 on NULL pointer, -1007 on invalid channel,
 *         or the error code of the clock enable call
 */
int dma_ring_buffer_periph_to_mem_initialize(struct dma_ring_buffer_to_mem *dma_buffer, uint8_t base_dma_id,
					     DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count,
					     size_t element_size, volatile void *data_buffer,
					     void *src_reg, uint8_t dma_trigger_channel)
{
	int ret_val;

	if (!dma_buffer || !dma_stream || !data_buffer || !src_reg)
		return -1000;

	/* The CHSEL field in DMA_SxCR is only 3 bits wide */
	if (dma_trigger_channel > 7)
		return -1007;

	dma_buffer->base_dma_id = base_dma_id;
	ret_val = dma_ring_buffer_switch_clock_enable(base_dma_id, true);
	if (ret_val)
		return ret_val;

	dma_buffer->dma = dma_stream;
	dma_buffer->get_idx = 0;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->data_ptr = data_buffer;
	dma_buffer->element_size = element_size;

	dma_stream->PAR = (uint32_t)src_reg;
	dma_stream->M0AR = (uint32_t)data_buffer;
	/* Fix: NDTR was previously written twice in a row; one write suffices */
	dma_stream->NDTR = buffer_element_count;
	/* Channel select (bits 27:25), memory increment, circular mode, enable */
	dma_stream->CR |= (dma_trigger_channel << 25) | DMA_SxCR_MINC | DMA_SxCR_CIRC | DMA_SxCR_EN;

	return 0;
}
/**
 * @brief Retrieve newly received data from a peripheral-to-memory ring buffer.
 *
 * Returns a pointer directly into the ring buffer; no data is copied. When the
 * available data wraps around the buffer end, only the part up to the end is
 * returned and 2 is signalled so the caller can fetch the remainder with a
 * second call.
 *
 * @param buff Ring buffer to read from
 * @param data_buff Output: pointer to the first new element
 * @param len Output: number of new elements available at @p data_buff
 * @return 0 if no new data, 1 if all data returned, 2 if a second call is
 *         needed (wrap around), -1 on NULL pointer
 */
int dma_ring_buffer_periph_to_mem_get_data(struct dma_ring_buffer_to_mem *buff, const volatile void **data_buff,
					   size_t *len)
{
	size_t put_idx;
	char *base;

	if (!buff || !data_buff || !len)
		return -1;

	/* Sample NDTR once; the DMA decrements it as elements arrive */
	put_idx = buff->buffer_count - buff->dma->NDTR;
	base = (char *)buff->data_ptr;

	if (put_idx == buff->get_idx) {
		/* Nothing new arrived since the last call */
		*len = 0;
		return 0;
	}

	*data_buff = &base[buff->get_idx * buff->element_size];

	if (put_idx < buff->get_idx) {
		/* Data wraps around the buffer end: hand out the chunk up to
		 * the end and reset the get index for the follow-up call.
		 */
		*len = buff->buffer_count - buff->get_idx;
		buff->get_idx = 0;
		return 2;
	}

	/* Contiguous data: hand out everything up to the put index */
	*len = put_idx - buff->get_idx;
	buff->get_idx += *len;
	return 1;
}
void dma_ring_buffer_periph_to_mem_stop(struct dma_ring_buffer_to_mem *buff)
{
if (!buff || !buff->dma)
return;
buff->dma->CR = 0;
buff->dma->NDTR = 0;
buff->dma->M1AR = 0;
buff->dma->FCR = 0;
dma_ring_buffer_switch_clock_enable(buff->base_dma_id, false);
memset(buff, 0, sizeof(struct dma_ring_buffer_to_mem));
}
/**
 * @brief Query the fill level of a peripheral-to-memory ring buffer.
 * @param buff Ring buffer to query
 * @param fill_level Output: number of unread elements in the buffer
 * @return 0 on success, -1000 on NULL pointer
 */
int dma_ring_buffer_periph_to_mem_fill_level(struct dma_ring_buffer_to_mem *buff, size_t *fill_level)
{
	if (!buff || !fill_level)
		return -1000;

	/* The hardware down-counter NDTR yields the put index */
	*fill_level = calculate_ring_buffer_fill_level(buff->buffer_count, buff->get_idx,
						       buff->buffer_count - buff->dma->NDTR);

	return 0;
}
/**
 * @brief Initialize a memory-to-peripheral DMA ring buffer.
 *
 * Enables the DMA controller clock and configures the stream for
 * memory-to-peripheral transfers with transfer-complete interrupt.
 * The stream is not enabled here; transfers are started on demand.
 *
 * @param dma_buffer Ring buffer management structure to initialize
 * @param base_dma_id DMA controller number (1 or 2)
 * @param dma_stream DMA stream to use
 * @param buffer_element_count Number of elements in @p data_buffer
 * @param element_size Size of one element in bytes
 * @param data_buffer Source memory buffer
 * @param dma_trigger_channel DMA trigger channel (0..7)
 * @param dest_reg Peripheral destination register address
 * @return 0 on success, -1000 on NULL pointer, -1007 on invalid channel,
 *         or the error code of the clock enable call
 */
int dma_ring_buffer_mem_to_periph_initialize(struct dma_ring_buffer_to_periph *dma_buffer, uint8_t base_dma_id,
					     DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count,
					     size_t element_size, volatile void *data_buffer,
					     uint8_t dma_trigger_channel, void *dest_reg)
{
	int ret_val;

	if (!dma_buffer || !dma_stream || !data_buffer || !dest_reg)
		return -1000;

	/* Fix: validate the channel like the periph-to-mem variant does;
	 * CHSEL in DMA_SxCR is only 3 bits wide.
	 */
	if (dma_trigger_channel > 7)
		return -1007;

	dma_buffer->dma = dma_stream;
	dma_buffer->dma_base_id = base_dma_id;
	dma_buffer->src_buffer = data_buffer;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->element_size = element_size;
	dma_buffer->sw_put_idx = 0U;
	dma_buffer->dma_get_idx_current = 0U;
	dma_buffer->dma_get_idx_future = 0U;

	/* Fix: the return value of the clock enable call was previously ignored */
	ret_val = dma_ring_buffer_switch_clock_enable(base_dma_id, true);
	if (ret_val)
		return ret_val;

	dma_stream->PAR = (uint32_t)dest_reg;
	/* Memory increment, transfer-complete IRQ, channel select, mem-to-periph direction */
	dma_stream->CR = DMA_SxCR_MINC | DMA_SxCR_TCIE | (dma_trigger_channel << 25) | DMA_SxCR_DIR_0;

	return 0;
}
/**
 * @brief Start a DMA transfer for pending ring buffer data if the stream is idle.
 *
 * A transfer never crosses the buffer end: when the pending data wraps
 * around, only the part up to the end is programmed and the remainder is
 * started later from the transfer-complete interrupt via
 * dma_ring_buffer_mem_to_periph_int_callback().
 *
 * @param buff Ring buffer to service; NULL is ignored
 */
static void queue_or_start_dma_transfer(struct dma_ring_buffer_to_periph *buff)
{
	uint32_t dma_transfer_cnt;

	if (!buff)
		return;

	/* A transfer is still in flight (current != future get index).
	 * Do nothing; the next chunk will be started from the interrupt.
	 */
	if (buff->dma_get_idx_current != buff->dma_get_idx_future)
		return;

	/* No new data to transfer */
	if (buff->sw_put_idx == buff->dma_get_idx_current)
		return;

	/* Calculate the future get index. Stop at the end of the buffer to
	 * prevent an impossible wrap around within a single DMA transfer.
	 */
	if (buff->sw_put_idx < buff->dma_get_idx_current && buff->sw_put_idx != 0) {
		/* Data wraps: transfer up to the buffer end, wrap get index to 0 */
		buff->dma_get_idx_future = 0U;
		dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
	} else {
		buff->dma_get_idx_future = buff->sw_put_idx;
		if (buff->sw_put_idx == 0)
			/* Put index sits exactly at the wrap point: transfer to buffer end */
			dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
		else
			dma_transfer_cnt = buff->sw_put_idx - buff->dma_get_idx_current;
	}

	/* Program element count and source address, then enable the stream */
	buff->dma->NDTR = dma_transfer_cnt;
	buff->dma->M0AR = (uint32_t)&((char *)buff->src_buffer)[buff->dma_get_idx_current * buff->element_size];
	buff->dma->CR |= DMA_SxCR_EN;
}
/**
 * @brief Copy data into a memory-to-peripheral ring buffer and start output.
 *
 * Blocks (busy-waits) until enough space is free. Data larger than the
 * buffer is inserted recursively in chunks of at most buffer_count - 1
 * elements. After copying, a DMA transfer is queued or started.
 *
 * @param buff Ring buffer to write to
 * @param data_to_insert Data to copy into the buffer
 * @param count Number of elements to insert
 * @return 0 on success, -1000 on NULL pointer or zero count
 */
int dma_ring_buffer_mem_to_periph_insert_data(struct dma_ring_buffer_to_periph *buff, const void *data_to_insert,
					      size_t count)
{
	int ret = 0;
	size_t free_item_count;
	char *insert_ptr;
	char *dest_ptr;
	void *ptr;
	size_t first_round_count;

	if (!buff || !data_to_insert || !count)
		return -1000;

	/* Check if data fits into buffer minus one element. If not: insert
	 * buffer_count - 1 elements and recurse for the rest.
	 * The buffer is not allowed to be completely full, because a full
	 * buffer cannot be differentiated from a completely empty one.
	 */
	if (count >= buff->buffer_count) {
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff, data_to_insert, buff->buffer_count - 1);
		if (ret)
			goto return_retval;
		ptr = (void *)(((char *)data_to_insert) + ((buff->buffer_count-1) * buff->element_size));
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff, ptr, count - buff->buffer_count + 1);
		goto return_retval;
	}

	/* Busy-wait until the buffer can take the input plus one spare element.
	 * dma_get_idx_current is advanced from the DMA interrupt.
	 * NOTE(review): the struct fields are not volatile-qualified here —
	 * verify the compiler re-reads dma_get_idx_current each iteration
	 * instead of hoisting the load out of the loop.
	 */
	do {
		free_item_count = buff->buffer_count -
			calculate_ring_buffer_fill_level(buff->buffer_count, buff->dma_get_idx_current,
							 buff->sw_put_idx);
	} while (free_item_count < count+1);

	/* Fill up buffer (max is buffer end, wrap around afterwards) */
	insert_ptr = (char *)data_to_insert;
	dest_ptr = &((char *)buff->src_buffer)[buff->sw_put_idx * buff->element_size];

	/* Check if data completely fits into memory starting from put index */
	if (buff->buffer_count - buff->sw_put_idx >= count) {
		/* Copy data and move put index */
		memcpy(dest_ptr, insert_ptr, buff->element_size * count);
		buff->sw_put_idx += count;
		/* If buffer is used up to last element, set put index to beginning */
		if (buff->sw_put_idx >= buff->buffer_count)
			buff->sw_put_idx = 0;
	} else {
		/* Fill up to end of buffer and fill rest after wrap around.
		 * Note: first_round_count is a byte count, while count and
		 * sw_put_idx are element counts.
		 */
		first_round_count = buff->element_size * (buff->buffer_count - buff->sw_put_idx);
		memcpy(dest_ptr, insert_ptr, first_round_count);
		insert_ptr += first_round_count;
		memcpy((void *)buff->src_buffer, insert_ptr, count - first_round_count);
		/* Move put index */
		buff->sw_put_idx = count - first_round_count;
	}

	/* Queue the DMA transfer. If DMA is already running, this has no effect;
	 * the next chunk is triggered from the interrupt in that case.
	 */
	queue_or_start_dma_transfer(buff);
return_retval:
	return ret;
}
/**
 * @brief DMA transfer-complete interrupt callback for a mem-to-periph ring buffer.
 *
 * Must be called from the stream's transfer-complete interrupt. Advances the
 * get index past the finished transfer and starts the next chunk if more
 * data is pending.
 *
 * @param buff Ring buffer the completed transfer belongs to; NULL is ignored
 */
void dma_ring_buffer_mem_to_periph_int_callback(struct dma_ring_buffer_to_periph *buff)
{
	/* Fix: guard against NULL like every other entry point of this module */
	if (!buff)
		return;

	/* Update the current get index because the DMA transfer is finished */
	buff->dma_get_idx_current = buff->dma_get_idx_future;

	/* Start a new DMA transfer if not all data is transferred yet */
	queue_or_start_dma_transfer(buff);
}
/**
 * @brief Stop a memory-to-peripheral DMA ring buffer and reset its state.
 *
 * Disables the DMA stream, switches off the DMA controller clock and zeroes
 * the management structure.
 *
 * @param buff Ring buffer to stop; NULL is ignored
 */
void dma_ring_buffer_mem_to_periph_stop(struct dma_ring_buffer_to_periph *buff)
{
	/* Fix: guard against NULL / uninitialized state, consistent with
	 * dma_ring_buffer_periph_to_mem_stop()
	 */
	if (!buff || !buff->dma)
		return;

	/* Stop DMA and clock */
	buff->dma->CR = 0;
	dma_ring_buffer_switch_clock_enable(buff->dma_base_id, false);

	/* Reset the structure */
	memset(buff, 0, sizeof(struct dma_ring_buffer_to_periph));
}
int dma_ring_buffer_mem_to_periph_fill_level(struct dma_ring_buffer_to_periph *buff, size_t *fill_level)
{
if (!buff || !fill_level)
return -1000;
*fill_level = calculate_ring_buffer_fill_level(buff->buffer_count, buff->dma_get_idx_current, buff->sw_put_idx);
return 0;
}
/** @} */