/* Reflow Oven Controller
 *
 * Copyright (C) 2020 Mario Hüttel <mario.huettel@gmx.net>
 *
 * This file is part of the Reflow Oven Controller Project.
 *
 * The reflow oven controller is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * The reflow oven controller is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the reflow oven controller project.
 * If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file dma-ring-buffer.c
 * @brief DMA ring buffer implementation
 */

/**
 * @addtogroup dma-ring-buffer
 * @{
 */

#include <stm-periph/dma-ring-buffer.h>
#include <stm-periph/clock-enable-manager.h>
#include <stdbool.h>
#include <string.h>

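/**
 * @brief Enable or disable the AHB1 clock of a DMA controller
 * @param base_dma DMA controller number (1 or 2)
 * @param clk_en true: enable the clock, false: disable it
 * @return Return value of the RCC manager call, or -1000 if base_dma is invalid
 */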
static int dma_ring_buffer_switch_clock_enable(uint8_t base_dma, bool clk_en)
{
	int ret_val;
	int (*clk_func)(volatile uint32_t *, uint8_t);

	if (clk_en)
		clk_func = rcc_manager_enable_clock;
	else
		clk_func = rcc_manager_disable_clock;

	switch (base_dma) {
	case 1:
		ret_val = clk_func(&RCC->AHB1ENR, BITMASK_TO_BITNO(RCC_AHB1ENR_DMA1EN));
		break;
	case 2:
		ret_val = clk_func(&RCC->AHB1ENR, BITMASK_TO_BITNO(RCC_AHB1ENR_DMA2EN));
		break;
	default:
		ret_val = -1000;
		break;
	}

	return ret_val;
}

int dma_ring_buffer_periph_to_mem_initialize(struct dma_ring_buffer_to_mem *dma_buffer, uint8_t base_dma_id,
					     DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count, size_t element_size,
					     volatile void *data_buffer, void *src_reg, uint8_t dma_trigger_channel)
{
	int ret_val = 0;

	if (!dma_buffer || !dma_stream || !data_buffer || !src_reg)
		return -1000;

	if (dma_trigger_channel > 7)
		return -1007;

	dma_buffer->base_dma_id = base_dma_id;

	ret_val = dma_ring_buffer_switch_clock_enable(base_dma_id, true);
	if (ret_val)
		return ret_val;

	dma_buffer->dma = dma_stream;
	dma_buffer->get_idx = 0;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->data_ptr = data_buffer;
	dma_buffer->element_size = element_size;

	dma_stream->PAR = (uint32_t)src_reg;
	dma_stream->M0AR = (uint32_t)data_buffer;
	dma_stream->NDTR = buffer_element_count;

	dma_stream->CR |= (dma_trigger_channel << 25) | DMA_SxCR_MINC | DMA_SxCR_CIRC | DMA_SxCR_EN;

	return 0;
}

int dma_ring_buffer_periph_to_mem_get_data(struct dma_ring_buffer_to_mem *buff, const volatile void **data_buff, size_t *len)
{
	int ret_code = 0;
	uint32_t ndtr;
	size_t put_idx;

	if (!buff || !data_buff || !len)
		return -1;

	ndtr = buff->dma->NDTR;
	put_idx = buff->buffer_count - ndtr;

	/* Check if wrap around */
	if (put_idx < buff->get_idx) {
		/* Available data wraps around the end of the buffer: return the first part up to the end of the ring buffer */
		*data_buff = &(((char *)buff->data_ptr)[buff->get_idx * buff->element_size]);
		*len = buff->buffer_count - buff->get_idx;
		buff->get_idx = 0;
		ret_code = 2;
	} else if (put_idx > buff->get_idx) {
		/* Data does not wrap around the ring buffer. Return full data */
		*data_buff = &(((char *)buff->data_ptr)[buff->get_idx * buff->element_size]);
		*len = put_idx - buff->get_idx;
		buff->get_idx += *len;
		ret_code = 1;
	} else {
		/* No new data */
		*len = 0;
	}

	return ret_code;
}

void dma_ring_buffer_periph_to_mem_stop(struct dma_ring_buffer_to_mem *buff)
{
	if (!buff || !buff->dma)
		return;

	buff->dma->CR = 0;
	buff->dma->NDTR = 0;
	buff->dma->M1AR = 0;
	buff->dma->FCR = 0;

	dma_ring_buffer_switch_clock_enable(buff->base_dma_id, false);

	memset(buff, 0, sizeof(struct dma_ring_buffer_to_mem));
}

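/*
 * Usage sketch (illustrative only, not part of the driver): receive UART data into
 * a ring buffer and poll it from the main loop. The DMA stream, trigger channel,
 * peripheral data register, and the process() consumer are assumptions; the correct
 * stream/channel mapping depends on the actual device and peripheral.
 *
 *	static volatile char rx_buff[128];
 *	static struct dma_ring_buffer_to_mem rx_ring;
 *
 *	dma_ring_buffer_periph_to_mem_initialize(&rx_ring, 2, DMA2_Stream5, sizeof(rx_buff), 1,
 *						 rx_buff, (void *)&USART1->DR, 4);
 *
 *	const volatile void *data;
 *	size_t len;
 *
 *	while (dma_ring_buffer_periph_to_mem_get_data(&rx_ring, &data, &len) > 0)
 *		process(data, len);
 */
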
int dma_ring_buffer_mem_to_periph_initialize(struct dma_ring_buffer_to_periph *dma_buffer, uint8_t base_dma_id,
					     DMA_Stream_TypeDef *dma_stream, size_t buffer_element_count, size_t element_size,
					     volatile void *data_buffer, uint8_t dma_trigger_channel, void *dest_reg)
{
	if (!dma_buffer || !dma_stream || !data_buffer || !dest_reg)
		return -1000;

	dma_buffer->dma = dma_stream;
	dma_buffer->dma_base_id = base_dma_id;
	dma_buffer->src_buffer = data_buffer;
	dma_buffer->buffer_count = buffer_element_count;
	dma_buffer->element_size = element_size;
	dma_buffer->sw_put_idx = 0U;
	dma_buffer->dma_get_idx_current = 0U;
	dma_buffer->dma_get_idx_future = 0U;

	dma_ring_buffer_switch_clock_enable(base_dma_id, true);

	dma_stream->PAR = (uint32_t)dest_reg;
	dma_stream->CR = DMA_SxCR_MINC | DMA_SxCR_TCIE | (dma_trigger_channel << 25) | DMA_SxCR_DIR_0;

	return 0;
}

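/**
 * @brief Calculate the number of occupied elements in a ring buffer
 * @param buffer_size Total number of elements in the buffer
 * @param get_idx Current get (read) index
 * @param put_idx Current put (write) index
 * @return Fill level in elements, e.g. buffer_size = 8, get_idx = 6, put_idx = 2 gives 4
 */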
static size_t calculate_ring_buffer_fill_level(size_t buffer_size, size_t get_idx, size_t put_idx)
{
	size_t fill_level;

	if (put_idx >= get_idx) {
		fill_level = (put_idx - get_idx);
	} else {
		fill_level = buffer_size - get_idx + put_idx;
	}

	return fill_level;
}

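/**
 * @brief Start a new memory to peripheral DMA transfer if none is currently running
 *
 * Transfers at most up to the end of the ring buffer. The remaining, wrapped-around
 * part is started by the transfer complete interrupt via
 * dma_ring_buffer_mem_to_periph_int_callback().
 *
 * @param buff Ring buffer to operate on
 */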
static void queue_or_start_dma_transfer(struct dma_ring_buffer_to_periph *buff)
{
	uint32_t dma_transfer_cnt;

	if (!buff)
		return;

	/* Check if DMA is running. Do nothing in this case; the next transfer will be started from the interrupt */
	if (buff->dma_get_idx_current != buff->dma_get_idx_future)
		return;

	/* No new data to transfer */
	if (buff->sw_put_idx == buff->dma_get_idx_current)
		return;

	/* Calculate future get idx. Stop at end of buffer to prevent impossible wrap around */
	if (buff->sw_put_idx < buff->dma_get_idx_current && buff->sw_put_idx != 0) {
		buff->dma_get_idx_future = 0U;
		dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
	} else {
		buff->dma_get_idx_future = buff->sw_put_idx;
		if (buff->sw_put_idx == 0)
			dma_transfer_cnt = buff->buffer_count - buff->dma_get_idx_current;
		else
			dma_transfer_cnt = buff->sw_put_idx - buff->dma_get_idx_current;
	}

	buff->dma->NDTR = dma_transfer_cnt;
	buff->dma->M0AR = (uint32_t)&((char *)buff->src_buffer)[buff->dma_get_idx_current * buff->element_size];
	buff->dma->CR |= DMA_SxCR_EN;
}

int dma_ring_buffer_mem_to_periph_insert_data(struct dma_ring_buffer_to_periph *buff, const void *data_to_insert, size_t count)
{
	int ret = 0;
	size_t free_item_count;
	char *insert_ptr;
	char *dest_ptr;
	void *ptr;
	size_t first_round_count;

	if (!buff || !data_to_insert || !count)
		return -1000;

	/* Check if the data fits into the buffer minus one element. If not: insert buffer_count - 1 elements first and recurse for the rest.
	 * The buffer must never become completely full, because a full buffer cannot be differentiated from a completely empty one.
	 */
	if (count >= buff->buffer_count) {
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff, data_to_insert, buff->buffer_count - 1);
		if (ret)
			goto return_retval;
		ptr = (void *)(((char *)data_to_insert) + ((buff->buffer_count - 1) * buff->element_size));
		ret = dma_ring_buffer_mem_to_periph_insert_data(buff, ptr, count - buff->buffer_count + 1);
		goto return_retval;
	}

	/* Wait for buffer to be able to handle input */
	do {
		free_item_count = buff->buffer_count - calculate_ring_buffer_fill_level(buff->buffer_count, buff->dma_get_idx_current, buff->sw_put_idx);
	} while (free_item_count < count + 1);

	/* Fill up buffer (max is buffer end, wrap around afterwards) */
	insert_ptr = (char *)data_to_insert;
	dest_ptr = &((char *)buff->src_buffer)[buff->sw_put_idx * buff->element_size];

	/* Check if data completely fits into memory starting from put index */
	if (buff->buffer_count - buff->sw_put_idx >= count) {
		/* Copy data and move put index */
		memcpy(dest_ptr, insert_ptr, buff->element_size * count);
		buff->sw_put_idx += count;

		/* If buffer is used up to last element, set put index to beginning */
		if (buff->sw_put_idx >= buff->buffer_count)
			buff->sw_put_idx = 0;
	} else {
		/* Fill up to the end of the buffer first, then wrap around.
		 * first_round_count is counted in elements; byte counts are derived via element_size.
		 */
		first_round_count = buff->buffer_count - buff->sw_put_idx;
		memcpy(dest_ptr, insert_ptr, first_round_count * buff->element_size);
		insert_ptr += first_round_count * buff->element_size;
		memcpy((void *)buff->src_buffer, insert_ptr, (count - first_round_count) * buff->element_size);

		/* Move put index */
		buff->sw_put_idx = count - first_round_count;
	}

	/* Queue the DMA transfer. If the DMA is already enabled, this has no effect;
	 * the DMA is triggered from the interrupt in this case.
	 */
	queue_or_start_dma_transfer(buff);

return_retval:
	return ret;
}

void dma_ring_buffer_mem_to_periph_int_callback(struct dma_ring_buffer_to_periph *buff)
{
	/* Update current get index because DMA is finished */
	buff->dma_get_idx_current = buff->dma_get_idx_future;

	/* Start new DMA transfer if not all data is transferred yet */
	queue_or_start_dma_transfer(buff);
}

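/*
 * Usage sketch (illustrative only, not part of the driver): the transfer complete
 * interrupt of the used stream has to clear its flag and call the callback above so
 * that data queued behind a wrap around gets sent. DMA1 stream 6, the handler name,
 * the flag names, and tx_ring are assumptions for a hypothetical setup.
 *
 *	static struct dma_ring_buffer_to_periph tx_ring;
 *
 *	void DMA1_Stream6_IRQHandler(void)
 *	{
 *		if (DMA1->HISR & DMA_HISR_TCIF6) {
 *			DMA1->HIFCR = DMA_HIFCR_CTCIF6;
 *			dma_ring_buffer_mem_to_periph_int_callback(&tx_ring);
 *		}
 *	}
 */
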
void dma_ring_buffer_mem_to_periph_stop(struct dma_ring_buffer_to_periph *buff)
{
	if (!buff || !buff->dma)
		return;

	/* Stop DMA and clock */
	buff->dma->CR = 0;
	dma_ring_buffer_switch_clock_enable(buff->dma_base_id, false);

	/* Reset the structure */
	memset(buff, 0, sizeof(struct dma_ring_buffer_to_periph));
}

/** @} */