You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1424 lines
35 KiB

/*
* Copyright (c) Hisilicon Technologies Co., Ltd. 2019-2019. All rights reserved.
* Description: MPI function file for Huanglong low latency
* Author: audio
* Create: 2019-05-30
*/
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dlfcn.h>
#include "mpi_ao_debug.h"
#include "mpi_ao_lowlatency.h"
#include "securec.h"
#include "list.h"
#include "mpi_memory_ext.h"
#include "mpi_ao_ext.h"
#include "soc_errno.h"
#include "drv_ioctl_ao.h"
#include "drv_aoe_struct.h"
#include "mpi_ao_circ_buf.h"
#include "mpi_ao.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif /* __cplusplus */
/* Per-slot state for one memory-mapped low-latency track. */
typedef struct {
    td_handle track;          /* owning track handle (TD_INVALID_HANDLE when free) */
    td_bool used;             /* slot is fully mapped and in service */
    td_u32 bit_depth;         /* PCM bit depth (reset default: 16-bit) */
    td_u32 sample_rate;       /* PCM sample rate (reset default: 48 kHz) */
    td_u32 channels;          /* PCM channel count (reset default: stereo) */
    td_u32 buf_level_ms;      /* target buffering level in milliseconds */
    td_u32 latency_size;      /* buffering ceiling derived from buf_level_ms (see llt_source_reset) */
    td_u32 aip_id;            /* AIP instance chosen by the driver (AOE_MAX_AIP_NUM = none) */
    td_bool sw_aoe;           /* TD_TRUE when the AOE runs in software (ION-backed reg buffer) */
    ext_audio_buffer aip_buf; /* mapped AIP PCM buffer */
    ext_audio_buffer aoe_reg; /* mapped AOE register area */
    circ_buf cb;              /* ring view over aip_buf driven by the AIP rd/wr registers */
} track_mmap_source;
#define MAX_LL_TRACK_NUM 4
/* Identifiers for the fixed pool of low-latency track map slots. */
typedef enum {
    MMAP_ID0 = 0x00,
    MMAP_ID1 = 0x01,
    MMAP_ID2 = 0x02,
    MMAP_ID3 = 0x03,
    MMAP_ID_MAX = MAX_LL_TRACK_NUM, /* also used as the "not found / no free slot" sentinel */
} mmap_id;
static pthread_mutex_t g_llt_mutex = PTHREAD_MUTEX_INITIALIZER;
static track_mmap_source g_llt_map[MAX_LL_TRACK_NUM];
#define AO_TRACK_LOWLATENCY_DEFATTR_BUFLEVELMS 400
#define MAX_LATENCY_MS 40 /* 40ms */
#define MIN_LATENCY_MS 10 /* 10ms */
#define PAGE_SIZE_MASK 0xfffff000
#define MAX_PCM_SAMPLES 0xffff
#define AUDIO_ALIGN_SIZE_4 0x4
#define AUDIO_ALIGN_SIZE_MASK_4 0x3
/* Round x up to the next multiple of 4; values already aligned pass through. */
static inline td_u32 audio_align_4(td_u32 x)
{
    if ((x & AUDIO_ALIGN_SIZE_MASK_4) == 0) {
        return x;
    }
    return (x | AUDIO_ALIGN_SIZE_MASK_4) + 1;
}
/* Unmap a previously mapped region; a NULL buffer is silently ignored and
 * munmap() failures are only logged. */
static td_void audio_munmap(td_void *buf, td_u32 size)
{
    td_s32 ret;

    if (buf == TD_NULL) {
        return;
    }

    ret = munmap(buf, size);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(munmap, ret);
    }
}
/* Release an ION-backed mapping. ION buffers are mapped with plain mmap()
 * here, so releasing them is identical to releasing any other mapping. */
static td_void audio_munmap_ion(td_void *buf, td_u32 size)
{
    audio_munmap(buf, size);
}
/* Map `size` bytes at offset `phys_addr` through `fd` (read/write, shared).
 * Returns the mapping address, or TD_NULL on a bad fd or mmap failure. */
static td_void *audio_mmap(td_s32 fd, td_u32 size, td_u64 phys_addr)
{
    td_void *buf = TD_NULL;

    if (fd < 0) {
        return TD_NULL;
    }
    buf = (td_void *)mmap64((td_void *)0, size,
        PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off64_t)phys_addr);
    if (buf == MAP_FAILED) {
        /* fixed format: size is unsigned, %d could misprint large sizes */
        soc_log_err("mmap failed, fd = %d, size = %u\n", fd, size);
        return TD_NULL;
    }
    return buf;
}
/* Acquire the low-latency track table lock; failure is only logged. */
static inline td_void llt_mutex_lock(td_void)
{
    int rc = pthread_mutex_lock(&g_llt_mutex);
    if (rc != 0) {
        soc_log_err("Lock mutex failed\n");
    }
}
/* Release the low-latency track table lock; failure is only logged. */
static inline td_void llt_mutex_unlock(td_void)
{
    int rc = pthread_mutex_unlock(&g_llt_mutex);
    if (rc != 0) {
        soc_log_err("Unlock mutex failed\n");
    }
}
/* Map a DMA buffer described by map_buf into user space through its fd. */
static td_s32 dma_mmap(ext_audio_buffer *map_buf)
{
    map_buf->virt_addr = audio_mmap(map_buf->fd, map_buf->size, 0);
    return (map_buf->virt_addr != TD_NULL) ? TD_SUCCESS : SOC_ERR_AO_MALLOC_FAILED;
}
/* Release the user-space mapping of a DMA buffer and clear the stale pointer. */
static td_void dma_munmap(ext_audio_buffer *map_buf)
{
    td_void *va = map_buf->virt_addr;
    map_buf->virt_addr = TD_NULL;
    audio_munmap_ion(va, map_buf->size);
}
/* Payload size in bytes of one PCM frame (samples * channels * sample width);
 * returns 0 when sample or channel counts are out of range. */
static td_u32 ao_frame_pcm_data_size(const ext_ao_frame *ao_frame)
{
    td_u32 sample_bytes;

    if ((ao_frame->pcm_samples > MAX_PCM_SAMPLES) ||
        (ao_frame->channels > EXT_AUDIO_CH_16)) {
        return 0;
    }
    /* 16-bit samples occupy 2 bytes; everything else is stored in 32-bit containers */
    sample_bytes = (ao_frame->bit_depth == EXT_BIT_DEPTH_16) ? sizeof(td_s16) : sizeof(td_s32);
    return ao_frame->pcm_samples * ao_frame->channels * sample_bytes;
}
/* Index of the first unused slot in g_llt_map, or MMAP_ID_MAX when the pool
 * is exhausted (note MMAP_ID_MAX == MAX_LL_TRACK_NUM). */
static td_u32 llt_get_free_map_source(td_void)
{
    td_u32 slot;

    for (slot = MMAP_ID0; slot < MMAP_ID_MAX; slot++) {
        if (g_llt_map[slot].used == TD_FALSE) {
            break;
        }
    }
    return slot;
}
/* Slot index owning `track`, or MMAP_ID_MAX when not found.
 * Fix: only active slots are matched. A free slot still carries
 * TD_INVALID_HANDLE (and, after a failed enable, possibly a stale handle),
 * so matching on the handle alone could return a slot that is not in service. */
static td_u32 llt_get_id_by_track(td_handle track)
{
    td_u32 id;

    for (id = MMAP_ID0; id < MAX_LL_TRACK_NUM; id++) {
        if ((g_llt_map[id].used == TD_TRUE) && (g_llt_map[id].track == track)) {
            return id;
        }
    }
    return MMAP_ID_MAX;
}
/* Restore slot `id` to its power-on defaults: 16-bit stereo 48 kHz, default
 * buffering level, no fds, no AIP assigned. */
static td_void llt_source_reset(td_u32 id)
{
    track_mmap_source *src = &g_llt_map[id];
    td_s32 ret = memset_s(src, sizeof(*src), 0, sizeof(track_mmap_source));

    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return;
    }
    src->track = TD_INVALID_HANDLE;
    src->bit_depth = EXT_BIT_DEPTH_16;
    src->sample_rate = EXT_SAMPLE_RATE_48K;
    src->channels = EXT_AUDIO_CH_STEREO;
    src->buf_level_ms = AO_TRACK_LOWLATENCY_DEFATTR_BUFLEVELMS;
    /* buffering ceiling: level_ms * rate * channels / 1000 (ms -> s).
     * NOTE(review): llt_write_frame compares this against a byte count;
     * for 16-bit samples that differs by the 2-byte sample width — confirm units */
    src->latency_size = src->buf_level_ms * src->sample_rate * src->channels / 1000;
    src->aip_buf.fd = -1;
    src->aoe_reg.fd = -1;
    src->aip_id = AOE_MAX_AIP_NUM;
}
static td_bool circ_buf_check(const circ_buf *cb)
{
return ((cb->write != TD_NULL) &&
(cb->read != TD_NULL) &&
(cb->data != TD_NULL));
}
/* Copy one PCM frame into the low-latency track's ring buffer.
 * Refuses the frame (OUT_BUF_FULL) when the ring lacks room or when queueing
 * it would exceed the slot's configured latency ceiling. */
static td_s32 llt_write_frame(td_u32 id, const ext_ao_frame *ao_frame)
{
    td_u32 frame_size;
    td_u32 written;
    circ_buf *cb = &g_llt_map[id].cb;

    if (circ_buf_check(cb) == TD_FALSE) {
        soc_log_err("invalid lowlatency track circle buffer!\n");
        return SOC_ERR_AO_NULL_PTR;
    }
    frame_size = ao_frame_pcm_data_size(ao_frame);
    /* `<=` keeps one slot spare — presumably the ring's full/empty convention */
    if (circ_buf_query_free(cb) <= frame_size) {
        return SOC_ERR_AO_OUT_BUF_FULL;
    }
    if (circ_buf_query_busy(cb) + frame_size >= g_llt_map[id].latency_size) {
        return SOC_ERR_AO_OUT_BUF_FULL;
    }
    written = circ_buf_write(cb, (td_u8 *)ao_frame->pcm_buffer, frame_size);
    return (written == frame_size) ? TD_SUCCESS : TD_FAILURE;
}
/* Return the driver-side AIP of this track and clear the slot's mapping
 * bookkeeping (fds to -1, sizes/addresses to 0, AIP unassigned). */
static td_void llt_munmap_aip(td_handle track, td_u32 id)
{
    td_s32 ret;
    td_s32 ao_fd;
    ao_track_mmap_param param = {
        .h_track = track,
        .mmap_attr.bit_depth = g_llt_map[id].bit_depth,
        .mmap_attr.sample_rate = g_llt_map[id].sample_rate,
        .mmap_attr.channels = g_llt_map[id].channels,
        .mmap_attr.aip_id = g_llt_map[id].aip_id,
        .mmap_attr.aoe_reg.phys_addr = g_llt_map[id].aoe_reg.phys_addr,
        .mmap_attr.aoe_reg.size = g_llt_map[id].aoe_reg.size,
        .mmap_attr.aoe_reg.fd = g_llt_map[id].aoe_reg.fd,
        .mmap_attr.aip_buf.phys_addr = g_llt_map[id].aip_buf.phys_addr,
        .mmap_attr.aip_buf.size = g_llt_map[id].aip_buf.size,
        .mmap_attr.aip_buf.fd = g_llt_map[id].aip_buf.fd,
    };
    ao_fd = mpi_ao_get_fd();
    if (ao_fd < 0) {
        soc_log_err("mpi ao not open\n");
        return;
    }
    if (g_llt_map[id].sw_aoe == TD_FALSE) {
        /* hw AOE: the register area is not an ION buffer, no fd to hand back */
        param.mmap_attr.aoe_reg.fd = -1;
    }
    ret = ioctl(ao_fd, CMD_AO_TRACK_MUNMAP, &param);
    if (ret != TD_SUCCESS) {
        /* fixed log text: this is the MUNMAP ioctl, the message said MMAP */
        soc_log_err("ioctl CMD_AO_TRACK_MUNMAP failed\n");
        soc_err_print_err_code(ret);
        return;
    }
    g_llt_map[id].aip_id = AOE_MAX_AIP_NUM;
    g_llt_map[id].aip_buf.phys_addr = 0;
    g_llt_map[id].aip_buf.size = 0;
    g_llt_map[id].aip_buf.fd = -1;
    g_llt_map[id].aoe_reg.phys_addr = 0;
    g_llt_map[id].aoe_reg.size = 0;
    g_llt_map[id].aoe_reg.fd = -1;
}
/* Ask the driver to allocate an AIP for this track. On success the slot
 * records the kernel-chosen AIP id plus the AIP buffer and AOE register
 * descriptors (phys addr / size / fd) for the later mapping step. */
static td_s32 llt_mmap_aip(td_handle track, td_u32 id)
{
    td_s32 ret;
    td_s32 ao_fd;
    track_mmap_source *src = &g_llt_map[id];
    ao_track_mmap_param param = {
        .h_track = track,
        .mmap_attr.bit_depth = src->bit_depth,
        .mmap_attr.sample_rate = src->sample_rate,
        .mmap_attr.channels = src->channels,
        .mmap_attr.aip_id = AOE_MAX_AIP_NUM,
        .mmap_attr.aoe_reg.phys_addr = 0,
        .mmap_attr.aoe_reg.size = 0,
        .mmap_attr.aoe_reg.fd = -1,
        .mmap_attr.aip_buf.phys_addr = 0,
        .mmap_attr.aip_buf.size = 0,
        .mmap_attr.aip_buf.fd = -1,
    };

    ao_fd = mpi_ao_get_fd();
    if (ao_fd < 0) {
        soc_log_err("mpi ao not open\n");
        return SOC_ERR_AO_NOT_INIT;
    }
    ret = ioctl(ao_fd, CMD_AO_TRACK_MMAP, &param);
    if (ret != TD_SUCCESS) {
        soc_log_err("ioctl CMD_AO_TRACK_MMAP failed\n");
        soc_err_print_err_code(ret);
        return ret;
    }
    src->aip_id = param.mmap_attr.aip_id;
    src->aip_buf.phys_addr = param.mmap_attr.aip_buf.phys_addr;
    src->aip_buf.size = param.mmap_attr.aip_buf.size;
    src->aip_buf.fd = param.mmap_attr.aip_buf.fd;
    src->aoe_reg.phys_addr = param.mmap_attr.aoe_reg.phys_addr;
    src->aoe_reg.size = param.mmap_attr.aoe_reg.size;
    src->aoe_reg.fd = param.mmap_attr.aoe_reg.fd;
    return TD_SUCCESS;
}
/* SW AOE: the register area is an ION/DMA buffer mapping — release it as such. */
static inline td_void llt_munmap_sw_aoe_reg(td_u32 id)
{
    dma_munmap(&g_llt_map[id].aoe_reg);
}
/* Map the software-AOE register area. SW AOE allocates an SMMU (ION) buffer
 * as its register bank, so the mapping goes through the buffer's own fd. */
static td_s32 llt_mmap_sw_aoe_reg(td_u32 id)
{
    td_s32 ret = dma_mmap(&g_llt_map[id].aoe_reg);

    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_mmap, ret);
    }
    return ret;
}
/* HW AOE: the register area was mapped through the AO device fd — plain munmap. */
static inline td_void llt_munmap_hw_aoe_reg(td_u32 id)
{
    audio_munmap(g_llt_map[id].aoe_reg.virt_addr, g_llt_map[id].aoe_reg.size);
}
/* Map the hardware-AOE register area (DSP DRAM or CPU SRAM) through the AO
 * device fd at the physical address reported by the driver. */
static td_s32 llt_mmap_hw_aoe_reg(td_u32 id)
{
    ext_audio_buffer *reg = &g_llt_map[id].aoe_reg;
    td_s32 ao_fd = mpi_ao_get_fd();

    if (ao_fd < 0) {
        soc_log_err("mpi_ao_get_fd failed\n");
        return SOC_ERR_AO_DEV_NOT_OPEN;
    }
    reg->fd = ao_fd;
    reg->virt_addr = (td_u8 *)audio_mmap(ao_fd, reg->size, reg->phys_addr);
    if (reg->virt_addr == TD_NULL) {
        soc_log_err("call audio_mmap failed\n");
        return SOC_ERR_AO_MALLOC_FAILED;
    }
    return TD_SUCCESS;
}
/* Unmap the AOE register area via whichever path (sw/hw) mapped it. */
static td_void llt_munmap_aoe_reg(td_u32 id)
{
    if (g_llt_map[id].sw_aoe != TD_TRUE) {
        llt_munmap_hw_aoe_reg(id);
        return;
    }
    llt_munmap_sw_aoe_reg(id);
}
/* Map the AOE register area, choosing the software or hardware path.
 * A valid ION-buffer fd from the driver selects the SW AOE path.
 * NOTE(review): fd 0 is treated as "no fd" here — presumably the driver never
 * hands out fd 0; confirm, otherwise `>= 0` would be the correct test. */
static td_s32 llt_mmap_aoe_reg(td_u32 id)
{
    g_llt_map[id].sw_aoe = (g_llt_map[id].aoe_reg.fd > 0);
    if (g_llt_map[id].sw_aoe == TD_TRUE) {
        return llt_mmap_sw_aoe_reg(id);
    } else {
        return llt_mmap_hw_aoe_reg(id);
    }
}
/* Tear down the user-space mappings created by llt_mmap_buf(): the AIP PCM
 * buffer and the AOE register area (the two are independent mappings). */
static td_void llt_munmap_buf(td_handle track, td_u32 id)
{
    TD_UNUSED(track);
    llt_munmap_aoe_reg(id);
    dma_munmap(&g_llt_map[id].aip_buf);
}
/* Map the AOE register area and the AIP PCM buffer for slot `id`, then bind
 * the slot's circular buffer to the AIP read/write pointer registers.
 * Fix: the AIP id is now validated BEFORE mapping the AIP buffer — the old
 * order mapped aip_buf first and then leaked that mapping (only aoe_reg was
 * unmapped) when the id turned out to be invalid. */
static td_s32 llt_mmap_buf(td_handle track, td_u32 id)
{
    td_s32 ret;
    aip_regs_type *aip_reg = TD_NULL;
    TD_UNUSED(track);

    ret = llt_mmap_aoe_reg(id);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(llt_mmap_aoe_reg, ret);
        return ret;
    }
    if (g_llt_map[id].aip_id >= AOE_MAX_AIP_NUM) {
        llt_munmap_aoe_reg(id);
        return SOC_ERR_AO_INVALID_ID;
    }
    ret = dma_mmap(&g_llt_map[id].aip_buf);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_mmap, ret);
        llt_munmap_aoe_reg(id);
        return ret;
    }
    /* locate this AIP's register band inside the mapped AOE register area */
    aip_reg = (aip_regs_type *)(g_llt_map[id].aoe_reg.virt_addr +
        AOE_AIP_REG_OFFSET + AOE_AIP_REG_BANDSIZE * g_llt_map[id].aip_id);
    circ_buf_init(&g_llt_map[id].cb,
        (td_u32 *)(&aip_reg->aip_buf_wptr),
        (td_u32 *)(&aip_reg->aip_buf_rptr),
        g_llt_map[id].aip_buf.virt_addr,
        aip_reg->aip_buf_size.bits.buff_size);
    return TD_SUCCESS;
}
/* Acquire an AIP from the driver, then map its buffers into this process.
 * On buffer-mapping failure the AIP allocation is rolled back. */
static td_s32 llt_mmap(td_handle track, td_u32 id)
{
    td_s32 ret = llt_mmap_aip(track, id);

    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(llt_mmap_aip, ret);
        return ret;
    }
    ret = llt_mmap_buf(track, id);
    if (ret == TD_SUCCESS) {
        return TD_SUCCESS;
    }
    soc_err_print_call_fun_err(llt_mmap_buf, ret);
    llt_munmap_aip(track, id);
    return ret;
}
/* Full teardown for slot `id`, in reverse order of llt_mmap().
 * Fix: user-space buffers must be unmapped BEFORE llt_munmap_aip(), because
 * llt_munmap_aip() zeroes the slot's size/fd bookkeeping that
 * llt_munmap_buf() needs for munmap() — the old order called munmap with
 * size 0 and leaked both mappings. */
static td_void llt_munmap(td_handle track, td_u32 id)
{
    llt_munmap_buf(track, id);
    llt_munmap_aip(track, id);
}
/* A low-latency track handle carries SOC_ID_LOWLATENCY_TRACK in bits 8..15. */
td_bool llt_check(td_handle track)
{
    td_bool is_llt = ((track & 0xff00) == (SOC_ID_LOWLATENCY_TRACK << 8));
    return is_llt ? TD_TRUE : TD_FALSE;
}
/* Bind `track` to a free map slot and map its AIP resources.
 * Fixes: the two early-error returns previously left g_llt_mutex locked
 * (deadlocking every later low-latency call), and a failed llt_mmap() left a
 * stale track handle in the slot — the slot is now reset on that path. */
td_s32 llt_enable(td_handle track)
{
    td_s32 ret;
    td_u32 id;

    llt_mutex_lock();
    /* reject a handle that is already bound to a slot */
    for (id = MMAP_ID0; id < MMAP_ID_MAX; id++) {
        if (g_llt_map[id].track == track) {
            soc_log_err("track handle has been occupied!\n");
            llt_mutex_unlock(); /* fix: was returned with the mutex held */
            return SOC_ERR_AO_DEVICE_BUSY;
        }
    }
    id = llt_get_free_map_source();
    if (id == MMAP_ID_MAX) {
        soc_log_err("no mmap source can be used!");
        llt_mutex_unlock(); /* fix: was returned with the mutex held */
        return SOC_ERR_AO_DEVICE_BUSY;
    }
    llt_source_reset(id);
    g_llt_map[id].track = track;
    ret = llt_mmap(track, id);
    if (ret != TD_SUCCESS) {
        soc_log_err("llt_mmap failed(0x%x)!\n", ret);
        llt_source_reset(id); /* fix: do not leave a stale handle in the slot */
        llt_mutex_unlock();
        return ret;
    }
    g_llt_map[id].used = TD_TRUE;
    llt_mutex_unlock();
    return ret;
}
/* Unbind `track` from its map slot and release all its resources.
 * Fix: the slot lookup is now performed under the track mutex — previously a
 * concurrent enable/disable could invalidate the looked-up id before it was
 * used. Also fixed the "destory" typo in the log message. */
td_void llt_disable(td_handle track)
{
    mmap_id id;

    llt_mutex_lock();
    id = llt_get_id_by_track(track);
    if (id == MMAP_ID_MAX) {
        llt_mutex_unlock();
        soc_log_err("no lowlatency track can be destroyed!");
        return;
    }
    llt_munmap(track, id);
    llt_source_reset(id);
    llt_mutex_unlock();
}
/* Queue one PCM frame on a low-latency track (must be 16-bit stereo 48 kHz).
 * Fix: frame validation now happens before taking the lock, and the slot
 * lookup happens under the lock — previously a concurrent llt_disable()
 * could tear the slot down between lookup and use. */
td_s32 llt_send_data(td_handle track, const ext_ao_frame *ao_frame)
{
    td_s32 ret;
    mmap_id id;

    check_ao_null_ptr(ao_frame);
    if ((ao_frame->channels != EXT_AUDIO_CH_STEREO) ||
        (ao_frame->bit_depth != EXT_BIT_DEPTH_16) ||
        (ao_frame->sample_rate != EXT_SAMPLE_RATE_48K)) {
        soc_log_err("ao_frame invalid param\n");
        return SOC_ERR_AO_INVALID_PARA;
    }
    llt_mutex_lock();
    id = llt_get_id_by_track(track);
    if (id == MMAP_ID_MAX) {
        llt_mutex_unlock();
        soc_log_err("no lowlatency track can be send!");
        return SOC_ERR_AO_DEVICE_BUSY;
    }
    ret = llt_write_frame(id, ao_frame);
    llt_mutex_unlock();
    return ret;
}
/* Report the track's queued audio as a delay in milliseconds.
 * Fix: the null-pointer check runs first and the slot lookup is performed
 * under the track mutex so a concurrent llt_disable() cannot invalidate it. */
td_s32 llt_get_aip_delay_ms(td_handle track, td_u32 *delay_ms)
{
    td_u32 busy_size;
    mmap_id id;

    check_ao_null_ptr(delay_ms);
    llt_mutex_lock();
    id = llt_get_id_by_track(track);
    if (id == MMAP_ID_MAX) {
        llt_mutex_unlock();
        soc_log_err("no lowlatency track can be send, track = 0x%x!", track);
        return SOC_ERR_AO_DEVICE_BUSY;
    }
    busy_size = circ_buf_query_busy(&g_llt_map[id].cb);
    llt_mutex_unlock();
    /* bytes -> ms at the fixed 48 kHz stereo 16-bit format; 1000: s -> ms */
    *delay_ms = busy_size * 1000 /
        (EXT_SAMPLE_RATE_48K * EXT_AUDIO_CH_STEREO * sizeof(td_u16));
    return TD_SUCCESS;
}
/* ao dma API */
#define SND_MUTES_FRAMES 10
#define DMA_PREPARE_MUTE_MS 3
#define DMA_PERIOND_MS_MAX 40 /* 40ms */
/* 8ch/24bit/48kHz 40ms */
#define DMA_WK_BUFFER_SIZE (EXT_AUDIO_CH_8 * sizeof(td_s32) * (EXT_SAMPLE_RATE_48K / 1000) * DMA_PERIOND_MS_MAX)
#define DMA_WK_BUFFER_MAX_NUM 2
#define AIAO_TX_WRITE_OFFSET 0x88
#define AIAO_TX_READ_OFFSET 0x8c
/* Per-port DMA context: the mapped ring plus the AIAO TX read/write pointer
 * registers that drive it. */
typedef struct {
    td_u8 *dma;    /* user-space mapping of the port's DMA buffer */
    td_u32 *write; /* mapped AIAO TX write-pointer register */
    td_u32 *read;  /* mapped AIAO TX read-pointer register */
    td_u32 size;   /* ring size in bytes */
    circ_buf cb;   /* circular-buffer view over the above */
} snd_port_dma_ctx;
/* One output port on the DMA list: kernel attributes plus mapped context. */
typedef struct {
    snd_port_kernel_attr dma_attr;
    snd_port_dma_ctx dma_ctx;
    struct list_head node; /* linked into g_dma_source.list */
} snd_port_dma;
/* Round-robin scratch-buffer pool used for bit-depth/channel conversion. */
typedef struct {
    td_u32 id;       /* index of the next block to hand out */
    td_u32 blk_size; /* bytes per block */
    td_u32 blk_num;  /* number of blocks in the pool */
    td_u8 *buf;      /* malloc'd backing store (blk_num * blk_size bytes) */
} dma_work_buf;
/* Global state for the single supported sound-DMA instance (SND0 only). */
typedef struct {
    td_bool used;    /* set once dma_create() succeeds */
    td_bool enable;  /* set by dma_enable(), cleared by dma_disable() */
    ao_snd_id snd;
    ext_sample_rate sample_rate;
    td_u32 frame_cnt; /* debug frame count */
    snd_port_info dma_info;
    pthread_mutex_t mutex;
    dma_work_buf wk_buf;
    td_u32 *aoe_reg; /* user-space mapping of the AIAO register bank */
    struct list_head list; /* list of snd_port_dma */
} snd_dma_source;
/* Single global DMA instance (only SND0 is supported).
 * Fix: the mutex is now statically initialized — dma_mutex_lock() is called
 * (via snd_dma_create) before any explicit pthread_mutex_init() runs, so the
 * first lock previously operated on a merely zero-filled mutex. */
static snd_dma_source g_dma_source = {
    .used = TD_FALSE,
    .sample_rate = EXT_SAMPLE_RATE_48K,
    .mutex = PTHREAD_MUTEX_INITIALIZER,
    .list = {
        .next = &g_dma_source.list,
        .prev = &g_dma_source.list,
    }
};
/* Initialize the DMA mutex with default attributes; failure is only logged.
 * NOTE(review): this must never run while any thread holds the mutex —
 * re-initializing a locked pthread mutex is undefined behavior; verify every
 * call site before adding new ones. */
static td_void dma_mutex_init(td_void)
{
    if (pthread_mutex_init(&g_dma_source.mutex, TD_NULL) != 0) {
        soc_log_err("init mutex failed\n");
    }
}
/* Acquire the global DMA lock; failure is only logged. */
static inline td_void dma_mutex_lock(td_void)
{
    int rc = pthread_mutex_lock(&g_dma_source.mutex);
    if (rc != 0) {
        soc_log_err("Lock mutex failed\n");
    }
}
/* Release the global DMA lock; failure is only logged. */
static inline td_void dma_mutex_unlock(td_void)
{
    int rc = pthread_mutex_unlock(&g_dma_source.mutex);
    if (rc != 0) {
        soc_log_err("Unlock mutex failed\n");
    }
}
/* Free the conversion scratch pool; a no-op when it was never allocated. */
static td_void dma_deinit_wk_buf(td_void)
{
    dma_work_buf *wk = &g_dma_source.wk_buf;

    if (wk->buf != TD_NULL) {
        free(wk->buf);
        wk->buf = TD_NULL;
        wk->id = 0;
        wk->blk_size = 0;
        wk->blk_num = 0;
    }
}
/* Allocate the scratch pool used to convert frames to each port's format
 * (DMA_WK_BUFFER_MAX_NUM blocks of DMA_WK_BUFFER_SIZE bytes). */
static td_s32 dma_init_wk_buf(td_void)
{
    td_u8 *buf = TD_NULL;
    dma_work_buf *wk_buf = &g_dma_source.wk_buf;

    buf = (td_u8 *)malloc(DMA_WK_BUFFER_MAX_NUM * DMA_WK_BUFFER_SIZE);
    if (buf == TD_NULL) {
        soc_log_err("call malloc failed\n"); /* fixed stray '(' in message */
        return SOC_ERR_AO_MALLOC_FAILED;
    }
    wk_buf->id = 0;
    wk_buf->blk_size = DMA_WK_BUFFER_SIZE;
    wk_buf->blk_num = DMA_WK_BUFFER_MAX_NUM;
    wk_buf->buf = buf;
    return TD_SUCCESS;
}
static td_u8 *dma_get_wk_buf(td_void)
{
td_u8 *buf = TD_NULL;
dma_work_buf *wk_buf = &g_dma_source.wk_buf;
if (wk_buf->buf == TD_NULL) {
return TD_NULL;
}
if (wk_buf->id >= wk_buf->blk_num) {
wk_buf->id = 0;
}
buf = wk_buf->buf + wk_buf->id * wk_buf->blk_size;
wk_buf->id++;
return buf;
}
/* Bytes per audio frame (one sample across all channels) for this port. */
static td_u32 dma_calc_frame_size(const snd_port_kernel_attr *dma_attr)
{
    td_u32 bytes = (dma_attr->bit_depth == EXT_BIT_DEPTH_16) ? sizeof(td_s16) : sizeof(td_s32);
    return dma_attr->channels * bytes;
}
/* Convert the frame's PCM between 16-bit and 24-bit(-in-32) containers to
 * match the target `bit_depth`, rewriting the frame to point at wk_buf when
 * a conversion was performed. Other combinations pass through untouched. */
static td_s32 dma_unify_bit_depth_pcm_data(td_u32 bit_depth, td_void *wk_buf, ext_ao_frame *frame)
{
    td_u32 n;
    td_u32 total = frame->pcm_samples * frame->channels;

    check_ao_null_ptr(frame->pcm_buffer);
    if ((bit_depth == EXT_BIT_DEPTH_16) && (frame->bit_depth == EXT_BIT_DEPTH_24)) {
        const td_u32 *src = (const td_u32 *)frame->pcm_buffer;
        td_u16 *dst = (td_u16 *)wk_buf;
        /* 24-in-32 -> 16: keep the high 16 bits of each sample */
        for (n = 0; n < total; n++) {
            dst[n] = (td_u16)(src[n] >> 16);
        }
        frame->pcm_buffer = (td_s32 *)wk_buf;
        frame->bit_depth = EXT_BIT_DEPTH_16;
    } else if ((bit_depth == EXT_BIT_DEPTH_24) && (frame->bit_depth == EXT_BIT_DEPTH_16)) {
        const td_u16 *src = (const td_u16 *)frame->pcm_buffer;
        td_u32 *dst = (td_u32 *)wk_buf;
        /* 16 -> 24-in-32: place the sample in the high 16 bits */
        for (n = 0; n < total; n++) {
            dst[n] = ((td_u32)src[n]) << 16;
        }
        frame->pcm_buffer = (td_s32 *)wk_buf;
        frame->bit_depth = EXT_BIT_DEPTH_24;
    }
    return TD_SUCCESS;
}
/* Upmix 24-bit(-in-32) PCM from frame->channels to `channels` channels,
 * copying existing channels and zero-filling the new ones. */
static td_void dma_upmix_pcm_data_24bit(const ext_ao_frame *frame,
    td_u32 channels, const td_u32 *pcm_buffer, td_u32 *to)
{
    td_u32 s;
    td_u32 c;

    for (s = 0; s < frame->pcm_samples; s++) {
        td_u32 *dst = to + s * channels;
        const td_u32 *src = pcm_buffer + s * frame->channels;
        for (c = 0; c < channels; c++) {
            dst[c] = (c < frame->channels) ? src[c] : 0;
        }
    }
}
/* Upmix 16-bit PCM from frame->channels to `channels` channels, copying
 * existing channels and zero-filling the new ones. */
static td_void dma_upmix_pcm_data_16bit(const ext_ao_frame *frame,
    td_u32 channels, const td_u16 *pcm_buffer, td_u16 *to)
{
    td_u32 s;
    td_u32 c;

    for (s = 0; s < frame->pcm_samples; s++) {
        td_u16 *dst = to + s * channels;
        const td_u16 *src = pcm_buffer + s * frame->channels;
        for (c = 0; c < channels; c++) {
            dst[c] = (c < frame->channels) ? src[c] : 0;
        }
    }
}
/* Upmix a frame to `ch` channels, dispatching on bit depth; the frame is
 * rewritten to use wk_buf when a conversion was performed. Unknown depths
 * pass through untouched. */
static td_s32 dma_upmix_pcm_data(td_u32 ch, td_void *wk_buf, ext_ao_frame *frame)
{
    check_ao_null_ptr(frame->pcm_buffer);
    if (frame->bit_depth == EXT_BIT_DEPTH_24) {
        dma_upmix_pcm_data_24bit(frame, ch, (td_u32 *)frame->pcm_buffer, (td_u32 *)wk_buf);
    } else if (frame->bit_depth == EXT_BIT_DEPTH_16) {
        dma_upmix_pcm_data_16bit(frame, ch, (td_u16 *)frame->pcm_buffer, (td_u16 *)wk_buf);
    } else {
        return TD_SUCCESS;
    }
    frame->pcm_buffer = (td_s32 *)wk_buf;
    frame->channels = ch;
    return TD_SUCCESS;
}
/* Bring a frame to the port's bit depth and channel count, converting
 * through the round-robin scratch pool as needed. */
static td_s32 dma_unify_pcm_data(const snd_port_kernel_attr *dma_attr, ext_ao_frame *frame)
{
    td_s32 ret;

    if (frame->bit_depth != (td_s32)dma_attr->bit_depth) {
        td_void *scratch = (td_void *)dma_get_wk_buf();
        if (scratch == TD_NULL) {
            return SOC_ERR_AO_NULL_PTR;
        }
        ret = dma_unify_bit_depth_pcm_data(dma_attr->bit_depth, scratch, frame);
        if (ret != TD_SUCCESS) {
            soc_err_print_call_fun_err(dma_unify_bit_depth_pcm_data, ret);
            return ret;
        }
    }
    if (frame->channels != dma_attr->channels) {
        td_void *scratch = (td_void *)dma_get_wk_buf();
        if (scratch == TD_NULL) {
            return SOC_ERR_AO_NULL_PTR;
        }
        ret = dma_upmix_pcm_data(dma_attr->channels, scratch, frame);
        if (ret != TD_SUCCESS) {
            soc_err_print_call_fun_err(dma_upmix_pcm_data, ret);
            return ret;
        }
    }
    return TD_SUCCESS;
}
static td_void munmap_aiao_reg(td_void)
{
if (g_dma_source.aoe_reg == TD_NULL) {
return;
}
audio_munmap((td_void *)g_dma_source.aoe_reg, AIAO_CBB_REGBANDSIZE);
g_dma_source.aoe_reg = TD_NULL;
}
/* Map the AIAO register bank through the AO device fd.
 * NOTE(review): the physical address is aligned down with PAGE_SIZE_MASK but
 * the dropped offset is never re-added — presumably the driver reports a
 * page-aligned address; confirm. */
static td_s32 mmap_aiao_reg(td_u64 aiao_phys_addr)
{
    td_void *reg = TD_NULL;
    td_s32 ao_fd = mpi_ao_get_fd();

    if (ao_fd < 0) {
        soc_log_err("mpi_ao_get_fd failed\n");
        return SOC_ERR_AO_DEV_NOT_OPEN;
    }
    reg = audio_mmap(ao_fd, AIAO_CBB_REGBANDSIZE, aiao_phys_addr & PAGE_SIZE_MASK);
    if (reg == TD_NULL) {
        return SOC_ERR_AO_MALLOC_FAILED;
    }
    g_dma_source.aoe_reg = (td_u32 *)reg;
    return TD_SUCCESS;
}
/* Reset the global DMA state to defaults for `sound`.
 * Fix: callers (dma_create/dma_destroy) hold g_dma_source.mutex while this
 * runs, and the old code memset() the whole struct — wiping the locked mutex
 * — and then pthread_mutex_init()'d it, both undefined behavior. Every field
 * EXCEPT the mutex is now cleared; the mutex keeps its one-time (static)
 * initialization for the process lifetime. */
static td_void dma_source_reset(ao_snd_id sound)
{
    td_s32 ret;

    g_dma_source.used = TD_FALSE;
    g_dma_source.enable = TD_FALSE;
    g_dma_source.frame_cnt = 0;
    g_dma_source.aoe_reg = TD_NULL;
    ret = memset_s(&g_dma_source.dma_info, sizeof(g_dma_source.dma_info),
        0, sizeof(snd_port_info));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return;
    }
    ret = memset_s(&g_dma_source.wk_buf, sizeof(g_dma_source.wk_buf),
        0, sizeof(dma_work_buf));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return;
    }
    g_dma_source.snd = sound;
    g_dma_source.sample_rate = EXT_SAMPLE_RATE_48K;
    INIT_LIST_HEAD(&g_dma_source.list);
}
/* Push the DMA playback sample rate down to the driver. */
static td_s32 mpi_ao_dma_set_sample_rate(ao_snd_id sound, ext_sample_rate sample_rate)
{
    td_s32 ret;
    td_s32 ao_fd = mpi_ao_get_fd();
    ao_snd_dma_set_sample_rate_param param = {
        .sound = sound,
        .sample_rate = sample_rate,
    };

    if (ao_fd < 0) {
        soc_log_err("mpi_ao_get_fd failed\n");
        return SOC_ERR_AO_DEV_NOT_OPEN;
    }
    ret = ioctl(ao_fd, CMD_AO_SND_DMASETSAMPLERATE, &param);
    if (ret != TD_SUCCESS) {
        soc_log_err("ioctl CMD_AO_SND_DMASETSAMPLERATE failed(0x%x)\n", ret);
    }
    return ret;
}
/* Ask the driver to tear down the DMA ports described by `info`. */
static td_s32 mpi_ao_dma_destroy(ao_snd_id sound, const snd_port_info *info)
{
    td_s32 ret;
    td_s32 ao_fd = mpi_ao_get_fd();
    ao_snd_dma_create_param param = {
        .sound = sound,
    };

    if (ao_fd < 0) {
        soc_log_err("mpi_ao_get_fd failed\n");
        return SOC_ERR_AO_DEV_NOT_OPEN;
    }
    ret = memcpy_s(&param.port_info, sizeof(param.port_info), info, sizeof(*info));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memcpy_s, ret);
        return ret;
    }
    ret = ioctl(ao_fd, CMD_AO_SND_DMADESTORY, &param);
    if (ret != TD_SUCCESS) {
        soc_log_err("ioctl CMD_AO_SND_DMADESTORY failed(0x%x)\n", ret);
    }
    return ret;
}
/* Ask the driver to create the DMA ports for `sound` and copy the resulting
 * port description back out into `info`. */
static td_s32 mpi_ao_dma_create(ao_snd_id sound, snd_port_info *info)
{
    td_s32 ret;
    ao_snd_dma_create_param param;
    td_s32 ao_fd = mpi_ao_get_fd();

    if (ao_fd < 0) {
        soc_log_err("mpi_ao_get_fd failed\n");
        return SOC_ERR_AO_DEV_NOT_OPEN;
    }
    ret = memset_s(&param, sizeof(param), 0, sizeof(ao_snd_dma_create_param));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return ret;
    }
    param.sound = sound;
    ret = ioctl(ao_fd, CMD_AO_SND_DMACREATE, &param);
    if (ret != TD_SUCCESS) {
        soc_log_err("ioctl CMD_AO_SND_DMACREATE failed(0x%x)\n", ret);
        return ret;
    }
    ret = memcpy_s(info, sizeof(*info), &param.port_info, sizeof(param.port_info));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memcpy_s, ret);
        return ret;
    }
    return TD_SUCCESS;
}
/* Release one port's DMA buffer mapping and clear the stale pointer. */
static td_void munmap_one_port(snd_port_dma *dma)
{
    audio_munmap_ion((td_void *)dma->dma_ctx.dma, dma->dma_attr.port_dma.size);
    dma->dma_ctx.dma = TD_NULL;
}
/* Unmap a port, unlink it from the DMA list, and free its descriptor. */
static td_void dma_munmap_one_port(snd_port_dma *dma)
{
    munmap_one_port(dma);
    list_del(&dma->node);
    free((td_void *)dma);
}
/* Tear down every mapped port, then the AIAO register mapping. The safe
 * iterator is required because each port is unlinked as it is destroyed. */
static td_void dma_unmap_port(td_void)
{
    struct list_head *pos = TD_NULL;
    struct list_head *next = TD_NULL;

    list_for_each_safe(pos, next, &g_dma_source.list) {
        dma_munmap_one_port(list_entry((uintptr_t)pos, snd_port_dma, node));
    }
    munmap_aiao_reg();
}
/* Map one TX port's DMA buffer and bind its circular buffer to the AIAO TX
 * read/write pointer registers for that port. */
static td_s32 mmap_one_port(snd_port_dma *dma)
{
    snd_port_kernel_attr *attr = &dma->dma_attr;
    snd_port_dma_ctx *ctx = &dma->dma_ctx;
    td_u8 *reg_base = (td_u8 *)g_dma_source.aoe_reg;
    td_u32 tx_offset;

    if (reg_base == TD_NULL) {
        return SOC_ERR_AO_NULL_PTR;
    }
    /* locate the port's register band: I2S and SPDIF TX banks are separate */
    if ((attr->tx_id >= AIAO_PORT_TX0) && (attr->tx_id <= AIAO_PORT_TX7)) {
        tx_offset = AIAO_TX_OFFSET + (attr->tx_id - AIAO_PORT_TX0) * AIAO_TX_REG_BANDSIZE;
    } else if ((attr->tx_id >= AIAO_PORT_SPDIF_TX0) && (attr->tx_id <= AIAO_PORT_SPDIF_TX3)) {
        tx_offset = AIAO_TXSDPIF_OFFSET + (attr->tx_id - AIAO_PORT_SPDIF_TX0) * AIAO_TX_REG_BANDSIZE;
    } else {
        return SOC_ERR_AO_NOTSUPPORT;
    }
    ctx->dma = (td_u8 *)audio_mmap(attr->port_dma.fd, attr->port_dma.size, 0);
    if (ctx->dma == TD_NULL) {
        return SOC_ERR_AO_MALLOC_FAILED;
    }
    ctx->size = attr->size;
    ctx->write = (td_u32 *)(reg_base + tx_offset + AIAO_TX_WRITE_OFFSET);
    ctx->read = (td_u32 *)(reg_base + tx_offset + AIAO_TX_READ_OFFSET);
    circ_buf_init(&ctx->cb, ctx->write, ctx->read, (td_void *)ctx->dma, ctx->size);
    return TD_SUCCESS;
}
/* Zero a freshly allocated port descriptor and record its kernel attributes. */
static td_s32 dma_port_init(const snd_port_kernel_attr *dma_attr, snd_port_dma *dma)
{
    td_s32 ret = memset_s(dma, sizeof(*dma), 0, sizeof(snd_port_dma));

    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return ret;
    }
    ret = memcpy_s(&dma->dma_attr, sizeof(dma->dma_attr), dma_attr, sizeof(*dma_attr));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memcpy_s, ret);
        return ret;
    }
    return TD_SUCCESS;
}
/* Allocate, initialize, and map a single port, then queue it on the DMA
 * list. On any failure the descriptor is freed and nothing is left linked. */
static td_s32 dma_mmap_one_port(const snd_port_kernel_attr *dma_attr)
{
    td_s32 ret;
    snd_port_dma *dma = TD_NULL;

    dma = (snd_port_dma *)malloc(sizeof(snd_port_dma));
    if (dma == TD_NULL) {
        soc_log_err("call malloc failed\n"); /* fixed stray '(' in message */
        return SOC_ERR_AO_MALLOC_FAILED;
    }
    ret = dma_port_init(dma_attr, dma);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_port_init, ret);
        goto out;
    }
    ret = mmap_one_port(dma);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(mmap_one_port, ret);
        goto out;
    }
    list_add_tail(&dma->node, &g_dma_source.list);
    return TD_SUCCESS;
out:
    free((td_void *)dma);
    return ret;
}
/* Map the AIAO register bank, then every valid output port from dma_info.
 * On failure everything mapped so far is rolled back. */
static td_s32 dma_mmap_port(const snd_port_info *dma_info)
{
    td_u32 port;
    td_s32 ret = mmap_aiao_reg(dma_info->aiao_reg);

    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(mmap_aiao_reg, ret);
        return ret;
    }
    for (port = 0; port < EXT_AO_OUTPUT_PORT_MAX; port++) {
        const snd_port_kernel_attr *attr = &dma_info->port_attr[port];
        if (attr->tx_id >= AIAO_PORT_MAX) {
            continue; /* slot not populated by the driver */
        }
        ret = dma_mmap_one_port(attr);
        if (ret != TD_SUCCESS) {
            soc_err_print_call_fun_err(dma_mmap_one_port, ret);
            dma_unmap_port();
            return ret;
        }
    }
    return TD_SUCCESS;
}
/* Pre-fill every port ring with a few milliseconds of silence so playback
 * starts without an underrun (circ_buf_write treats NULL data as mute). */
static td_void dma_prepare(td_void)
{
    struct list_head *pos = TD_NULL;

    list_for_each(pos, &g_dma_source.list) {
        snd_port_dma *dma = list_entry((uintptr_t)pos, snd_port_dma, node);
        td_u32 mute_bytes = DMA_PREPARE_MUTE_MS * (td_u32)g_dma_source.sample_rate *
            dma_calc_frame_size(&dma->dma_attr) / 1000; /* ms -> s */
        circ_buf_write(&dma->dma_ctx.cb, TD_NULL, audio_align_4(mute_bytes));
    }
}
/* Mark DMA output disabled; the next dma_enable() re-programs the rate. */
static inline td_void dma_disable(td_void)
{
    g_dma_source.enable = TD_FALSE;
}
/* First-use enable: program the sample rate, pre-fill silence, and mark the
 * path enabled. Subsequent calls are no-ops until dma_disable(). */
static td_s32 dma_enable(ao_snd_id sound, td_u32 sample_rate)
{
    td_s32 ret;

    if (g_dma_source.enable != TD_TRUE) {
        ret = mpi_ao_dma_set_sample_rate(sound, sample_rate);
        if (ret != TD_SUCCESS) {
            soc_err_print_call_fun_err(mpi_ao_dma_set_sample_rate, ret);
            return ret;
        }
        g_dma_source.sample_rate = (ext_sample_rate)sample_rate;
        dma_prepare();
        g_dma_source.enable = TD_TRUE;
    }
    return TD_SUCCESS;
}
/* TD_TRUE when every port ring can absorb this frame without exceeding the
 * caller's latency budget. */
static td_bool dma_port_check_all_free(const ext_ao_frame *frame, const td_u32 latency_ms)
{
    struct list_head *pos = TD_NULL;

    list_for_each(pos, &g_dma_source.list) {
        snd_port_dma *dma = list_entry((uintptr_t)pos, snd_port_dma, node);
        td_u32 bytes_per_frame = dma_calc_frame_size(&dma->dma_attr);
        td_u32 need = frame->pcm_samples * bytes_per_frame;
        /* budget in bytes: ms * rate * frame-size / 1000 (ms -> s) */
        td_u32 budget = latency_ms * frame->sample_rate * bytes_per_frame / 1000;
        if (circ_buf_query_busy(&dma->dma_ctx.cb) + need > budget) {
            return TD_FALSE;
        }
    }
    return TD_TRUE;
}
/* Write one frame into a port ring. The first SND_MUTES_FRAMES frames are
 * replaced by silence: passing NULL data makes circ_buf_write emit mute. */
static td_s32 dma_port_write_frame(snd_port_dma_ctx *ctx, const ext_ao_frame *frame)
{
    td_u8 *payload = TD_NULL;
    td_u32 expect = ao_frame_pcm_data_size(frame);

    if (g_dma_source.frame_cnt > SND_MUTES_FRAMES) {
        payload = (td_u8 *)frame->pcm_buffer;
    }
    return (circ_buf_write(&ctx->cb, payload, expect) == expect) ? TD_SUCCESS : TD_FAILURE;
}
/* Create the SND DMA path: query port info from the driver, map the AIAO
 * registers and every port buffer, then allocate conversion scratch buffers.
 * Only SND0 is supported. Caller holds the DMA mutex; each failure step rolls
 * back everything acquired before it. */
static td_s32 dma_create(ao_snd_id sound)
{
    td_s32 ret;
    snd_port_info *dma_info = &g_dma_source.dma_info;
    if (sound != AO_SND_0) {
        soc_log_err("We only support DMA mode in SND0!\n");
        return SOC_ERR_AO_INVALID_ID;
    }
    if (g_dma_source.used == TD_TRUE) {
        soc_log_err("SND DMA is busy!\n");
        return SOC_ERR_AO_NOTSUPPORT;
    }
    /* NOTE(review): dma_source_reset() may touch the DMA mutex that this
     * caller currently holds — verify it leaves the mutex intact. */
    dma_source_reset(sound);
    ret = memset_s(dma_info, sizeof(*dma_info), 0, sizeof(snd_port_info));
    if (ret != EOK) {
        soc_err_print_call_fun_err(memset_s, ret);
        return ret;
    }
    ret = mpi_ao_dma_create(sound, dma_info);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(mpi_ao_dma_create, ret);
        return ret;
    }
    ret = dma_mmap_port(dma_info);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_mmap_port, ret);
        /* roll back the driver-side ports created above */
        (td_void)mpi_ao_dma_destroy(sound, dma_info);
        return ret;
    }
    ret = dma_init_wk_buf();
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_init_wk_buf, ret);
        /* roll back both the driver-side ports and the mappings */
        (td_void)mpi_ao_dma_destroy(sound, dma_info);
        dma_unmap_port();
        return ret;
    }
    g_dma_source.used = TD_TRUE;
    return TD_SUCCESS;
}
/* Public entry: create the SND DMA path, serialized by the DMA mutex. */
td_s32 snd_dma_create(ao_snd_id sound)
{
    td_s32 ret;
    dma_mutex_lock();
    ret = dma_create(sound);
    dma_mutex_unlock();
    return ret;
}
/* Tear down the DMA path. Order matters: free the scratch pool, ask the
 * driver to destroy its ports, drop the user-space mappings, then clear the
 * global state. Caller holds the DMA mutex. */
static td_s32 dma_destroy(ao_snd_id sound)
{
    td_s32 ret;
    dma_deinit_wk_buf();
    ret = mpi_ao_dma_destroy(sound, &g_dma_source.dma_info);
    if (ret != TD_SUCCESS) {
        /* best effort: keep unmapping even when the driver call failed */
        soc_err_print_call_fun_err(mpi_ao_dma_destroy, ret);
    }
    dma_unmap_port();
    dma_disable();
    dma_source_reset(sound);
    return ret;
}
/* Public entry: destroy the SND DMA path, serialized by the DMA mutex. */
td_s32 snd_dma_destroy(ao_snd_id sound)
{
    td_s32 ret;
    dma_mutex_lock();
    ret = dma_destroy(sound);
    dma_mutex_unlock();
    return ret;
}
/* Validate a frame for DMA playback: non-null buffers, stereo 16-bit PCM,
 * sample rate 44.1 kHz or 48 kHz. */
static td_s32 dma_check_frame(const ext_ao_frame *frame)
{
    if ((frame == TD_NULL) || (frame->pcm_buffer == TD_NULL)) {
        return SOC_ERR_AO_NULL_PTR;
    }
    if ((frame->channels != EXT_AUDIO_CH_STEREO) || (frame->bit_depth != EXT_BIT_DEPTH_16)) {
        soc_log_err("DMA only support 2ch 16bit pcm stream!\n");
        return SOC_ERR_AO_INVALID_PARA;
    }
    if ((frame->sample_rate != EXT_SAMPLE_RATE_48K) && (frame->sample_rate != EXT_SAMPLE_RATE_44K)) {
        soc_log_err("DMA support 44.1k and 48k pcm stream!\n");
        return SOC_ERR_AO_INVALID_PARA;
    }
    return TD_SUCCESS;
}
/* Fan one frame out to every mapped port, converting it per-port as needed.
 * Caller holds the DMA mutex. */
static td_s32 dma_send_data(const ext_ao_frame *frame, td_u32 latency_ms)
{
    struct list_head *pos = TD_NULL;
    td_s32 ret = dma_enable(g_dma_source.snd, frame->sample_rate);

    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_enable, ret);
        return ret;
    }
    /* all-or-nothing: refuse the frame unless every port has room for it */
    if (dma_port_check_all_free(frame, latency_ms) != TD_TRUE) {
        return SOC_ERR_AO_OUT_BUF_FULL;
    }
    list_for_each(pos, &g_dma_source.list) {
        snd_port_dma *dma = list_entry((uintptr_t)pos, snd_port_dma, node);
        ext_ao_frame port_frame;
        /* work on a copy: conversion rewrites the frame's buffer/format */
        ret = memcpy_s(&port_frame, sizeof(port_frame), frame, sizeof(*frame));
        if (ret != EOK) {
            soc_err_print_call_fun_err(memcpy_s, ret);
            return ret;
        }
        ret = dma_unify_pcm_data(&dma->dma_attr, &port_frame);
        if (ret != TD_SUCCESS) {
            soc_err_print_call_fun_err(dma_unify_pcm_data, ret);
            return ret;
        }
        ret = dma_port_write_frame(&dma->dma_ctx, &port_frame);
        if (ret != TD_SUCCESS) {
            return ret;
        }
    }
    g_dma_source.frame_cnt++;
    return TD_SUCCESS;
}
/* Public entry: validate one frame and queue it for DMA playback.
 * latency_ms bounds the accepted per-port buffering level.
 * Fix: use the named MIN/MAX_LATENCY_MS constants instead of duplicating the
 * magic values 40 and 10 inline. */
td_s32 snd_dma_send_data(const ext_ao_frame *ao_frame, td_u32 latency_ms)
{
    td_s32 ret;

    ret = dma_check_frame(ao_frame);
    if (ret != TD_SUCCESS) {
        soc_err_print_call_fun_err(dma_check_frame, ret);
        return ret;
    }
    if ((latency_ms > MAX_LATENCY_MS) || (latency_ms < MIN_LATENCY_MS)) {
        soc_log_err("invalid DMA latency_ms!\n");
        return SOC_ERR_AO_INVALID_PARA;
    }
    dma_mutex_lock();
    ret = dma_send_data(ao_frame, latency_ms);
    dma_mutex_unlock();
    return ret;
}
/* Report the queued audio of the first port as a delay in milliseconds
 * (only the first port is sampled). Caller holds the DMA mutex. */
static td_s32 dma_get_delay_ms(td_u32 *delay_ms)
{
    td_u32 bytes_per_sec = 0;
    td_u32 queued = 0;
    struct list_head *pos = TD_NULL;

    if (list_empty(&g_dma_source.list) == TD_TRUE) {
        soc_log_err("dma no enable!\n");
        return SOC_ERR_AO_NOT_ACTIVE;
    }
    list_for_each(pos, &g_dma_source.list) {
        snd_port_dma *dma = list_entry((uintptr_t)pos, snd_port_dma, node);
        bytes_per_sec = (td_u32)g_dma_source.sample_rate * dma_calc_frame_size(&dma->dma_attr);
        queued = circ_buf_query_busy(&dma->dma_ctx.cb);
        break; /* the first port is taken as representative */
    }
    if (bytes_per_sec == 0) {
        return SOC_ERR_AO_INVALID_PARA;
    }
    *delay_ms = queued * 1000 / bytes_per_sec; /* s -> ms */
    return TD_SUCCESS;
}
/* Public entry: report the current DMA buffering delay in milliseconds.
 * *delay_ms is pre-set to 0 so error paths leave a defined value. */
td_s32 snd_dma_get_delay_ms(td_u32 *delay_ms)
{
    td_s32 ret;
    if (delay_ms == TD_NULL) {
        return SOC_ERR_AO_NULL_PTR;
    }
    *delay_ms = 0;
    dma_mutex_lock();
    ret = dma_get_delay_ms(delay_ms);
    dma_mutex_unlock();
    return ret;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif /* __cplusplus */