/*
 * Copyright (c) Hisilicon Technologies Co., Ltd. 2019-2019. All rights reserved.
 * Description: common code in mailbox driver
 */
#include "mbx_common.h"
#include "los_mbx.h"
#include "securec.h"
#include "los_printf.h"
#include "los_base.h"
#include "los_memory.h"
#include "los_mux.h"
#include

static struct mailbox g_mailbox = {0};

static void rx_tail(struct session * const session, const union msg_head *rx_head)
{
#ifdef SUPPORT_MBX_INTERRUPT
    td_u32 handle;
#endif

    if (!rx_head->bits.ongoing) {
        session->rx_status &= ~SESSION_BUSY;
#ifdef SUPPORT_MBX_INTERRUPT
        handle = GEN_SESSION_HANDLE(rx_head->bits.num, rx_head->bits.port);
        /* Call session callback func */
        if (session->func != NULL) {
            session->func(handle, session->data);
        }
#endif
    } else {
        session->rx_status |= SESSION_BUSY;
    }

    return;
}

static td_s32 rx(struct session * const session, const union msg_head *rx_head)
{
    td_u32 rd_idx, wr_idx, empty_len;
    td_u32 i, j, tmp_data;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if ((session == NULL) || (rx_head == NULL) || (session->rx_reg == NULL) || (session->rx_reg->pending == NULL)) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    session->rx_status |= SESSION_BUSY;
    wr_idx = session->rx_buf.wr_idx;
    rd_idx = session->rx_buf.rd_idx;
    if (rd_idx > wr_idx) {
        empty_len = rd_idx - wr_idx;
    } else {
        empty_len = session->rx_buf.size + rd_idx - wr_idx;
    }

    /* One slot is always kept free, so the message must be strictly smaller than empty_len */
    if (rx_head->bits.msg_len >= empty_len) {
        session->rx_status &= ~SESSION_BUSY;
        return SOC_ERR_MAILBOX_ERR_RECEIVE;
    }

    for (i = 0; i < rx_head->bits.msg_len / 4; i++) { /* copy 4 bytes each time */
        tmp_data = MBX_READL(session->rx_reg->argv + i);
        for (j = 0; j < 4; j++) { /* copy the 4 bytes of one register */
            session->rx_buf.addr[wr_idx++] = (tmp_data >> (8 * j)) & 0xFF; /* 8-bit shift with 0xFF mask */
            wr_idx %= session->rx_buf.size;
        }
    }

    if (rx_head->bits.msg_len % 4) { /* copy the remaining bytes, fewer than 4 */
        tmp_data = MBX_READL(session->rx_reg->argv + i);
        for (j = 0; j < rx_head->bits.msg_len % 4; j++) {
            session->rx_buf.addr[wr_idx++] = (tmp_data >> (8 * j)) & 0xFF; /* 8-bit shift with 0xFF mask */
            wr_idx %= session->rx_buf.size;
        }
    }
    session->rx_buf.wr_idx = wr_idx;

    rx_tail(session, rx_head);

    /* Clear rx pending status */
    MBX_WRITEL(0x00, (session->rx_reg->pending));

    return SOC_MBX_SUCCESS;
}

static struct session *find_session(td_u32 session_num, td_u32 session_port)
{
    struct session *session = NULL;
    struct session *tmp = NULL;

    MBX_LIST_FOR_EACH_ENTRY(session, tmp, &g_mailbox.list_head, node) {
        if ((session->num == session_num) && (session->port == session_port)) {
            return session;
        }
    }

    /* Fall back to the session bound to port 0 of the same mailbox number */
    MBX_LIST_FOR_EACH_ENTRY(session, tmp, &g_mailbox.list_head, node) {
        if ((session->num == session_num) && (session->port == 0)) {
            return session;
        }
    }

    return NULL;
}

td_s32 mbx_rx_msg(const td_void *rx_head_addr)
{
    struct session *session = NULL;
    union msg_head rx_head = {.head = 0};
    td_s32 ret;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if (rx_head_addr == NULL) {
        return SOC_ERR_MAILBOX_INVALID_PTR;
    }

    rx_head.head = MBX_READL(rx_head_addr);
    session = find_session(rx_head.bits.num, rx_head.bits.port);
    if ((session == NULL) || (session->rx_reg == NULL) || (session->rx_reg->pending == NULL)) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    if (MBX_READL(session->rx_reg->pending) != 0) {
        if (rx_head.bits.msg_len != 0) {
            ret = rx(session, &rx_head);
            if (ret != SOC_MBX_SUCCESS) {
                MBX_ERR_PRINT("Receive in mbx_rx_msg and ret:0x%x\n", ret);
                return ret;
            }
        }
    } else {
        return SOC_ERR_MAILBOX_ERR_RECEIVE;
    }

    return SOC_MBX_SUCCESS;
}
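
/*
 * Receive-path note (added for clarity, not part of the original comments):
 * mbx_rx_msg() is expected to be called with the address of the incoming head
 * register, either from the mailbox interrupt handler (when
 * SUPPORT_MBX_INTERRUPT is defined) or from mbx_polling_rx(), which mbx_rx()
 * below calls while waiting. rx() copies the argv registers into the session's
 * rx ring buffer; one slot of the ring is always kept free, so
 * rd_idx == wr_idx means "empty" and a message is rejected when
 * msg_len >= empty_len.
 */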

static td_s32 async_tx(td_u32 handle, const td_u8 *msg, td_u32 msg_len)
{
    td_u32 i;
    td_u32 rd_idx;
    td_u32 wr_idx;
    td_u32 empty_len;
    struct session *session = NULL;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if (msg_len == 0) {
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    session = find_session(SESSION_HANDLE_NUM(handle), SESSION_HANDLE_PORT(handle));
    if (session == NULL) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    if (session->tx_buf.addr == NULL) {
        return SOC_ERR_MAILBOX_NOT_SUPPORT;
    }

    wr_idx = session->tx_buf.wr_idx;
    rd_idx = session->tx_buf.rd_idx;
    if (rd_idx > wr_idx) {
        empty_len = rd_idx - wr_idx;
    } else {
        empty_len = session->tx_buf.size + rd_idx - wr_idx;
    }

    if (msg_len >= empty_len) {
        session->tx_status = (td_s32)SOC_ERR_MAILBOX_NO_MEMORY;
        return SOC_ERR_MAILBOX_NO_MEMORY;
    }

    for (i = 0; i < msg_len; i++) {
        session->tx_buf.addr[wr_idx++] = msg[i];
        wr_idx %= session->tx_buf.size;
    }
    session->tx_buf.wr_idx = wr_idx;
    session->tx_status = (td_s32)SOC_ERR_MAILBOX_PENDING;

    /* Data is only queued into tx_buf here; the asynchronous path still reports not-supported to the caller */
    return SOC_ERR_MAILBOX_NOT_SUPPORT;
}

static td_s32 sync_tx_to_reg(struct session *session, const td_u8 *msg, td_u32 msg_len, td_s32 tx_count)
{
    td_u32 i;
    td_u32 tmp_data;

    if ((session == TD_NULL) || (msg == TD_NULL) || (session->tx_reg == TD_NULL) || (session->tx_reg->argv == TD_NULL)) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    for (i = 0; i < msg_len / 4; i++) { /* write body data 4 bytes at a time */
        tmp_data = 0;
        if (memcpy_s(&tmp_data, sizeof(tmp_data), &msg[i * 4 + tx_count], sizeof(td_s32))) { /* offset step of 4 */
            MBX_ERR_PRINT("memcpy_s failed\n");
            return SOC_MBX_FAILURE;
        }
        MBX_WRITEL(tmp_data, session->tx_reg->argv + i);
    }

    if (msg_len % 4) { /* write the remaining bytes, fewer than 4 */
        tmp_data = 0;
        if (memcpy_s(&tmp_data, sizeof(tmp_data), &msg[i * 4 + tx_count], msg_len % 4)) { /* offset step of 4 */
            MBX_ERR_PRINT("memcpy_s failed\n");
            return SOC_MBX_FAILURE;
        }
        MBX_WRITEL(tmp_data, session->tx_reg->argv + i);
    }

    return SOC_MBX_SUCCESS;
}

static td_s32 check_param(td_u32 handle, const td_u8 *msg, td_u32 msg_len, struct session **ptr_session)
{
    struct session *session = NULL;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if (msg_len == 0) {
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    session = find_session(SESSION_HANDLE_NUM(handle), SESSION_HANDLE_PORT(handle));
    if ((session == NULL) || (msg == NULL) || (session->tx_reg == NULL) || (session->tx_reg->argv == NULL) ||
        (session->tx_reg->trigger_rx == NULL) || (session->tx_reg->head == NULL) ||
        (session->tx_reg->lock == NULL) || (session->tx_reg->pending == NULL)) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    *ptr_session = session;
    return SOC_MBX_SUCCESS;
}

static td_void load_tx_head(td_u32 handle, union msg_head *tx_head, struct session *session, td_u32 *len)
{
    td_u32 msg_len = *len;

    /* write head data: each transfer carries at most argv_size bytes */
    tx_head->bits.num = session->num;
    tx_head->bits.port = SESSION_HANDLE_PORT(handle);
    tx_head->bits.msg_len = (msg_len / session->tx_reg->argv_size) ?
        session->tx_reg->argv_size : (msg_len % session->tx_reg->argv_size);
    msg_len -= tx_head->bits.msg_len;
    if (msg_len == 0) {
        tx_head->bits.ongoing = TD_FALSE;
        session->tx_status = SOC_MBX_SUCCESS;
    } else {
        tx_head->bits.ongoing = TD_TRUE;
        session->tx_status = (td_s32)SOC_ERR_MAILBOX_PENDING;
    }
    MBX_WRITEL(tx_head->head, session->tx_reg->head);

    *len = msg_len;
}
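
/*
 * Chunking example (illustrative, assuming argv_size == 8 bytes): a 10-byte
 * message is sent by sync_tx() in two rounds. load_tx_head() first yields
 * msg_len = 8 with ongoing = TD_TRUE and sync_tx_to_reg() fills two argv
 * registers; on the second round msg_len = 2 with ongoing = TD_FALSE, after
 * which the receiver's rx_tail() clears SESSION_BUSY and (when
 * SUPPORT_MBX_INTERRUPT is defined) fires the registered callback.
 */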

static td_s32 sync_tx(td_u32 handle, const td_u8 *msg, td_u32 msg_len, td_u32 timeout)
{
    td_s32 tx_count, ret;
    td_u32 status;
    struct session *session = NULL;
    union msg_head tx_head = {.head = 0};
    td_ulong lock_flag;

    ret = check_param(handle, msg, msg_len, &session);
    if ((ret != SOC_MBX_SUCCESS) || (session == NULL)) {
        return ret;
    }

    tx_count = 0;
    while (1) {
        spin_lock_irqsave(session->tx_reg->lock, &lock_flag);
        status = MBX_READL((session->tx_reg->pending));
        spin_unlock_irqrestore(session->tx_reg->lock, &lock_flag);
        if (status) {
            /* The peer has not consumed the previous chunk yet; wait and retry */
            MBX_UDELAY(MBX_DELAY_TIME);
            if (timeout > MBX_DELAY_TIME) {
                timeout = timeout - MBX_DELAY_TIME;
            } else {
                return SOC_ERR_MAILBOX_TIMEOUT;
            }
            continue;
        }

        /* break once all data has been sent and the last chunk was consumed */
        if (msg_len == 0) {
            break;
        }

        spin_lock_irqsave(session->tx_reg->lock, &lock_flag);
        status = MBX_READL((session->tx_reg->pending));
        if (status) {
            spin_unlock_irqrestore(session->tx_reg->lock, &lock_flag);
            continue;
        }

        load_tx_head(handle, &tx_head, session, &msg_len);
        ret = sync_tx_to_reg(session, msg, tx_head.bits.msg_len, tx_count);
        if (ret != SOC_MBX_SUCCESS) {
            spin_unlock_irqrestore(session->tx_reg->lock, &lock_flag);
            return ret;
        }

        /* trigger the rx interrupt on the other side */
        MBX_WRITEL(0x01, session->tx_reg->trigger_rx); /* write 0x01 to trigger */
        tx_count += (td_s32)tx_head.bits.msg_len;
        spin_unlock_irqrestore(session->tx_reg->lock, &lock_flag);
    }

    return SOC_MBX_SUCCESS;
}

td_s32 mbx_tx(td_u32 handle, const td_u8 *msg, td_u32 msg_len, td_u32 * const tx_len, td_u32 timeout)
{
    td_s32 ret;

    if (tx_len == NULL) {
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    if (g_mailbox.initalized != TD_TRUE) {
        *tx_len = 0;
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if (msg == NULL) {
        *tx_len = 0;
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    if (msg_len == 0) {
        *tx_len = 0;
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    if (timeout == 0) {
        ret = async_tx(handle, msg, msg_len);
        if (ret != SOC_MBX_SUCCESS) {
            *tx_len = 0;
            return ret;
        }
    } else {
        ret = sync_tx(handle, msg, msg_len, timeout);
        if (ret != SOC_MBX_SUCCESS) {
            *tx_len = 0;
            return ret;
        }
    }

    *tx_len = msg_len;
    return SOC_MBX_SUCCESS;
}

td_s32 mbx_rx(td_u32 handle, td_u8 *msg, td_u32 msg_len, td_u32 * const rx_len, td_u32 timeout)
{
    struct session *session = NULL;
    td_u32 status, rd_idx, wr_idx;
    td_u32 i, len;

    if ((msg == NULL) || (msg_len == 0) || (rx_len == NULL)) {
        return SOC_ERR_MAILBOX_INVALID_PARA;
    }

    if (g_mailbox.initalized != TD_TRUE) {
        *rx_len = 0;
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    session = find_session(SESSION_HANDLE_NUM(handle), SESSION_HANDLE_PORT(handle));
    if (session == NULL) {
        *rx_len = 0;
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    rd_idx = session->rx_buf.rd_idx;
    wr_idx = session->rx_buf.wr_idx;
    status = session->rx_status & SESSION_BUSY;
    while (status || !(wr_idx - rd_idx)) {
        MBX_UDELAY(MBX_DELAY_TIME);
        /* Receive messages in polling mode if interrupts are not supported */
        mbx_polling_rx();
        if (timeout > MBX_DELAY_TIME) {
            timeout = timeout - MBX_DELAY_TIME;
        }
        if (timeout <= MBX_DELAY_TIME) {
            *rx_len = 0;
            return SOC_ERR_MAILBOX_TIMEOUT;
        }
        rd_idx = session->rx_buf.rd_idx;
        wr_idx = session->rx_buf.wr_idx;
        status = session->rx_status & SESSION_BUSY;
    }

    if (wr_idx > rd_idx) {
        len = wr_idx - rd_idx;
    } else {
        len = session->rx_buf.size + wr_idx - rd_idx;
    }
    len = msg_len > len ? len : msg_len;

    for (i = 0; i < len; i++) {
        msg[i] = session->rx_buf.addr[rd_idx++];
        rd_idx %= session->rx_buf.size;
    }
    session->rx_buf.rd_idx = rd_idx;

    *rx_len = len;
    return SOC_MBX_SUCCESS;
}
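
/*
 * Timeout note (added for clarity): sync_tx() and mbx_rx() busy-wait in
 * MBX_DELAY_TIME steps until the remaining timeout is exhausted, so the
 * timeout argument uses the same unit as MBX_DELAY_TIME. Passing timeout == 0
 * to mbx_tx() selects the buffered async_tx() path instead of the
 * register-level sync_tx() path.
 */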

td_s32 mbx_register_irq_callback(td_u32 handle, session_callback func, const td_void * const data)
{
    struct session *session = NULL;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    session = find_session(SESSION_HANDLE_NUM(handle), SESSION_HANDLE_PORT(handle));
    if (session == NULL) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    session->func = func;
    session->data = data;

    return SOC_MBX_SUCCESS;
}

static struct session *mbx_alloc_session(td_u32 session_id, td_u32 rx_buf_size, td_u32 tx_buf_size)
{
    struct session *session = NULL;
    struct session *tmp = NULL;
    td_u32 port;

    port = 0;
    /* find the minimum free port for this mailbox number in the list */
retry:
    MBX_LIST_FOR_EACH_ENTRY(session, tmp, &g_mailbox.list_head, node) {
        if (session->num == SESSION_ID_NUM(session_id)) {
            if (port == session->port) {
                port++;
                goto retry;
            }
        }
    }

    session = (struct session *)MBX_MALLOC(sizeof(struct session));
    if (session == NULL) {
        return NULL;
    }
    (void)memset_s(session, sizeof(struct session), 0, sizeof(struct session));

    session->num = SESSION_ID_NUM(session_id);
    session->port = port;

    if (rx_buf_size > 0) {
        session->rx_buf.addr = (td_u8 *)MBX_MALLOC(rx_buf_size);
        if (session->rx_buf.addr == NULL) {
            MBX_FREE(session);
            return NULL;
        }
        session->rx_buf.size = rx_buf_size;
    } else {
        session->rx_buf.addr = NULL;
        session->rx_buf.size = 0;
    }
    session->rx_buf.rd_idx = 0;
    session->rx_buf.wr_idx = 0;

    if (tx_buf_size > 0) {
        session->tx_buf.addr = (td_u8 *)MBX_MALLOC(tx_buf_size);
        if (session->tx_buf.addr == NULL) {
            if (session->rx_buf.addr != NULL) {
                MBX_FREE(session->rx_buf.addr);
                session->rx_buf.addr = NULL;
            }
            MBX_FREE(session);
            return NULL;
        }
        session->tx_buf.size = tx_buf_size;
    } else {
        session->tx_buf.addr = NULL;
        session->tx_buf.size = 0;
    }
    session->tx_buf.rd_idx = 0;
    session->tx_buf.wr_idx = 0;

    return session;
}

td_s32 mbx_open(td_u32 session_id, td_u32 rx_buf_size, td_u32 tx_buf_size)
{
    struct session *session = NULL;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    if (rx_buf_size == 0) {
        return SOC_ERR_MAILBOX_NOT_SUPPORT;
    }

    if (SESSION_ID_SIDE0(session_id) != g_mailbox.local_cpu &&
        SESSION_ID_SIDE1(session_id) != g_mailbox.local_cpu) {
        return SOC_ERR_MAILBOX_NOT_SUPPORT;
    }

    if (SESSION_ID_SIDE0(session_id) >= CPU_MAX ||
        SESSION_ID_SIDE1(session_id) >= CPU_MAX) {
        return SOC_ERR_MAILBOX_NOT_SUPPORT;
    }

    mutex_lock(&g_mailbox.list_lock);
    session = mbx_alloc_session(session_id, rx_buf_size, tx_buf_size);
    if (session == NULL) {
        mutex_unlock(&g_mailbox.list_lock);
        return SOC_ERR_MAILBOX_NO_MEMORY;
    }

    init_mailbox_reg(session, session_id, &g_mailbox);
    if ((session->tx_reg == NULL) || (session->rx_reg == NULL)) {
        if (session->rx_buf.addr != NULL) {
            MBX_FREE(session->rx_buf.addr);
            session->rx_buf.addr = NULL;
        }
        if (session->tx_buf.addr != NULL) {
            MBX_FREE(session->tx_buf.addr);
            session->tx_buf.addr = NULL;
        }
        MBX_FREE(session);
        mutex_unlock(&g_mailbox.list_lock);
        return SOC_ERR_MAILBOX_NOT_SUPPORT;
    }

    MBX_LIST_ADD(&session->node, &g_mailbox.list_head);
    mutex_unlock(&g_mailbox.list_lock);

    return GEN_SESSION_HANDLE(session->num, session->port);
}

td_s32 mbx_close(td_u32 handle)
{
    struct session *session = NULL;

    if (g_mailbox.initalized != TD_TRUE) {
        return SOC_ERR_MAILBOX_NOT_INIT;
    }

    session = find_session(SESSION_HANDLE_NUM(handle), SESSION_HANDLE_PORT(handle));
    if (session == NULL) {
        return SOC_ERR_MAILBOX_INVALID_HANDLE;
    }

    if (session->tx_reg != NULL) {
        MBX_FREE(session->tx_reg);
        session->tx_reg = NULL;
    }
    if (session->rx_reg != NULL) {
        MBX_FREE(session->rx_reg);
        session->rx_reg = NULL;
    }
    if (session->rx_buf.addr != NULL) {
        MBX_FREE(session->rx_buf.addr);
        session->rx_buf.addr = NULL;
    }
    if (session->tx_buf.addr != NULL) {
        MBX_FREE(session->tx_buf.addr);
        session->tx_buf.addr = NULL;
    }

    mutex_lock(&g_mailbox.list_lock);
    MBX_LIST_DEL(&session->node);
    MBX_FREE(session);
    session = NULL;
    mutex_unlock(&g_mailbox.list_lock);

    return SOC_MBX_SUCCESS;
}
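
/*
 * Locking note (added for clarity): g_mailbox.list_lock serializes session
 * list changes in mbx_open()/mbx_close(), while the per-session tx_reg->lock
 * spinlock in sync_tx() guards the shared head/argv/pending registers against
 * concurrent senders and interrupt context.
 */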

struct mailbox *get_mailbox_data(td_void)
{
    return &g_mailbox;
}
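
/*
 * Usage sketch (illustrative only, not part of the driver): a minimal blocking
 * round trip over one session. MBX_BUILD_USAGE_EXAMPLE, MBX_EXAMPLE_SESSION_ID
 * and MBX_EXAMPLE_TIMEOUT are hypothetical placeholders; a real session id
 * must be composed according to the SESSION_ID_* layout in mbx_common.h, and
 * buffer sizes and timeouts are application choices. Error handling below
 * assumes failed opens return a negative code, which may differ on a given
 * platform.
 */
#ifdef MBX_BUILD_USAGE_EXAMPLE
td_s32 mbx_usage_example(td_void)
{
    td_u8 reply[32] = {0};
    td_u32 tx_len = 0;
    td_u32 rx_len = 0;
    const td_u8 ping[4] = {0x70, 0x69, 0x6E, 0x67}; /* "ping" */
    td_s32 handle;
    td_s32 ret;

    /* 128-byte rx and tx ring buffers for this session (example sizes) */
    handle = mbx_open(MBX_EXAMPLE_SESSION_ID, 128, 128);
    if (handle < 0) { /* assumption: error codes are negative */
        return handle;
    }

    /* a non-zero timeout selects the synchronous, register-level path */
    ret = mbx_tx((td_u32)handle, ping, sizeof(ping), &tx_len, MBX_EXAMPLE_TIMEOUT);
    if (ret == SOC_MBX_SUCCESS) {
        ret = mbx_rx((td_u32)handle, reply, sizeof(reply), &rx_len, MBX_EXAMPLE_TIMEOUT);
    }

    (void)mbx_close((td_u32)handle);
    return ret;
}
#endif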