You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
1433 lines
40 KiB
1433 lines
40 KiB
/*
|
|
* Copyright (c) Hisilicon Technologies Co., Ltd. 2016-2021. All rights reserved.
|
|
* Description: tsio hal func impl.
|
|
* Author: Hisilicon
|
|
* Create: 2016-09-07
|
|
*/
|
|
|
|
#include "hal_tsio.h"
|
|
#include "linux/io.h"
|
|
#include "soc_log.h"
|
|
#include "reg_common_ext.h"
|
|
#include "drv_sys_ext.h"
|
|
#include "drv_tsio_reg.h"
|
|
#include "drv_tsio_utils.h"
|
|
|
|
#define TSIO_BITS_PER_REG 32
|
|
#define NUM_SIZE 32
|
|
|
|
static spinlock_t g_tsio_hal_lock = __SPIN_LOCK_UNLOCKED(g_tsio_hal_lock);
|
|
|
|
static inline td_void en_pcr_proof(const struct tsio_mgmt *mgmt)
|
|
{
|
|
timer_ctrl reg1;
|
|
|
|
reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TIMER_CTL);
|
|
reg1.bits.timer_en = 1;
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_TIMER_CTL, reg1.u32);
|
|
}
|
|
|
|
/*
 * Bring the TSIO controller out of reset and enable its clock.
 *
 * Sequence: assert ctrl reset -> ungate ctrl clock -> deassert reset ->
 * wait for the CRG to settle -> enable PCR proofing. With loopback support
 * compiled in, debug smart-card CTS/GEN paths are also enabled.
 *
 * Returns TD_SUCCESS, or SOC_ERR_TSIO_NOT_SUPPORT when the fuse says this
 * chip has no TSIO hardware.
 */
td_s32 tsio_hal_init_ctrl(const struct tsio_mgmt *mgmt)
{
#ifdef CONFIG_SOCT_PHY_LOOPBACK_SUPPORT
    dbg_sc_cts reg1;
    dbg_sc_gen_open reg2;
#endif

#if defined(CONFIG_RESERVED13)
    volatile ext_reg_peri *reg_peri = ext_drv_sys_get_peri_reg_ptr();
    /* some chips not include tsio hw, so check it first. */
    /* fuse bit 20 set => TSIO absent on this part */
    if (unlikely(reg_peri->PERI_SOC_FUSE_2 & 0x100000)) {
        soc_log_err("TSIO not enabled for this chip.\n");
        return SOC_ERR_TSIO_NOT_SUPPORT;
    }
#endif

    /* reset tsio ctrl */
    osal_clk_set_reset("tsio_srst_req", true);
    osal_mb();

    osal_clk_set_enable("clkgate_tsio", true);

    osal_clk_set_reset("tsio_srst_req", false);
    osal_mb();

    /* wait ctrl crg reset finished. */
    osal_udelay(100); /* delay 100us */

    en_pcr_proof(mgmt);

#ifdef CONFIG_SOCT_PHY_LOOPBACK_SUPPORT
    /* route the debug smart-card CTS path for loopback testing */
    reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DBG_SC_CTS);
    reg1.bits.dbg_sc_cts_en = 1;
    tsio_write_reg(mgmt->io_base, TSIO_REG_DBG_SC_CTS, reg1.u32);

    reg2.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DBG_SC_GEN_OPEN);
    reg2.bits.dbg_sc_gen_open = 1;
    tsio_write_reg(mgmt->io_base, TSIO_REG_DBG_SC_GEN_OPEN, reg2.u32);
#endif

    return TD_SUCCESS;
}
|
|
|
|
td_void tsio_hal_de_init_ctrl(const struct tsio_mgmt *mgmt)
|
|
{
|
|
bool is_reset = false;
|
|
unsigned long start, end;
|
|
|
|
osal_clk_set_reset("tsio_srst_req", true);
|
|
osal_mb();
|
|
|
|
start = jiffies;
|
|
end = start + HZ; /* 1s */
|
|
do {
|
|
osal_clk_get_reset("tsio_srst_ok", &is_reset);
|
|
} while (is_reset != true && time_in_range(jiffies, start, end));
|
|
|
|
if (is_reset != true) {
|
|
soc_log_err("tsio ctrl reset failed.\n");
|
|
goto out;
|
|
}
|
|
|
|
osal_clk_set_enable("clkgate_tsio", false);
|
|
osal_mb();
|
|
|
|
out:
|
|
return;
|
|
}
|
|
|
|
#ifndef CONFIG_SOCT_PHY_LOOPBACK_SUPPORT
|
|
/* Program the phy offset control field, preserving the other bits. */
static td_void _setup_phy_offset_ctl(const struct tsio_mgmt *mgmt, td_u32 offset)
{
    phy_offset_ctl ctl;

    ctl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_OFFSET_CTL);
    ctl.bits.offset_ctl = offset;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_OFFSET_CTL, ctl.u32);
}
|
|
|
|
/* Program the phy output swing control field, preserving the other bits. */
static td_void _setup_phy_swing_ctl(const struct tsio_mgmt *mgmt, td_u32 swing)
{
    phy_swing_ctl ctl;

    ctl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_SWING_CTL);
    ctl.bits.swing_ctl = swing;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_SWING_CTL, ctl.u32);
}
|
|
|
|
/* Program the phy pre-emphasis field, preserving the other bits. */
static td_void _setup_phy_pre_emphasis(const struct tsio_mgmt *mgmt, td_u32 pre_emphasis)
{
    phy_pre_emphasis ctl;

    ctl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_PRE_EMPHASIS);
    ctl.bits.pre_emphasis = pre_emphasis;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_PRE_EMPHASIS, ctl.u32);
}
|
|
|
|
/* Program the phy slew-rate control field, preserving the other bits. */
static td_void _setup_phy_slew_ctl(const struct tsio_mgmt *mgmt, td_u32 slew)
{
    phy_slew_ctl ctl;

    ctl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_SLEW_CTL);
    ctl.bits.slew_ctl = slew;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_SLEW_CTL, ctl.u32);
}
|
|
|
|
/* Program the phy clock/data skew field, preserving the other bits. */
static td_void _setup_phy_clk_data_skew(const struct tsio_mgmt *mgmt, td_u32 skew)
{
    phy_clk_data_skew ctl;

    ctl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_CLK_DATA_SKEW);
    ctl.bits.skew_ctl = skew;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_CLK_DATA_SKEW, ctl.u32);
}
|
|
|
|
static td_void setup_phy_configure(const struct tsio_mgmt *mgmt)
|
|
{
|
|
td_u32 offset, swing, pre_emphasis, slew, skew;
|
|
|
|
if (mgmt->band_width == TSIO_BW_400M) {
|
|
offset = TSIO_PHY_100MHZ_OFFSET;
|
|
swing = TSIO_PHY_100MHZ_SWING;
|
|
pre_emphasis = TSIO_PHY_100MHZ_PRE_EMPHASIS;
|
|
slew = TSIO_PHY_100MHZ_SLEW;
|
|
skew = TSIO_PHY_100MHZ_SKEW;
|
|
} else if (mgmt->band_width == TSIO_BW_200M) {
|
|
offset = TSIO_PHY_50MHZ_OFFSET;
|
|
swing = TSIO_PHY_50MHZ_SWING;
|
|
pre_emphasis = TSIO_PHY_50MHZ_PRE_EMPHASIS;
|
|
slew = TSIO_PHY_50MHZ_SLEW;
|
|
skew = TSIO_PHY_50MHZ_SKEW;
|
|
} else if (mgmt->band_width == TSIO_BW_100M) {
|
|
offset = TSIO_PHY_25MHZ_OFFSET;
|
|
swing = TSIO_PHY_25MHZ_SWING;
|
|
pre_emphasis = TSIO_PHY_25MHZ_PRE_EMPHASIS;
|
|
slew = TSIO_PHY_25MHZ_SLEW;
|
|
skew = TSIO_PHY_25MHZ_SKEW;
|
|
} else if (mgmt->band_width == TSIO_BW_50M) {
|
|
offset = TSIO_PHY_12MHZ_OFFSET;
|
|
swing = TSIO_PHY_12MHZ_SWING;
|
|
pre_emphasis = TSIO_PHY_12MHZ_PRE_EMPHASIS;
|
|
slew = TSIO_PHY_12MHZ_SLEW;
|
|
skew = TSIO_PHY_12MHZ_SKEW;
|
|
} else {
|
|
return;
|
|
}
|
|
|
|
_setup_phy_offset_ctl(mgmt, offset);
|
|
_setup_phy_swing_ctl(mgmt, swing);
|
|
_setup_phy_pre_emphasis(mgmt, pre_emphasis);
|
|
_setup_phy_slew_ctl(mgmt, slew);
|
|
_setup_phy_clk_data_skew(mgmt, skew);
|
|
}
|
|
#endif
|
|
|
|
/*
 * Bring up the TSIO phy.
 *
 * Sequence: assert phy CRG reset -> ungate phy clock -> select the phy clock
 * rate from the configured bandwidth -> deassert reset -> self-reset the phy
 * via PHY_CTRL -> wait (<=1s) for phy_ready. In loopback builds the BIST
 * internal loopback is then enabled and sync is awaited; otherwise the sync
 * limit and analog tuning are programmed, training and sync are awaited,
 * resync is enabled and the final sync window configured. Errors are logged
 * and the function returns early; there is no status returned to the caller.
 */
td_void tsio_hal_init_phy(const struct tsio_mgmt *mgmt)
{
    phy_ctrl reg1;
    phy_init_reg reg2;
#ifdef CONFIG_SOCT_PHY_LOOPBACK_SUPPORT
    phy_bist_reg reg3;
#else
    phy_sync_limit reg4;
    phy_resync_ctrl resync_ctrl;
    phy_sync_limit sync_limit;
#endif
    unsigned long start, end;

    /* phy crg reset. */
    osal_clk_set_reset("tsio_phy_srst_req", true);
    osal_clk_set_enable("clkgate_tsio_phy", true);

    /* phy clock is one quarter of the link bandwidth */
    if (mgmt->band_width == TSIO_BW_400M) {
        osal_clk_set_rate("clkmux_tsio_phy", 100000000); /* 100000000 means 100M clock */
    } else if (mgmt->band_width == TSIO_BW_200M) {
        osal_clk_set_rate("clkmux_tsio_phy", 50000000); /* 50000000 means 50M clock */
    } else if (mgmt->band_width == TSIO_BW_100M) {
        osal_clk_set_rate("clkmux_tsio_phy", 25000000); /* 25000000 means 25M clock */
    } else if (mgmt->band_width == TSIO_BW_50M) {
        osal_clk_set_rate("clkmux_tsio_phy", 12500000); /* 12500000 means 12.5M clock */
    } else {
        soc_log_fatal("mgmt band_width mismatch, mgmt band_width is:%u!\n", mgmt->band_width);
        return;
    }

    osal_mb();

    osal_clk_set_reset("tsio_phy_srst_req", false);

    /* wait phy crg reset finished. */
    osal_udelay(100); /* delay 100us */

    /* phy self reset: pulse phy_rst_n low then high. */
    reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_CTRL);

    reg1.bits.phy_rst_n = 0;

    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_CTRL, reg1.u32);

    reg1.bits.phy_rst_n = 1;

    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_CTRL, reg1.u32);

    /* wait phy reset finished */
    start = jiffies;
    end = start + HZ; /* 1s */
    do {
        reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_CTRL);

        osal_udelay(10); /* delay 10us */
    } while (reg1.bits.phy_ready == 0 && time_in_range(jiffies, start, end));

    if (reg1.bits.phy_ready == 0) {
        soc_log_err("tsio phy reset failed.\n");
        return;
    }

#ifdef CONFIG_SOCT_PHY_LOOPBACK_SUPPORT
    /* do loop back */
    reg3.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_BIST_REG);

    reg3.bits.internal_loopback = 1;
    reg3.bits.pattern_sel = 1;

    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_BIST_REG, reg3.u32);

    /* wait sync finished */
    start = jiffies;
    end = start + HZ; /* 1s */
    do {
        reg2.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_INIT_REG);

        osal_udelay(10); /* delay 10us */
    } while (reg2.bits.sync_ready == 0 && time_in_range(jiffies, start, end));

    if (reg2.bits.sync_ready == 0) {
        soc_log_err("tsio phy sync failed.\n");
        return;
    }
#else
    /* program the sync threshold before starting link init */
    reg4.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_SYNC_LIMIT);

    reg4.bits.sync_cnt = mgmt->sync_thres;

    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_SYNC_LIMIT, reg4.u32);

    /* setup phy configuration */
    setup_phy_configure(mgmt);

    /* start phy */
    reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_CTRL);

    reg1.bits.init_start = 1;

    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_CTRL, reg1.u32);

    /* wait phy start finished */
    start = jiffies;
    end = start + HZ; /* 1s */
    do {
        reg2.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_INIT_REG);

        osal_udelay(10); /* delay 10us */
    } while (reg2.bits.training_finish == 0 && time_in_range(jiffies, start, end));

    /*
     * NOTE(review): the loop above waits on training_finish but this check
     * reads training_pattern_received — confirm against the phy spec that
     * the asymmetry is intentional.
     */
    if (reg2.bits.training_pattern_received == 0) {
        soc_log_err("tsio phy training failed.\n");
        return;
    }

    /* wait sync finished */
    start = jiffies;
    end = start + HZ; /* 1s */
    do {
        reg2.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_INIT_REG);

        osal_udelay(10); /* delay 10us */
    } while (reg2.bits.sync_finish == 0 && time_in_range(jiffies, start, end));

    /*
     * NOTE(review): loop polls sync_finish but the error check reads
     * sync_ready — verify these two fields are meant to differ here.
     */
    if (reg2.bits.sync_ready == 0) {
        soc_log_err("tsio phy sync failed.\n");
        return;
    }

    /* enable resync */
    resync_ctrl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_RESYNC_CTRL);
    resync_ctrl.bits.resync_en = 1;
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_RESYNC_CTRL, resync_ctrl.u32);

    /* set sync limit */
    sync_limit.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_SYNC_LIMIT);
    sync_limit.bits.sync_cnt = 8; /* Number of syncs is 8 */
    sync_limit.bits.sync_time = 415; /* Sync time is 415 */
    tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_SYNC_LIMIT, sync_limit.u32);
#endif

    /* check phy final status */
    reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_CTRL);

    if (reg1.bits.init_fail_status) {
        soc_log_err("tsio phy start failed.\n");
        return;
    }

    return;
}
|
|
|
|
td_void tsio_hal_de_init_phy(const struct tsio_mgmt *mgmt)
|
|
{
|
|
osal_clk_set_reset("tsio_phy_srst_req", true);
|
|
osal_clk_set_enable("clkgate_tsio_phy", false);
|
|
|
|
osal_mb();
|
|
}
|
|
|
|
td_void tsio_hal_en_pid_channel(const struct tsio_mgmt *mgmt, const struct tsio_en_pidch_para *para)
|
|
{
|
|
pid_table reg;
|
|
td_u32 id = para->id;
|
|
td_u32 pid = para->pid;
|
|
td_u32 port_id = para->port_id;
|
|
td_u32 sid = para->sid;
|
|
td_bool is_live_ts = para->is_live_ts;
|
|
|
|
tsio_tatal_error(id >= mgmt->pid_channel_cnt);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_pid_table(id));
|
|
|
|
reg.bits.service_id = sid;
|
|
if (is_live_ts) {
|
|
reg.bits.tsid = port_id;
|
|
reg.bits.tsid_type = 0;
|
|
} else {
|
|
reg.bits.tsid = port_id;
|
|
reg.bits.tsid_type = 1;
|
|
}
|
|
reg.bits.pid = pid;
|
|
reg.bits.pid_table_en = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, tsio_reg_pid_table(id), reg.u32);
|
|
}
|
|
|
|
/* Disable a PID filter channel by clearing its whole table entry. */
td_void tsio_hal_dis_pid_channel(const struct tsio_mgmt *mgmt, td_u32 id)
{
    pid_table entry;

    tsio_tatal_error(id >= mgmt->pid_channel_cnt);

    entry.u32 = 0;

    entry.bits.pid_table_en = 0; /* explicit: entry disabled */

    tsio_write_reg(mgmt->io_base, tsio_reg_pid_table(id), entry.u32);
}
|
|
|
|
/*
 * Route a TSIO TSI port to a demux port.
 *
 * The dvb port id is OR-ed with a type mask (0xc0 for IF ports, 0x20
 * otherwise) and written into the per-port selection field. Ports 0-3 live
 * in SEL0, ports 4-7 in SEL1; each field has its own bitfield name, hence
 * the switch instead of an indexed access.
 */
td_void tsio_hal_en_tsi_port(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 dvb_port_id,
    tsio_live_port_type port_type)
{
    td_u8 mask = (port_type == TSIO_LIVE_PORT_IF) ? 0xc0 : 0x20;

    tsio_tatal_error(id >= mgmt->tsi_port_cnt);

    if (id < 4) { /* tsio port is 4, [0...3] */
        tsio_to_dmx_sw_ts_sel0 reg;

        reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL0);

        switch (id) {
            case 0: /* 0 means first port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_0 = (dvb_port_id | mask);
                break;
            case 1: /* 1 means second port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_1 = (dvb_port_id | mask);
                break;
            case 2: /* 2 means third port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_2 = (dvb_port_id | mask);
                break;
            case 3: /* 3 means fourth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_3 = (dvb_port_id | mask);
                break;
            default:
                soc_log_fatal("id is mismatch, id is: %u!\n", id);
                return;
        }

        tsio_write_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL0, reg.u32);
    } else { /* [4...7] */
        tsio_to_dmx_sw_ts_sel1 reg;

        reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL1);

        switch (id) {
            case 4: /* 4 means fifth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_4 = (dvb_port_id | mask);
                break;
            case 5: /* 5 means sixth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_5 = (dvb_port_id | mask);
                break;
            case 6: /* 6 means seventh port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_6 = (dvb_port_id | mask);
                break;
            case 7: /* 7 means eighth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_7 = (dvb_port_id | mask);
                break;
            default:
                soc_log_fatal("id is mismatch, id is: %u!\n", id);
                return;
        }

        tsio_write_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL1, reg.u32);
    }
}
|
|
|
|
/*
 * Disconnect a TSI port from the demux by zeroing its selection field.
 * Counterpart of tsio_hal_en_tsi_port; same SEL0/SEL1 split and per-field
 * switch (each field has a distinct bitfield name).
 */
td_void tsio_hal_dis_tsi_port(const struct tsio_mgmt *mgmt, td_u32 id)
{
    tsio_tatal_error(id >= mgmt->tsi_port_cnt);

    if (id < 4) { /* tsio port is 4, [0...3] */
        tsio_to_dmx_sw_ts_sel0 reg;

        reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL0);

        switch (id) {
            case 0: /* 0 means first port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_0 = 0;
                break;
            case 1: /* 1 means second port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_1 = 0;
                break;
            case 2: /* 2 means third port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_2 = 0;
                break;
            case 3: /* 3 means fourth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_3 = 0;
                break;
            default:
                soc_log_fatal("id is mismatch, id is: %u!\n", id);
                return;
        }

        tsio_write_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL0, reg.u32);
    } else { /* [4...7] */
        tsio_to_dmx_sw_ts_sel1 reg;

        reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL1);

        switch (id) {
            case 4: /* 4 means fifth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_4 = 0;
                break;
            case 5: /* 5 means sixth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_5 = 0;
                break;
            case 6: /* 6 means seventh port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_6 = 0;
                break;
            case 7: /* 7 means eighth port id */
                reg.bits.tsio2dmx_sw_ts_sel_cfg_7 = 0;
                break;
            default:
                soc_log_fatal("id is mismatch, id is: %u!\n", id);
                return;
        }

        tsio_write_reg(mgmt->io_base, TSIO_REG_TSIO2DMX_SW_TS_SEL1, reg.u32);
    }
}
|
|
|
|
/* Kick off a CC transmit of cclen bytes by setting length + ready bit. */
td_void tsio_hal_send_ccout(const struct tsio_mgmt *mgmt, td_u32 cclen)
{
    cc_len cc;

    cc.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_CC_LEN);

    cc.bits.cc_send_length = cclen;
    cc.bits.cc_send_rdy = 1;

    tsio_write_reg(mgmt->io_base, TSIO_REG_CC_LEN, cc.u32);
}
|
|
|
|
/* Write one 32-bit word of CC payload into transmit slot 'id'. */
td_void tsio_hal_set_ccslot(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 ccdata)
{
    tsio_write_reg(mgmt->io_base, tsio_reg_cc_data(id), ccdata);
}
|
|
|
|
/* Read the length of the received CC response into *ccresp_len. */
td_void tsio_hal_get_ccresp_len(const struct tsio_mgmt *mgmt, td_u32 *ccresp_len)
{
    cc_ram_len len_reg;

    len_reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_CC_RAM_LEN);

    *ccresp_len = len_reg.bits.cc_rsv_length;
}
|
|
|
|
/* Read one 32-bit word of received CC payload from RAM slot 'id'. */
td_void tsio_hal_get_ccslot(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 *ccdata)
{
    *ccdata = tsio_read_reg(mgmt->io_base, tsio_reg_cc_ram_data(id));
}
|
|
|
|
td_void tsio_hal_recv_ccdone(const struct tsio_mgmt *mgmt)
|
|
{
|
|
cc_ram_rdone reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_CC_RAM_RDONE);
|
|
|
|
reg.bits.cc_ram_rdone = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_CC_RAM_RDONE, reg.u32);
|
|
}
|
|
|
|
/* Return the raw CC receive-hold conflict status register. */
td_u32 tsio_hal_get_cconflict_status(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_CC_REV_HOLD_CONFLICT);
}
|
|
|
|
td_void tsio_hal_clr_cconflict_status(const struct tsio_mgmt *mgmt)
|
|
{
|
|
cc_rev_hold_conflict reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_CC_REV_HOLD_CONFLICT);
|
|
|
|
reg.bits.cc_rev_hold_conflict = 0;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_CC_REV_HOLD_CONFLICT, reg.u32);
|
|
}
|
|
|
|
/*
 * Enable service element 'id' as the stuffing service: configure its SID
 * table entry as non-transport type and register it as the stuff SID.
 */
td_void tsio_hal_en_stuff_srv(const struct tsio_mgmt *mgmt, td_u32 id)
{
    sid_table sid_entry;
    stuff_sid stuff;

    tsio_tatal_error(id >= mgmt->se_cnt);

    sid_entry.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_sid_table(id));

    sid_entry.bits.trans_type = 0;
    sid_entry.bits.sid_table_en = 1;

    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), sid_entry.u32);

    stuff.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_STUFF_SID);

    stuff.bits.stuff_sid = id;

    tsio_write_reg(mgmt->io_base, TSIO_REG_STUFF_SID, stuff.u32);
}
|
|
|
|
/*
 * Route service element 'id' to demux port 'dmx_port_id': transport-type
 * entry with the output port enabled and sync-byte saving off.
 */
td_void tsio_hal_en2dmx_srv(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 dmx_port_id)
{
    sid_table entry;

    tsio_tatal_error(id >= mgmt->se_cnt);
    tsio_tatal_error(dmx_port_id >= mgmt->tsi_port_cnt);

    entry.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_sid_table(id));

    entry.bits.trans_type = 1;
    entry.bits.outport_id = dmx_port_id;
    entry.bits.outport_en = 1;
    entry.bits.sp_save = 0;
    entry.bits.sid_table_en = 1;

    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), entry.u32);
}
|
|
|
|
/* Disable a demux-routed service by clearing its whole SID table entry. */
td_void tsio_hal_dis2dmx_srv(const struct tsio_mgmt *mgmt, td_u32 id)
{
    tsio_tatal_error(id >= mgmt->se_cnt);

    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), 0);
}
|
|
|
|
/*
 * Route service element 'id' to RAM via DMA: transport-type entry with
 * DMA enabled; the DMA buffer id mirrors the service id.
 */
td_void tsio_hal_en2ram_srv(const struct tsio_mgmt *mgmt, td_u32 id)
{
    sid_table entry;

    tsio_tatal_error(id >= mgmt->se_cnt);

    entry.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_sid_table(id));

    entry.bits.trans_type = 1;
    entry.bits.buf_id = id; /* one DMA buffer per service element */
    entry.bits.dma_en = 1;
    entry.bits.sp_save = 0;
    entry.bits.sid_table_en = 1;

    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), entry.u32);
}
|
|
|
|
/* Disable a RAM-routed service by clearing its whole SID table entry. */
td_void tsio_hal_dis2ram_srv(const struct tsio_mgmt *mgmt, td_u32 id)
{
    tsio_tatal_error(id >= mgmt->se_cnt);

    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), 0);
}
|
|
|
|
/* Turn on sp_save for service element 'id', leaving the rest untouched. */
td_void tsio_hal_en_sp_save(const struct tsio_mgmt *mgmt, td_u32 id)
{
    sid_table entry;

    tsio_tatal_error(id >= mgmt->se_cnt);

    entry.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_sid_table(id));
    entry.bits.sp_save = 1;
    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), entry.u32);
}
|
|
|
|
/* Turn off sp_save for service element 'id', leaving the rest untouched. */
td_void tsio_hal_dis_sp_save(const struct tsio_mgmt *mgmt, td_u32 id)
{
    sid_table entry;

    tsio_tatal_error(id >= mgmt->se_cnt);

    entry.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_sid_table(id));
    entry.bits.sp_save = 0;
    tsio_write_reg(mgmt->io_base, tsio_reg_sid_table(id), entry.u32);
}
|
|
|
|
/* Return the hardware packet counter for service element 'id'. */
td_u32 tsio_hal_get_srv_pkt_count(const struct tsio_mgmt *mgmt, td_u32 id)
{
    tsio_tatal_error(id >= mgmt->se_cnt);

    return tsio_read_reg(mgmt->io_base, tsio_reg_sid_counter(id));
}
|
|
|
|
/*
 * Load DMA_CTRL into *reg3 and turn on every DMA interrupt-enable bit
 * (completion, flush, descriptor and error sources). The caller writes
 * the value back to the register.
 */
static td_void _tsio_config_dma_int(const struct tsio_mgmt *mgmt, dma_ctrl *reg3)
{
    reg3->u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CTRL);

    reg3->bits.chnl_pend_int_en = 1;
    reg3->bits.obuf_pack_int_en = 1;
    reg3->bits.obuf_nr_int_en = 1;
    reg3->bits.dma_err_int_en = 1;
    reg3->bits.dma_end_int_en = 1;
    reg3->bits.dma_flush_int_en = 1;
    reg3->bits.dmux_pend_en = 1;
    reg3->bits.des_end_en = 1;
    reg3->bits.dma_bid_err_en = 1;
    reg3->bits.dma_live_oflw_err_en = 1;
    reg3->bits.chk_code_err_en = 1;
    reg3->bits.obuf_oflw_err_en = 1;
    reg3->bits.des_type_err_en = 1;
    reg3->bits.ichl_wptr_oflw_err_en = 1;
}
|
|
|
|
/*
 * Enable the full TSIO interrupt set: general CC/route interrupts, the rx
 * parser error interrupts (minus a few known-problematic sources, see the
 * inline comments), all tx round-robin and PID-filter errors, all DMA
 * interrupts, the DMA coalescing window, and finally the global DMA mask.
 */
td_void tsio_hal_en_all_int(const struct tsio_mgmt *mgmt)
{
    tsio_ie reg1;
    rx_parser_err_ie reg2;
    dma_ctrl reg3;
    dma_coal_cfg reg4;
    dma_glb_stat reg5;

    /* general */
    reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_IE);

    reg1.bits.tx_cc_send_done_ie = 1;
    reg1.bits.rx_cc_done_ie = 1;
    reg1.bits.rx_cts_ie = 0; /* CTS interrupt deliberately left disabled */
    reg1.bits.rx_route_fifo_overflow_ie = 1;
    reg1.bits.tsio_ie = 1; /* master enable for the block */

    tsio_write_reg(mgmt->io_base, TSIO_REG_INT_IE, reg1.u32);

    /* rx parser err */
    reg2.u32 = 0;

    /* reg2.bits.rx_phy_sp_err_ie = 1; no need this irq after phy irq enabled. */
    reg2.bits.rx_fifo_overflow_ie = 1;
    reg2.bits.rx_sp_sync_err_ie = 1;
    reg2.bits.rx_sp_rfu0_err_ie = 1;

    /*
     * for TS based CC it maybe trigger DMA END interrupt, refer to SC FPGA userguide 2.4.
     * but the original hw design considers this to be an exception.
     * so we mask this interrupt.
     */
    reg2.bits.rx_sp_dma_end_err_ie = 0;

    /*
     * tsid and scgen irq cause system hang when change stuff sid.
     */
    reg2.bits.rx_sp_tsid_err_ie = 0;
    reg2.bits.rx_sp_sc_gen_err_ie = 0;

    reg2.bits.rx_sp_encry_en_err_ie = 1;
    reg2.bits.rx_sp_soc_define_err_ie = 1;
    reg2.bits.rx_sp_rfu1_err_ie = 1;
    reg2.bits.rx_sp_rfu2_err_ie = 1;
    reg2.bits.rx_sp_stuff_load_err_ie = 1;
    reg2.bits.rx_cc_err_type_ie = 0xf; /* all four CC error types */

    tsio_write_reg(mgmt->io_base, TSIO_REG_RX_PARSER_ERR_INT_IE, reg2.u32);

    /* tx rr err */
    tsio_write_reg(mgmt->io_base, TSIO_REG_TX_RR_ERR_INT_IE, 0xFFFFFFFF);

    /* pid filter err */
    tsio_write_reg(mgmt->io_base, TSIO_REG_PID_FILTER_ERR_INT_IE, 0xFFFFFFFF);

    /* DMA */
    _tsio_config_dma_int(mgmt, &reg3);

    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CTRL, reg3.u32);

    /* default cal time cycle 0.5ms = 1000 * 0.5us; 0.5us ~= 1s/27mhz/14division. */
    reg4.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_COAL_CFG);

    reg4.bits.coal_time_cyc = 10000; /* 5ms: 10000 * 0.5us */

    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_COAL_CFG, reg4.u32);

    /* unmask DMA int (dma_int_msk = 1 means interrupts delivered) */
    reg5.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT);

    reg5.bits.dma_int_msk = 1;

    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT, reg5.u32);
}
|
|
|
|
td_void tsio_hal_dis_all_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_glb_stat reg1;
|
|
tsio_ie reg2;
|
|
|
|
/* DMA */
|
|
reg1.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT);
|
|
|
|
reg1.bits.dma_int_msk = 0;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT, reg1.u32);
|
|
|
|
/* general */
|
|
reg2.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_IE);
|
|
|
|
reg2.bits.tsio_ie = 0;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_INT_IE, reg2.u32);
|
|
}
|
|
|
|
td_void tsio_hal_en_phy_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
phy_misc reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_MISC);
|
|
|
|
reg.bits.int_mask = 0;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_MISC, reg.u32);
|
|
}
|
|
|
|
td_void tsio_hal_dis_phy_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
phy_misc reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PHY_MISC);
|
|
|
|
reg.bits.int_mask = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_PHY_MISC, reg.u32);
|
|
}
|
|
|
|
td_void tsio_hal_mask_all_dma_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_glb_stat reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT);
|
|
|
|
reg.bits.dma_int_msk = 0;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_void tsio_hal_un_mask_all_dma_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_glb_stat reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT);
|
|
|
|
reg.bits.dma_int_msk = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_u32 tsio_hal_get_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_mis reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_MIS);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
td_void tsio_hal_clr_tx_cc_done_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_ris reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_RIS);
|
|
|
|
reg.bits.tx_cc_send_done_int = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_INT_RIS, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_void tsio_hal_clr_rx_cc_done_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_ris reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_RIS);
|
|
|
|
reg.bits.rx_cc_done_int = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_INT_RIS, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_void tsio_hal_clr_rx_cts_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_ris reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_RIS);
|
|
|
|
reg.bits.rx_cts_int = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_INT_RIS, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_void tsio_hal_clr_rx_route_fifo_ovfl_int(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_ris reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_INT_RIS);
|
|
|
|
reg.bits.rx_route_fifo_overflow_int = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_INT_RIS, reg.u32);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
}
|
|
|
|
td_u32 tsio_hal_get_org_rx_parser_err_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_mis reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_RX_PARSER_ERR_INT_RIS);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
td_u32 tsio_hal_get_rx_parser_err_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_mis reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_RX_PARSER_ERR_INT_MIS);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
/* Acknowledge the given rx parser error bits (W1C), under the hal lock. */
td_void tsio_hal_clr_rx_parser_err_int_flag(const struct tsio_mgmt *mgmt, td_u32 flag)
{
    td_size_t lock_flag;

    spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
    tsio_write_reg(mgmt->io_base, TSIO_REG_RX_PARSER_ERR_INT_RIS, flag);
    spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
}
|
|
|
|
td_u32 tsio_hal_get_tx_rr_err_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_mis reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_TX_RR_ERR_INT_MIS);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
/* Acknowledge the given tx round-robin error bits (W1C). */
td_void tsio_hal_clr_tx_rr_err_int_flag(const struct tsio_mgmt *mgmt, td_u32 flag)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_TX_RR_ERR_INT_RIS, flag);
}
|
|
|
|
td_u32 tsio_hal_get_pid_filter_err_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
tsio_mis reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_PID_FILTER_ERR_INT_MIS);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
/* Acknowledge the given PID filter error bits (W1C). */
td_void tsio_hal_clr_pid_filter_err_int_flag(const struct tsio_mgmt *mgmt, td_u32 flag)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_PID_FILTER_ERR_INT_RIS, flag);
}
|
|
|
|
td_u32 tsio_hal_get_dma_int_flag(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_glb_stat reg;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_GBL_STAT);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
|
|
return reg.u32;
|
|
}
|
|
|
|
/* Return the DMA descriptor-end interrupt status word. */
td_u32 tsio_hal_get_dma_des_end_status(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_DES_END_INT);
}
|
|
|
|
/* Acknowledge the given DMA descriptor-end status bits (W1C). */
td_void tsio_hal_clr_dma_des_end_status(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_DES_END_INT, status);
}
|
|
|
|
td_u32 tsio_hal_get_dma_chn_pend_status(const struct tsio_mgmt *mgmt)
|
|
{
|
|
td_u32 val;
|
|
td_size_t lock_flag;
|
|
|
|
spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);
|
|
|
|
val = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_PEND_INT);
|
|
|
|
spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
|
|
|
|
return val;
|
|
}
|
|
|
|
/* Acknowledge the pending bit of DMA channel 'id' if it is set (W1C). */
td_void tsio_hal_clr_dma_chn_pend_status(const struct tsio_mgmt *mgmt, td_u32 id)
{
    td_u32 pending;
    td_size_t lock_flag;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    spin_lock_irqsave(&g_tsio_hal_lock, lock_flag);

    pending = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_PEND_INT);
    if (pending & (0x1 << id)) {
        /* write only this channel's bit back to acknowledge it */
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_PEND_INT, 0x1 << id);
    }

    spin_unlock_irqrestore(&g_tsio_hal_lock, lock_flag);
}
|
|
|
|
/* Return the low word (channels 0-31) of the DMA pack interrupt status. */
td_u32 tsio_hal_get_dma_pack_int_status_l(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_PACK_INT_L);
}
|
|
|
|
/* Acknowledge the given low-word DMA pack interrupt bits (W1C). */
td_void tsio_hal_clr_dma_pack_int_status_l(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_PACK_INT_L, status);
}
|
|
|
|
/* Return the high word of the DMA pack interrupt status. */
td_u32 tsio_hal_get_dma_pack_int_status_h(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_PACK_INT_H);
}
|
|
|
|
/* Acknowledge the given high-word DMA pack interrupt bits (W1C). */
td_void tsio_hal_clr_dma_pack_int_status_h(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_PACK_INT_H, status);
}
|
|
|
|
/* Return the low word of the DMA end interrupt status. */
td_u32 tsio_hal_get_dma_end_int_status_l(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_END_INT_L);
}
|
|
|
|
/* Acknowledge the given low-word DMA end interrupt bits (W1C). */
td_void tsio_hal_clr_dma_end_int_status_l(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_END_INT_L, status);
}
|
|
|
|
/* Return the high word of the DMA end interrupt status. */
td_u32 tsio_hal_get_dma_end_int_status_h(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_END_INT_H);
}
|
|
|
|
/* Acknowledge the given high-word DMA end interrupt bits (W1C). */
td_void tsio_hal_clr_dma_end_int_status_h(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_END_INT_H, status);
}
|
|
|
|
/* Return the low word of the output-buffer overflow status. */
td_u32 tsio_hal_get_dma_obuf_ovflw_status_l(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_OVFLW_L);
}
|
|
|
|
/* Acknowledge the given low-word output-buffer overflow bits (W1C). */
td_void tsio_hal_clr_dma_obuf_ovflw_status_l(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_OVFLW_L, status);
}
|
|
|
|
/* Return the high word of the output-buffer overflow status. */
td_u32 tsio_hal_get_dma_obuf_ovflw_status_h(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_OVFLW_H);
}
|
|
|
|
/* Acknowledge the given high-word output-buffer overflow bits (W1C). */
td_void tsio_hal_clr_dma_obuf_ovflw_status_h(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_OVFLW_H, status);
}
|
|
|
|
/* Return the low word of the DMA flush status. */
td_u32 tsio_hal_get_dma_flush_status_l(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_FLUSH_L);
}
|
|
|
|
/* Acknowledge the given low-word DMA flush bits (W1C). */
td_void tsio_hal_clr_dma_flush_status_l(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_FLUSH_L, status);
}
|
|
|
|
/* Return the high word of the DMA flush status. */
td_u32 tsio_hal_get_dma_flush_status_h(const struct tsio_mgmt *mgmt)
{
    return tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_FLUSH_H);
}
|
|
|
|
/* Acknowledge the given high-word DMA flush bits (W1C). */
td_void tsio_hal_clr_dma_flush_status_h(const struct tsio_mgmt *mgmt, td_u32 status)
{
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_FLUSH_H, status);
}
|
|
|
|
td_void tsio_hal_set_dma_cnt_unit(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_cnt_unit reg;
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CNT_UNIT);
|
|
/* bus freq 392MHZ, 0.125us, equal 49 cycle, 49-1= 48 */
|
|
reg.bits.pulse_cyc = 48;
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CNT_UNIT, reg.u32);
|
|
|
|
return;
|
|
}
|
|
|
|
td_void tsio_hal_en_mmu(const struct tsio_mgmt *mgmt)
|
|
{
|
|
dma_ctrl reg;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_TLB_BASE, mgmt->cb_ttbr);
|
|
|
|
reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CTRL);
|
|
|
|
reg.bits.dma_mmu_en = 1;
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CTRL, reg.u32);
|
|
}
|
|
|
|
/*
 * Program the pace (rate-limit) register of one RAM port.
 * The hardware requires the pace field to be cleared to 0 before a new
 * value is written.
 */
static inline td_void set_ram_port_rate(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 pace)
{
    dma_chnl_pace pace_reg;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);
    tsio_tatal_error(!(pace <= 255)); /* max rate 255 */

    /* hw request reset to 0 firstly */
    pace_reg.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_chnl_pace(id));
    pace_reg.bits.dma_chnl_pace = 0;
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_chnl_pace(id), pace_reg.u32);

    /*
     * config new pace.
     * only pace is greater than or equal to 160 and less than or equal to 255
     * can control speed to make sure sync_count is right.
     * NOTE(review): the 'pace' argument is only range-checked; the hardware is
     * always programmed with the fixed value 160 — confirm this is intentional.
     */
    pace_reg.bits.dma_chnl_pace = 160; /* pace is 160 */
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_chnl_pace(id), pace_reg.u32);
}
|
|
|
|
td_void tsio_hal_en_ram_port(const struct tsio_mgmt *mgmt, td_u32 id,
|
|
td_u64 dsc_phy_addr, td_u32 dsc_depth, td_u32 pace)
|
|
{
|
|
dma_chnl_depth depth;
|
|
dma_chnl_dis dis_chnl;
|
|
|
|
tsio_tatal_error(id >= mgmt->ram_port_cnt);
|
|
|
|
/* config dsc base addr */
|
|
tsio_write_reg(mgmt->io_base, tsio_reg_dqct_tab_addr(id), (td_u32)(dsc_phy_addr & 0xffffffff));
|
|
/* config dsc high 4 bit addr */
|
|
tsio_write_reg(mgmt->io_base, tsio_reg_dqct_tab_addr_session_id(id),
|
|
(td_u32)((dsc_phy_addr >> TSIO_BITS_PER_REG) & 0xf));
|
|
|
|
/* config dsc depth */
|
|
depth.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_chal_depth(id));
|
|
|
|
depth.bits.dma_chnl_depth = dsc_depth - 1; /* hw rule: -1. */
|
|
|
|
tsio_write_reg(mgmt->io_base, tsio_reg_dma_chal_depth(id), depth.u32);
|
|
|
|
/* config max data rate with pace value. */
|
|
set_ram_port_rate(mgmt, id, pace);
|
|
|
|
/* invlidate old mmu map */
|
|
dis_chnl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_DIS);
|
|
|
|
dis_chnl.bits.dma_pi_mmu_dis |= (1 << id);
|
|
|
|
tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_DIS, dis_chnl.u32);
|
|
}
|
|
|
|
/* Disable one RAM port by setting its bit in the channel-disable register. */
td_void tsio_hal_dis_ram_port(const struct tsio_mgmt *mgmt, td_u32 id)
{
    dma_chnl_dis dis_chnl;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    dis_chnl.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_DIS);
    /* unsigned shift: (1 << 31) on a signed int is undefined behavior. */
    dis_chnl.bits.dma_chanls_dis |= (1U << id);
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_DIS, dis_chnl.u32);
}
|
|
|
|
/* Return TD_TRUE when the given RAM port's channel-status bit is set. */
td_bool tsio_hal_ram_port_enabled(const struct tsio_mgmt *mgmt, td_u32 id)
{
    td_u32 reg;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    reg = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_STAT);

    /* unsigned shift: (1 << 31) on a signed int is undefined behavior. */
    return (reg & (1U << id)) ? TD_TRUE : TD_FALSE;
}
|
|
|
|
/* Public wrapper: update the pace (rate-limit) of one RAM port. */
td_void tsio_hal_set_ram_port_rate(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 pace)
{
    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    /* config max data rate with pace value. */
    set_ram_port_rate(mgmt, id, pace);
}
|
|
|
|
/*
 * Fill one 4-word DMA-TS descriptor at cur_dsc_addr.
 * Word 0: control (packet count, ts type, tsid, desep flag).
 * Word 1: low 32 bits of the buffer physical address.
 * Word 2: reserved (0).
 * Word 3: high 4 address bits (bits 28..31) combined with the guide number.
 * A memory barrier makes the descriptor visible before the doorbell write.
 */
td_void tsio_hal_setup_ts_dsc(const struct tsio_mgmt *mgmt,
    const struct tsio_dsc_base_info *base_info, td_bool desepon, td_u32 *cur_dsc_addr)
{
    dma_dsc_word_0 word0;
    td_u32 id = base_info->id;
    td_u64 buf_phy_addr = base_info->buf_phy_addr;
    td_u32 pkt_cnt = base_info->pkt_cnt;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    word0.u32 = 0;
    word0.bits.playnums = pkt_cnt - 1; /* [0, 255] */
    word0.bits.tstype = 0x1; /* 0:live ts; 1:dma ts; 2:dma bulk */
    word0.bits.tsid = 0x80 + id; /* 0x80 is ram port base */
    word0.bits.desep = (desepon == TD_TRUE) ? 1 : 0;

    cur_dsc_addr[0] = word0.u32;
    cur_dsc_addr[1] = (td_u32)(buf_phy_addr & 0xffffffff);
    cur_dsc_addr[2] = 0;
    /* 28..31 is the high 4 bit of 36 bit phy addr */
    cur_dsc_addr[3] = (((td_u32)(buf_phy_addr >> TSIO_BITS_PER_REG) & 0xf) << TSIO_DSC_GUIDE_NUM_LEN)
        + RAM_DSC_GUIDE_NUMBER;

    osal_mb();
}
|
|
|
|
/*
 * Fill one 4-word DMA-bulk descriptor at cur_dsc_addr.
 * Same layout as tsio_hal_setup_ts_dsc(), with tstype = bulk and the
 * secure-element session id (sid) set in word 0.
 */
td_void tsio_hal_setup_bulk_dsc(const struct tsio_mgmt *mgmt,
    const struct tsio_dsc_base_info *base_info, td_bool desepon, td_u32 *cur_dsc_addr, td_u32 sid)
{
    dma_dsc_word_0 word0;
    td_u32 id = base_info->id;
    td_u64 buf_phy_addr = base_info->buf_phy_addr;
    td_u32 pkt_cnt = base_info->pkt_cnt;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);
    tsio_tatal_error(sid >= mgmt->se_cnt);

    word0.u32 = 0;
    word0.bits.playnums = pkt_cnt - 1; /* [0, 255] */
    word0.bits.sid = sid;
    word0.bits.tstype = 0x2; /* 0:live ts; 1:dma ts; 2:dma bulk */
    word0.bits.tsid = 0x80 + id; /* 0x80 is ram port base */
    word0.bits.desep = (desepon == TD_TRUE) ? 1 : 0;

    cur_dsc_addr[0] = word0.u32;
    cur_dsc_addr[1] = (td_u32)(buf_phy_addr & 0xffffffff);
    cur_dsc_addr[2] = 0;
    /* 28..31 is the high 4 bit of 36 bit phy addr */
    cur_dsc_addr[3] = (((td_u32)(buf_phy_addr >> TSIO_BITS_PER_REG) & 0xf) << TSIO_DSC_GUIDE_NUM_LEN)
        + RAM_DSC_GUIDE_NUMBER;

    osal_mb();
}
|
|
|
|
/*
 * Fill one 4-word bulk "flush" descriptor at cur_dsc_addr: no payload
 * buffer, flush and desep flags set, used to drain a bulk session.
 */
td_void tsio_hal_setup_bulk_flush_dsc(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 *cur_dsc_addr, td_u32 sid)
{
    dma_dsc_word_0 word0;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);
    tsio_tatal_error(sid >= mgmt->se_cnt);

    word0.u32 = 0;
    word0.bits.playnums = 0; /* [0, 255] */
    word0.bits.sid = sid;
    word0.bits.tstype = 0x2; /* 0:live ts; 1:dma ts; 2:dma bulk */
    word0.bits.tsid = 0x80 + id; /* 0x80 is ram port base */
    word0.bits.flush = 1;
    word0.bits.desep = 1;

    cur_dsc_addr[0] = word0.u32;
    cur_dsc_addr[1] = 0;
    cur_dsc_addr[2] = 0;
    cur_dsc_addr[3] = RAM_DSC_GUIDE_NUMBER;

    osal_mb();
}
|
|
|
|
/*
 * Publish new descriptors on a RAM port by advancing the software write
 * pointer. If the channel is not yet running it is enabled here first
 * ("enable ramport delay to add dsc"), which keeps the hardware rate
 * calculation accurate.
 */
td_void tsio_hal_add_dsc(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 write_idx)
{
    dma_slot_pi_w reg;
    td_u32 en_chnl;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    /* enable ramport delay to add dsc for the accuracy of rate calculation */
    en_chnl = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_STAT);
    /* unsigned shifts: (1 << 31) on a signed int is undefined behavior. */
    if (unlikely(!(en_chnl & (1U << id)))) {
        /* en ram port */
        en_chnl = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_EN);
        en_chnl |= (1U << id);
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_CHNL_EN, en_chnl);
    }

    reg.u32 = tsio_read_reg(mgmt->io_base, TSIO_REG_DMA_SLOT_PI_W);
    reg.bits.sw_pi_w_bid = id;
    reg.bits.sw_pi_wptr = write_idx;
    tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_SLOT_PI_W, reg.u32);
}
|
|
|
|
/* Return the hardware read pointer of one RAM port's descriptor ring. */
td_u32 tsio_hal_get_ram_port_cur_read_idx(const struct tsio_mgmt *mgmt, td_u32 id)
{
    dma_slot_pi_r slot;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    slot.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_slot_pi_r(id));
    return slot.bits.hw_pi_rptr;
}
|
|
|
|
/* Return the hardware write pointer of one RAM port's descriptor ring. */
td_u32 tsio_hal_get_ram_port_cur_write_idx(const struct tsio_mgmt *mgmt, td_u32 id)
{
    dma_slot_pi_r slot;

    tsio_tatal_error(id >= mgmt->ram_port_cnt);

    slot.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_slot_pi_r(id));
    return slot.bits.hw_pi_wptr;
}
|
|
|
|
/*
 * Configure and enable the output buffer of one secure element:
 * program its base address (36-bit, split low/high), length and fill
 * threshold, invalidate the stale MMU mapping, then set the enable bit.
 * Buffers 0..31 live in the _L registers, 32..63 in the _H registers.
 */
td_void tsio_hal_en_obuf(const struct tsio_mgmt *mgmt, td_u32 id, td_u64 buf_phy_addr, td_u32 buf_size)
{
    dma_obuf_len len;
    dma_obuf_thred thred;

    tsio_tatal_error(id >= mgmt->se_cnt);

    /* config buf base addr (low 32 bits; truncation is intentional) */
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_obuf_addr(id), (td_u32)(buf_phy_addr & 0xffffffff));
    /* config buf base high 4 bit addr */
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_obuf_addr_high(id),
        (td_u32)((buf_phy_addr >> TSIO_BITS_PER_REG) & 0xf));

    /* config buf len */
    len.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_obuf_len(id));
    len.bits.dma_obuf_length = buf_size;
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_obuf_len(id), len.u32);

    /* config thresh */
    thred.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_obuf_thred(id));
    thred.bits.dma_obuf_thresh = DEFAULT_SE_OBUF_THRESH;
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_obuf_thred(id), thred.u32);

    /*
     * invlidate old mmu map.
     * unsigned shifts: id can reach 31 here, and (1 << 31) on a signed
     * int is undefined behavior — use 1U.
     */
    if (id < NUM_SIZE) {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_MMU_DIS_L, (1U << id));
    } else {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_MMU_DIS_H, (1U << (id - NUM_SIZE)));
    }

    /* en obuf. */
    if (id < NUM_SIZE) {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_ENB_L, (1U << id));
    } else {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_ENB_H, (1U << (id - NUM_SIZE)));
    }
}
|
|
|
|
/* Disable the output buffer of one secure element (low/high register split at 32). */
td_void tsio_hal_dis_obuf(const struct tsio_mgmt *mgmt, td_u32 id)
{
    tsio_tatal_error(id >= mgmt->se_cnt);

    /*
     * dis obuf. (original comment said "en obuf", which was wrong)
     * unsigned shifts: id can reach 31 here, and (1 << 31) on a signed
     * int is undefined behavior — use 1U.
     */
    if (id < NUM_SIZE) {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_DIS_L, (1U << id));
    } else {
        tsio_write_reg(mgmt->io_base, TSIO_REG_DMA_OBUF_DIS_H, (1U << (id - NUM_SIZE)));
    }
}
|
|
|
|
/* Return the hardware write pointer of one secure element's output buffer. */
td_u32 tsio_hal_get_obuf_write(const struct tsio_mgmt *mgmt, td_u32 id)
{
    dma_slot_po_w slot;

    tsio_tatal_error(id >= mgmt->se_cnt);

    slot.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_slot_po_w(id));
    return slot.bits.hw_po_wptr;
}
|
|
|
|
/* Return the software read pointer of one secure element's output buffer. */
td_u32 tsio_hal_get_obuf_read(const struct tsio_mgmt *mgmt, td_u32 id)
{
    dma_slot_po_r slot;

    tsio_tatal_error(id >= mgmt->se_cnt);

    slot.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_slot_po_r(id));
    return slot.bits.sw_po_rptr;
}
|
|
|
|
/* Advance the software read pointer of one secure element's output buffer. */
td_void tsio_hal_set_obuf_read(const struct tsio_mgmt *mgmt, td_u32 id, td_u32 cur_read)
{
    dma_slot_po_r slot;

    tsio_tatal_error(id >= mgmt->se_cnt);

    slot.u32 = tsio_read_reg(mgmt->io_base, tsio_reg_dma_slot_po_r(id));
    slot.bits.sw_po_rptr = cur_read;
    tsio_write_reg(mgmt->io_base, tsio_reg_dma_slot_po_r(id), slot.u32);
}
|
|
|