qed/qede: use 8.7.3.0 FW.
This patch moves the qed* drivers to the 8.7.3.0 FW. The new FW is required for a number of new SW features, including:

  - VLAN filtering offload
  - Encapsulation offload support
  - HW ingress aggregations

It also paves the way for adding storage protocols in the future.

V2: Fix kbuild test robot errors/warnings.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7530e44c54
commit fc48b7a614
@@ -70,8 +70,8 @@ struct qed_sb_sp_info;
 struct qed_mcp_info;
 
 struct qed_rt_data {
-	u32 init_val;
-	bool b_valid;
+	u32 *init_val;
+	bool *b_valid;
 };
 
 /* The PCI personality is not quite synonymous to protocol ID:
@@ -120,6 +120,10 @@ enum QED_PORT_MODE {
 	QED_PORT_MODE_DE_1X25G
 };
 
+enum qed_dev_cap {
+	QED_DEV_CAP_ETH,
+};
+
 struct qed_hw_info {
 	/* PCI personality */
 	enum qed_pci_personality personality;
@@ -151,6 +155,7 @@ struct qed_hw_info {
 
 	u32 port_mode;
 	u32 hw_mode;
+	unsigned long device_capabilities;
 };
 
 struct qed_hw_cid_data {
@@ -267,7 +272,7 @@ struct qed_hwfn {
 	struct qed_hw_info hw_info;
 
 	/* rt_array (for init-tool) */
-	struct qed_rt_data *rt_data;
+	struct qed_rt_data rt_data;
 
 	/* SPQ */
 	struct qed_spq *p_spq;
@@ -350,9 +355,20 @@ struct qed_dev {
 	char name[NAME_SIZE];
 
 	u8 type;
-#define QED_DEV_TYPE_BB_A0	(0 << 0)
-#define QED_DEV_TYPE_MASK	(0x3)
-#define QED_DEV_TYPE_SHIFT	(0)
+#define QED_DEV_TYPE_BB	(0 << 0)
+#define QED_DEV_TYPE_AH	BIT(0)
+/* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && \
+				 CHIP_REV_IS_A0(dev))
+#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && \
+				 CHIP_REV_IS_B0(dev))
+
+#define QED_GET_TYPE(dev)	(QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+	u16 vendor_id;
+	u16 device_id;
 
 	u16 chip_num;
 #define CHIP_NUM_MASK	0xffff
@@ -361,6 +377,8 @@ struct qed_dev {
 	u16 chip_rev;
 #define CHIP_REV_MASK	0xf
 #define CHIP_REV_SHIFT	12
+#define CHIP_REV_IS_A0(_cdev)	(!(_cdev)->chip_rev)
+#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)
 
 	u16 chip_metal;
 #define CHIP_METAL_MASK	0xff
@@ -375,10 +393,10 @@ struct qed_dev {
 	u8 num_funcs_in_port;
 
 	u8 path_id;
-	enum mf_mode mf_mode;
-#define IS_MF(_p_hwfn)		(((_p_hwfn)->cdev)->mf_mode != SF)
-#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
-#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+	enum qed_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
 
 	int pcie_width;
 	int pcie_speed;
@@ -441,11 +459,6 @@ struct qed_dev {
 	const struct firmware *firmware;
 };
 
-#define QED_GET_TYPE(dev)	(((dev)->type & QED_DEV_TYPE_MASK) >> \
-				 QED_DEV_TYPE_SHIFT)
-#define QED_IS_BB_A0(dev)	(QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
-#define QED_IS_BB(dev)		(QED_IS_BB_A0(dev))
-
 #define NUM_OF_SBS(dev)		MAX_SB_PER_PATH_BB
 #define NUM_OF_ENG_PFS(dev)	MAX_NUM_PFS_BB
 
@@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
 	params.num_pf_cids = iids.cids;
 	params.start_pq = qm_info->start_pq;
 	params.num_pf_pqs = qm_info->num_pqs;
-	params.start_vport = qm_info->num_vports;
+	params.start_vport = qm_info->start_vport;
+	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
 	params.pf_rl = qm_info->pf_rl;
 	params.pq_params = qm_info->qm_pq_params;
@@ -341,11 +341,6 @@ void qed_resc_setup(struct qed_dev *cdev)
 	}
 }
 
-#define FINAL_CLEANUP_CMD_OFFSET	(0)
-#define FINAL_CLEANUP_CMD		(0x1)
-#define FINAL_CLEANUP_VALID_OFFSET	(6)
-#define FINAL_CLEANUP_VFPF_ID_SHIFT	(7)
-#define FINAL_CLEANUP_COMP		(0x2)
 #define FINAL_CLEANUP_POLL_CNT		(100)
 #define FINAL_CLEANUP_POLL_TIME		(10)
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
@@ -355,12 +350,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
 	int rc = -EBUSY;
 
-	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+	addr = GTT_BAR0_MAP_REG_USDM_RAM +
+	       USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
 
-	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
-	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
-	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
-	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+	command |= X_FINAL_CLEANUP_AGG_INT <<
+		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
 
 	/* Make sure notification is not set before initiating final cleanup */
 	if (REG_RD(p_hwfn, addr)) {
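
The cleanup command above is built by packing small fields into a single u32 with the SDM shift constants from the HSI headers. As a rough, self-contained illustration of that pattern (the shift values and names below are invented for the sketch, not taken from the HSI headers):

	#include <linux/types.h>

	/* Illustrative field layout only -- the real offsets live in the
	 * HSI headers (SDM_OP_GEN_*, SDM_AGG_INT_COMP_PARAMS_*).
	 */
	#define SKETCH_COMP_TYPE_SHIFT	0
	#define SKETCH_AGG_INDEX_SHIFT	8
	#define SKETCH_VECTOR_EN_SHIFT	16
	#define SKETCH_VECTOR_BIT_SHIFT	17

	static inline u32 sketch_build_cleanup_cmd(u8 comp_type,
						   u8 agg_index, u8 id)
	{
		u32 command = 0;

		command |= agg_index << SKETCH_AGG_INDEX_SHIFT;	/* AGG int index */
		command |= 1 << SKETCH_VECTOR_EN_SHIFT;		/* enable vector */
		command |= id << SKETCH_VECTOR_BIT_SHIFT;	/* per-function bit */
		command |= comp_type << SKETCH_COMP_TYPE_SHIFT;	/* completion type */

		return command;
	}
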
@@ -415,18 +412,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 	}
 
 	switch (p_hwfn->cdev->mf_mode) {
-	case SF:
-		hw_mode |= 1 << MODE_SF;
+	case QED_MF_DEFAULT:
+	case QED_MF_NPAR:
+		hw_mode |= 1 << MODE_MF_SI;
 		break;
-	case MF_OVLAN:
+	case QED_MF_OVLAN:
 		hw_mode |= 1 << MODE_MF_SD;
 		break;
-	case MF_NPAR:
-		hw_mode |= 1 << MODE_MF_SI;
-		break;
 	default:
-		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
-		hw_mode |= 1 << MODE_SF;
+		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+		hw_mode |= 1 << MODE_MF_SI;
 	}
 
 	hw_mode |= 1 << MODE_ASIC;
@@ -1018,8 +1013,7 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 	u32 *resc_num = p_hwfn->hw_info.resc_num;
 	int num_funcs, i;
 
-	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
-				  : p_hwfn->cdev->num_ports_in_engines;
+	num_funcs = MAX_NUM_PFS_BB;
 
 	resc_num[QED_SB] = min_t(u32,
 				 (MAX_SB_PER_PATH_BB / num_funcs),
@@ -1071,7 +1065,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 			       struct qed_ptt *p_ptt)
 {
 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
-	u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
 	struct qed_mcp_link_params *link;
 
 	/* Read global nvm_cfg address */
@@ -1134,21 +1128,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 		break;
 	}
 
-	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
-	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
-	       offsetof(struct nvm_cfg1_func, device_id);
-	val = qed_rd(p_hwfn, p_ptt, addr);
-
-	if (IS_MF(p_hwfn)) {
-		p_hwfn->hw_info.device_id =
-			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
-			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
-	} else {
-		p_hwfn->hw_info.device_id =
-			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
-			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
-	}
-
 	/* Read default link configuration */
 	link = &p_hwfn->mcp_info->link_input;
 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1220,18 +1199,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 
 	switch (mf_mode) {
 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
-		p_hwfn->cdev->mf_mode = MF_OVLAN;
+		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
 		break;
 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
-		p_hwfn->cdev->mf_mode = MF_NPAR;
+		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
 		break;
-	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
-		p_hwfn->cdev->mf_mode = SF;
+	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
 		break;
 	}
 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
 		p_hwfn->cdev->mf_mode);
 
+	/* Read Multi-function information from shmem */
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, glob) +
+	       offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+		__set_bit(QED_DEV_CAP_ETH,
+			  &p_hwfn->hw_info.device_capabilities);
+
 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 }
@@ -1293,29 +1282,36 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
 
 static void qed_get_dev_info(struct qed_dev *cdev)
 {
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	u32 tmp;
 
-	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+	/* Read Vendor Id / Device Id */
+	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
+			     &cdev->vendor_id);
+	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
+			     &cdev->device_id);
+	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
 				     MISCS_REG_CHIP_NUM);
-	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
 				     MISCS_REG_CHIP_REV);
 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
 
+	cdev->type = QED_DEV_TYPE_BB;
 	/* Learn number of HW-functions */
-	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
 
-	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+	if (tmp & (1 << p_hwfn->rel_pf_id)) {
 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
 		cdev->num_hwfns = 2;
 	} else {
 		cdev->num_hwfns = 1;
 	}
 
-	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
 				    MISCS_REG_CHIP_TEST_REG) >> 4;
 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
-	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
 				       MISCS_REG_CHIP_METAL);
 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

[One file's diff was suppressed because it is too large.]

@@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
  * Return -1 on error.
  */
 static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
-			      u8 start_vport,
 			      u8 num_vports,
 			      struct init_qm_vport_params *vport_params)
 {
-	u8 tc, i, vport_id;
 	u32 inc_val;
+	u8 tc, i;
 
 	/* go over all PF VPORTs */
-	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-		u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
-		u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+	for (i = 0; i < num_vports; i++) {
 
 		if (!vport_params[i].vport_wfq)
 			continue;
@@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 		 * different TCs
 		 */
 		for (tc = 0; tc < NUM_OF_TCS; tc++) {
-			u16 vport_pq_id = pq_ids[tc];
+			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
 
 			if (vport_pq_id != QM_INVALID_PQ_ID) {
 				STORE_RT_REG(p_hwfn,
-					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
-					     vport_pq_id, inc_val);
-				STORE_RT_REG(p_hwfn, temp + vport_pq_id,
-					     QM_WFQ_UPPER_BOUND |
-					     QM_WFQ_CRD_REG_SIGN_BIT);
-				STORE_RT_REG(p_hwfn,
 					     QM_REG_WFQVPCRD_RT_OFFSET +
 					     vport_pq_id,
 					     QM_WFQ_INIT_CRD(inc_val) |
 					     QM_WFQ_CRD_REG_SIGN_BIT);
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
+					     vport_pq_id, inc_val);
 			}
 		}
 	}
@@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
 	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
 		return -1;
 
-	if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
-			       p_params->num_vports, vport_params))
+	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
 		return -1;
 
 	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
@@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
 	int i;
 
 	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
-		p_hwfn->rt_data[i].b_valid = false;
+		p_hwfn->rt_data.b_valid[i] = false;
 }
 
 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
 			   u32 rt_offset,
 			   u32 val)
 {
-	p_hwfn->rt_data[rt_offset].init_val = val;
-	p_hwfn->rt_data[rt_offset].b_valid = true;
+	p_hwfn->rt_data.init_val[rt_offset] = val;
+	p_hwfn->rt_data.b_valid[rt_offset] = true;
 }
 
 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
-			   u32 rt_offset,
-			   u32 *val,
+			   u32 rt_offset, u32 *p_val,
 			   size_t size)
 {
 	size_t i;
 
 	for (i = 0; i < size / sizeof(u32); i++) {
-		p_hwfn->rt_data[rt_offset + i].init_val = val[i];
-		p_hwfn->rt_data[rt_offset + i].b_valid = true;
+		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
 	}
 }
 
-static void qed_init_rt(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			u32 addr,
-			u32 rt_offset,
-			u32 size)
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       u32 addr,
+		       u16 rt_offset,
+		       u16 size,
+		       bool b_must_dmae)
 {
-	struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
-	u32 i;
+	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+	u16 i, segment;
+	int rc = 0;
 
 	/* Since not all RT entries are initialized, go over the RT and
 	 * for each segment of initialized values use DMA.
 	 */
 	for (i = 0; i < size; i++) {
-		if (!rt_data[i].b_valid)
+		if (!p_valid[i])
 			continue;
-		qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+
+		/* In case there isn't any wide-bus configuration here,
+		 * simply write the data instead of using dmae.
+		 */
+		if (!b_must_dmae) {
+			qed_wr(p_hwfn, p_ptt, addr + (i << 2),
+			       p_init_val[i]);
+			continue;
+		}
+
+		/* Start of a new segment */
+		for (segment = 1; i + segment < size; segment++)
+			if (!p_valid[i + segment])
+				break;
+
+		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+				       (uintptr_t)(p_init_val + i),
+				       addr + (i << 2), segment, 0);
+		if (rc != 0)
+			return rc;
+
+		/* Jump over the entire segment, including invalid entry */
+		i += segment;
 	}
+
+	return rc;
 }
 
 int qed_init_alloc(struct qed_hwfn *p_hwfn)
 {
-	struct qed_rt_data *rt_data;
+	struct qed_rt_data *rt_data = &p_hwfn->rt_data;
 
-	rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
-	if (!rt_data)
+	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+				   GFP_KERNEL);
+	if (!rt_data->b_valid)
 		return -ENOMEM;
 
-	p_hwfn->rt_data = rt_data;
+	rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+				    GFP_KERNEL);
+	if (!rt_data->init_val) {
+		kfree(rt_data->b_valid);
+		return -ENOMEM;
+	}
 
 	return 0;
 }
 
 void qed_init_free(struct qed_hwfn *p_hwfn)
 {
-	kfree(p_hwfn->rt_data);
-	p_hwfn->rt_data = NULL;
+	kfree(p_hwfn->rt_data.init_val);
+	kfree(p_hwfn->rt_data.b_valid);
}
 
 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
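
The rewritten qed_init_rt() above replaces per-entry register writes with one DMAE transfer per contiguous run of valid entries. A minimal, driver-independent sketch of that run-scanning loop (names and the bulk_write callback are illustrative, not driver code):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static void write_valid_runs(const uint32_t *vals, const bool *valid,
				     size_t n,
				     void (*bulk_write)(size_t idx,
							const uint32_t *v,
							size_t count))
	{
		size_t i = 0;

		while (i < n) {
			size_t seg;

			if (!valid[i]) {	/* skip holes between segments */
				i++;
				continue;
			}

			/* extend the run while entries remain valid */
			for (seg = 1; i + seg < n && valid[i + seg]; seg++)
				;

			bulk_write(i, vals + i, seg);	/* one transfer per run */
			i += seg;	/* next entry is a hole or the end */
		}
	}
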
@@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
 	case INIT_SRC_RUNTIME:
 		qed_init_rt(p_hwfn, p_ptt, addr,
 			    le16_to_cpu(arg->runtime.offset),
-			    le16_to_cpu(arg->runtime.size));
+			    le16_to_cpu(arg->runtime.size),
+			    b_must_dmae);
 		break;
 	}
@@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 			    struct qed_ptt *p_ptt,
 			    struct init_read_op *cmd)
 {
-	u32 data = le32_to_cpu(cmd->op_data);
-	u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
-
 	bool (*comp_check)(u32 val, u32 expected_val);
 	u32 delay = QED_INIT_POLL_PERIOD_US, val;
+	u32 data, addr, poll;
+	int i;
+
+	data = le32_to_cpu(cmd->op_data);
+	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
 
 	val = qed_rd(p_hwfn, p_ptt, addr);
 
-	data = le32_to_cpu(cmd->op_data);
-	if (GET_FIELD(data, INIT_READ_OP_POLL)) {
-		int i;
+	if (poll == INIT_POLL_NONE)
+		return;
 
-		switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
-		case INIT_COMPARISON_EQ:
-			comp_check = comp_eq;
-			break;
-		case INIT_COMPARISON_OR:
-			comp_check = comp_or;
-			break;
-		case INIT_COMPARISON_AND:
-			comp_check = comp_and;
-			break;
-		default:
-			comp_check = NULL;
-			DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
-			       data);
-			return;
-		}
+	switch (poll) {
+	case INIT_POLL_EQ:
+		comp_check = comp_eq;
+		break;
+	case INIT_POLL_OR:
+		comp_check = comp_or;
+		break;
+	case INIT_POLL_AND:
+		comp_check = comp_and;
+		break;
+	default:
+		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+		       cmd->op_data);
+		return;
+	}
 
-		for (i = 0;
-		     i < QED_INIT_MAX_POLL_COUNT &&
-		     !comp_check(val, le32_to_cpu(cmd->expected_val));
-		     i++) {
-			udelay(delay);
-			val = qed_rd(p_hwfn, p_ptt, addr);
-		}
+	data = le32_to_cpu(cmd->expected_val);
+	for (i = 0;
+	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+	     i++) {
+		udelay(delay);
+		val = qed_rd(p_hwfn, p_ptt, addr);
+	}
 
-		if (i == QED_INIT_MAX_POLL_COUNT)
-			DP_ERR(p_hwfn,
-			       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
-			       addr, le32_to_cpu(cmd->expected_val),
-			       val, data);
+	if (i == QED_INIT_MAX_POLL_COUNT) {
+		DP_ERR(p_hwfn,
+		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+		       addr, le32_to_cpu(cmd->expected_val),
+		       val, le32_to_cpu(cmd->op_data));
+	}
 }
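
The reworked qed_init_cmd_rd() folds the poll handling into a single early-return flow: resolve the comparator once, then re-read the register until it matches or the attempt budget runs out. The same pattern in isolation (all names here are illustrative):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int sketch_poll_reg(u32 (*read_reg)(void),
				   bool (*cmp)(u32 val, u32 expected),
				   u32 expected, int max_polls,
				   unsigned int delay_us)
	{
		u32 val = read_reg();
		int i;

		for (i = 0; i < max_polls && !cmp(val, expected); i++) {
			udelay(delay_us);	/* back off between reads */
			val = read_reg();
		}

		return (i == max_polls) ? -ETIMEDOUT : 0;
	}
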
@@ -714,7 +714,6 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
 	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
 	p_ramrod->sb_index = p_params->sb_idx;
 	p_ramrod->stats_counter_id = stats_id;
-	p_ramrod->tc = p_pq_params->eth.tc;
 
 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
@@ -821,9 +820,8 @@ qed_filter_action(enum qed_filter_opcode opcode)
 	case QED_FILTER_REMOVE:
 		action = ETH_FILTER_ACTION_REMOVE;
 		break;
-	case QED_FILTER_REPLACE:
 	case QED_FILTER_FLUSH:
-		action = ETH_FILTER_ACTION_REPLACE;
+		action = ETH_FILTER_ACTION_REMOVE_ALL;
 		break;
 	default:
 		action = MAX_ETH_FILTER_ACTION;
@@ -892,8 +890,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
 
 	switch (p_filter_cmd->opcode) {
-	case QED_FILTER_FLUSH:
-		p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+	case QED_FILTER_REPLACE:
 	case QED_FILTER_MOVE:
 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
 	default:
@@ -962,6 +959,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 
 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
 		p_second_filter->vport_id = vport_to_add_to;
+	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
+		p_first_filter->vport_id = vport_to_add_to;
+		memcpy(p_second_filter, p_first_filter,
+		       sizeof(*p_second_filter));
+		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+		p_second_filter->action = ETH_FILTER_ACTION_ADD;
 	} else {
 		action = qed_filter_action(p_filter_cmd->opcode);
 
@@ -190,7 +190,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
 	dev_info->pci_irq = cdev->pci_params.irq;
-	dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
 	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
 	dev_info->fw_major = FW_MAJOR_VERSION;
@@ -720,26 +720,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
-	if (p_hwfn->cdev->mf_mode != SF) {
-		info->bandwidth_min = (shmem_info.config &
-				       FUNC_MF_CFG_MIN_BW_MASK) >>
-				      FUNC_MF_CFG_MIN_BW_SHIFT;
-		if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
-			DP_INFO(p_hwfn,
-				"bandwidth minimum out of bounds [%02x]. Set to 1\n",
-				info->bandwidth_min);
-			info->bandwidth_min = 1;
-		}
-
-		info->bandwidth_max = (shmem_info.config &
-				       FUNC_MF_CFG_MAX_BW_MASK) >>
-				      FUNC_MF_CFG_MAX_BW_SHIFT;
-		if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
-			DP_INFO(p_hwfn,
-				"bandwidth maximum out of bounds [%02x]. Set to 100\n",
-				info->bandwidth_max);
-			info->bandwidth_max = 100;
-		}
+	info->bandwidth_min = (shmem_info.config &
+			       FUNC_MF_CFG_MIN_BW_MASK) >>
+			      FUNC_MF_CFG_MIN_BW_SHIFT;
+	if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
+			info->bandwidth_min);
+		info->bandwidth_min = 1;
+	}
+
+	info->bandwidth_max = (shmem_info.config &
+			       FUNC_MF_CFG_MAX_BW_MASK) >>
+			      FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
+			info->bandwidth_max);
+		info->bandwidth_max = 100;
 	}
 
 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
@@ -343,7 +343,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    enum mf_mode mode);
+		    enum qed_mf_mode mode);
 
 /**
 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -90,7 +90,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 }
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    enum mf_mode mode)
+		    enum qed_mf_mode mode)
 {
 	struct qed_sp_init_request_params params;
 	struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -125,6 +125,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->dont_log_ramrods = 0;
 	p_ramrod->log_type_mask = cpu_to_le16(0xf);
-	p_ramrod->mf_mode = mode;
+	switch (mode) {
+	case QED_MF_DEFAULT:
+	case QED_MF_NPAR:
+		p_ramrod->mf_mode = MF_NPAR;
+		break;
+	case QED_MF_OVLAN:
+		p_ramrod->mf_mode = MF_OVLAN;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+		p_ramrod->mf_mode = MF_NPAR;
+	}
 	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
 
 	/* Place EQ address in RAMROD */
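
The switch above decouples the driver-visible enum qed_mf_mode from the FW's mf_mode encoding; DEFAULT deliberately lands on the FW's NPAR value. Pulled out as a helper, the mapping would look roughly like this (a refactoring sketch, not code from the patch):

	static u8 sketch_mf_to_fw_mode(enum qed_mf_mode mode)
	{
		switch (mode) {
		case QED_MF_OVLAN:
			return MF_OVLAN;
		case QED_MF_DEFAULT:
		case QED_MF_NPAR:
		default:
			/* DEFAULT and NPAR share the FW's MF_NPAR encoding */
			return MF_NPAR;
		}
	}
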
@@ -142,9 +154,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	p_hwfn->hw_info.personality = PERSONALITY_ETH;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-		   "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
 		   sb, sb_index,
-		   (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
 		   p_ramrod->outer_tag);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -173,9 +173,9 @@ enum QEDE_STATE {
 * skb are built only after the frame was DMA-ed.
 */
 struct sw_rx_data {
-	u8 *data;
-
-	DEFINE_DMA_UNMAP_ADDR(mapping);
+	struct page *data;
+	dma_addr_t mapping;
+	unsigned int page_offset;
 };
 
 struct qede_rx_queue {
@@ -188,6 +188,7 @@ struct qede_rx_queue {
 	void __iomem *hw_rxq_prod_addr;
 
 	int rx_buf_size;
+	unsigned int rx_buf_seg_size;
 
 	u16 num_rx_buffers;
 	u16 rxq_id;
@@ -281,6 +282,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
 #define NUM_TX_BDS_MIN		128
 #define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
 
+#define QEDE_RX_HDR_SIZE	256
 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
 
 #endif /* _QEDE_H_ */
@@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct qed_link_params params;
 	u32 speed;
 
-	if (edev->dev_info.common.is_mf) {
+	if (!edev->dev_info.common.is_mf_default) {
 		DP_INFO(edev,
-			"Link parameters can not be changed in MF mode\n");
+			"Link parameters can not be changed in non-default mode\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev,
 	struct qed_link_params params;
 	struct qed_link_output current_link;
 
-	if (!edev->dev_info.common.is_mf) {
+	if (!edev->dev_info.common.is_mf_default) {
 		DP_INFO(edev,
 			"Pause parameters can not be updated in non-default mode\n");
 		return -EOPNOTSUPP;
@@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
 					 struct eth_tx_3rd_bd *third_bd)
 {
 	u8 l4_proto;
-	u16 bd2_bits = 0, bd2_bits2 = 0;
+	u16 bd2_bits1 = 0, bd2_bits2 = 0;
 
-	bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
 
-	bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
 		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
 		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
 
-	bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
 		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
 
 	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
@@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
 		l4_proto = ip_hdr(skb)->protocol;
 
 	if (l4_proto == IPPROTO_UDP)
-		bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
 
-	if (third_bd) {
+	if (third_bd)
 		third_bd->data.bitfields |=
-			((tcp_hdrlen(skb) / 4) &
-			 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
-			ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
-	}
+			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+				     ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+				    ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
 
-	second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
 	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
 }
@@ -464,12 +463,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
 	/* Fill the parsing flags & params according to the requested offload */
 	if (xmit_type & XMIT_L4_CSUM) {
+		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
+
 		/* We don't re-calculate IP checksum as it is already done by
 		 * the upper stack
 		 */
 		first_bd->data.bd_flags.bitfields |=
 			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
+		first_bd->data.bitfields |= cpu_to_le16(temp);
+
 		/* If the packet is IPv6 with extension header, indicate that
 		 * to FW and pass few params, since the device cracker doesn't
 		 * support parsing IPv6 with extension header/s.
@@ -491,7 +494,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
 		/* @@@TBD - if will not be removed need to check */
 		third_bd->data.bitfields |=
-			(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+			cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
 
 		/* Make life easier for FW guys who can't deal with header and
 		 * data on same BD. If we need to split, use the second bd...
@@ -719,26 +722,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }
 
-/* This function copies the Rx buffer from the CONS position to the PROD
 * position, since we failed to allocate a new Rx buffer.
+/* This function reuses the buffer(from an offset) from
+ * consumer index to producer index in the bd ring
 */
-static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+static inline void qede_reuse_page(struct qede_dev *edev,
+				   struct qede_rx_queue *rxq,
+				   struct sw_rx_data *curr_cons)
 {
-	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
 	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-	struct sw_rx_data *sw_rx_data_cons =
-		&rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-	struct sw_rx_data *sw_rx_data_prod =
-		&rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+	struct sw_rx_data *curr_prod;
+	dma_addr_t new_mapping;
 
-	dma_unmap_addr_set(sw_rx_data_prod, mapping,
-			   dma_unmap_addr(sw_rx_data_cons, mapping));
+	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+	*curr_prod = *curr_cons;
 
-	sw_rx_data_prod->data = sw_rx_data_cons->data;
-	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+	new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
 
-	rxq->sw_rx_cons++;
 	rxq->sw_rx_prod++;
+	curr_cons->data = NULL;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
+					 struct qede_rx_queue *rxq,
+					 struct sw_rx_data *curr_cons)
+{
+	/* Move to the next segment in the page */
+	curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+	if (curr_cons->page_offset == PAGE_SIZE) {
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+			return -ENOMEM;
+
+		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	} else {
+		/* Increment refcount of the page as we don't want
+		 * network stack to take the ownership of the page
+		 * which can be recycled multiple times by the driver.
+		 */
+		atomic_inc(&curr_cons->data->_count);
+		qede_reuse_page(edev, rxq, curr_cons);
+	}
+
+	return 0;
+}
 
 static inline void qede_update_rx_prod(struct qede_dev *edev,
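
qede_realloc_rx_buffer() above encodes the page-recycling rule: advance the consumer's offset by one segment; only when the offset wraps past the page is a fresh page allocated and the old one unmapped, otherwise the page refcount is raised and the same page re-posted. A compact model of that rule (struct and field names are illustrative, not the driver's):

	#include <linux/mm_types.h>

	struct sketch_rx_slot {			/* stands in for struct sw_rx_data */
		struct page *page;
		unsigned int page_offset;
	};

	/* Returns true when the page is exhausted and must be replaced;
	 * false when remaining segments can keep recycling the same page.
	 */
	static bool sketch_rx_slot_advance(struct sketch_rx_slot *slot,
					   unsigned int seg_size,
					   unsigned int page_size)
	{
		slot->page_offset += seg_size;
		return slot->page_offset == page_size;
	}
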
@@ -857,9 +886,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		struct sw_rx_data *sw_rx_data;
 		union eth_rx_cqe *cqe;
 		struct sk_buff *skb;
+		struct page *data;
+		__le16 flags;
 		u16 len, pad;
 		u32 rx_hash;
-		u8 *data;
 
 		/* Get the CQE from the completion ring */
 		cqe = (union eth_rx_cqe *)
@@ -879,56 +909,110 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		data = sw_rx_data->data;
 
 		fp_cqe = &cqe->fast_path_regular;
-		len = le16_to_cpu(fp_cqe->pkt_len);
+		len = le16_to_cpu(fp_cqe->len_on_first_bd);
 		pad = fp_cqe->placement_offset;
+		flags = cqe->fast_path_regular.pars_flags.flags;
 
-		/* For every Rx BD consumed, we allocate a new BD so the BD ring
-		 * is always with a fixed size. If allocation fails, we take the
-		 * consumed BD and return it to the ring in the PROD position.
-		 * The packet that was received on that BD will be dropped (and
-		 * not passed to the upper stack).
-		 */
-		if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
-			dma_unmap_single(&edev->pdev->dev,
-					 dma_unmap_addr(sw_rx_data, mapping),
-					 rxq->rx_buf_size, DMA_FROM_DEVICE);
-
-			/* If this is an error packet then drop it */
-			parse_flag =
-			le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
-			csum_flag = qede_check_csum(parse_flag);
-			if (csum_flag == QEDE_CSUM_ERROR) {
-				DP_NOTICE(edev,
-					  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
-					  sw_comp_cons, parse_flag);
-				rxq->rx_hw_errors++;
-				kfree(data);
-				goto next_rx;
-			}
-
-			skb = build_skb(data, 0);
-
-			if (unlikely(!skb)) {
-				DP_NOTICE(edev,
-					  "Build_skb failed, dropping incoming packet\n");
-				kfree(data);
-				rxq->rx_alloc_errors++;
-				goto next_rx;
-			}
-
-			skb_reserve(skb, pad);
-
-		} else {
-			DP_NOTICE(edev,
-				  "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
-			qede_reuse_rx_data(rxq);
-			rxq->rx_alloc_errors++;
-			goto next_cqe;
-		}
-
-		sw_rx_data->data = NULL;
-
-		skb_put(skb, len);
+		/* If this is an error packet then drop it */
+		parse_flag = le16_to_cpu(flags);
+
+		csum_flag = qede_check_csum(parse_flag);
+		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+			DP_NOTICE(edev,
+				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+				  sw_comp_cons, parse_flag);
+			rxq->rx_hw_errors++;
+			qede_reuse_page(edev, rxq, sw_rx_data);
+			goto next_rx;
+		}
+
+		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			DP_NOTICE(edev,
+				  "Build_skb failed, dropping incoming packet\n");
+			qede_reuse_page(edev, rxq, sw_rx_data);
+			rxq->rx_alloc_errors++;
+			goto next_rx;
+		}
+
+		/* Copy data into SKB */
+		if (len + pad <= QEDE_RX_HDR_SIZE) {
+			memcpy(skb_put(skb, len),
+			       page_address(data) + pad +
+			       sw_rx_data->page_offset, len);
+			qede_reuse_page(edev, rxq, sw_rx_data);
+		} else {
+			struct skb_frag_struct *frag;
+			unsigned int pull_len;
+			unsigned char *va;
+
+			frag = &skb_shinfo(skb)->frags[0];
+
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
+					pad + sw_rx_data->page_offset,
+					len, rxq->rx_buf_seg_size);
+
+			va = skb_frag_address(frag);
+			pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+			/* Align the pull_len to optimize memcpy */
+			memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+			skb_frag_size_sub(frag, pull_len);
+			frag->page_offset += pull_len;
+			skb->data_len -= pull_len;
+			skb->tail += pull_len;
+
+			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
+							    sw_rx_data))) {
+				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				rxq->rx_alloc_errors++;
+				goto next_cqe;
+			}
+		}
+
+		if (fp_cqe->bd_num != 1) {
+			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
+			u8 num_frags;
+
+			pkt_len -= len;
+
+			for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
+			     num_frags--) {
+				u16 cur_size = pkt_len > rxq->rx_buf_size ?
+						rxq->rx_buf_size : pkt_len;
+
+				WARN_ONCE(!cur_size,
+					  "Still got %d BDs for mapping jumbo, but length became 0\n",
+					  num_frags);
+
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+					goto next_cqe;
+
+				rxq->sw_rx_cons++;
+				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+				qed_chain_consume(&rxq->rx_bd_ring);
+				dma_unmap_page(&edev->pdev->dev,
+					       sw_rx_data->mapping,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
+
+				skb_fill_page_desc(skb,
+						   skb_shinfo(skb)->nr_frags++,
+						   sw_rx_data->data, 0,
+						   cur_size);
+
+				skb->truesize += PAGE_SIZE;
+				skb->data_len += cur_size;
+				skb->len += cur_size;
+				pkt_len -= cur_size;
+			}
+
+			if (pkt_len)
+				DP_ERR(edev,
+				       "Mapped all BDs of jumbo, but still have %d bytes\n",
+				       pkt_len);
+		}
 
 		skb->protocol = eth_type_trans(skb, edev->ndev);
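
The new Rx fastpath above splits on QEDE_RX_HDR_SIZE: frames that fit (len + pad) are memcpy'd whole into the skb head and the page segment recycled immediately, while larger frames attach the page as a frag and pull only the header bytes. The decision itself reduces to this (threshold mirrors the patch's QEDE_RX_HDR_SIZE of 256; the helper name is invented):

	#define SKETCH_RX_HDR_SIZE	256	/* mirrors QEDE_RX_HDR_SIZE */

	static inline bool sketch_rx_copy_whole(unsigned int len,
						unsigned int pad)
	{
		/* small frame: cheaper to copy than to manage a page frag */
		return len + pad <= SKETCH_RX_HDR_SIZE;
	}
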
@@ -1566,17 +1650,17 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
 
 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
 		struct sw_rx_data *rx_buf;
-		u8 *data;
+		struct page *data;
 
 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
 		data = rx_buf->data;
 
-		dma_unmap_single(&edev->pdev->dev,
-				 dma_unmap_addr(rx_buf, mapping),
-				 rxq->rx_buf_size, DMA_FROM_DEVICE);
+		dma_unmap_page(&edev->pdev->dev,
+			       rx_buf->mapping,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 
 		rx_buf->data = NULL;
-		kfree(data);
+		__free_page(data);
 	}
 }
@@ -1600,29 +1684,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
 	struct sw_rx_data *sw_rx_data;
 	struct eth_rx_bd *rx_bd;
 	dma_addr_t mapping;
+	struct page *data;
 	u16 rx_buf_size;
-	u8 *data;
 
 	rx_buf_size = rxq->rx_buf_size;
 
-	data = kmalloc(rx_buf_size, GFP_ATOMIC);
+	data = alloc_pages(GFP_ATOMIC, 0);
 	if (unlikely(!data)) {
-		DP_NOTICE(edev, "Failed to allocate Rx data\n");
+		DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
 		return -ENOMEM;
 	}
 
-	mapping = dma_map_single(&edev->pdev->dev, data,
-				 rx_buf_size, DMA_FROM_DEVICE);
+	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
+	 */
+	mapping = dma_map_page(&edev->pdev->dev, data, 0,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-		kfree(data);
+		__free_page(data);
 		DP_NOTICE(edev, "Failed to map Rx buffer\n");
 		return -ENOMEM;
 	}
 
 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+	sw_rx_data->page_offset = 0;
 	sw_rx_data->data = data;
-
-	dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+	sw_rx_data->mapping = mapping;
 
 	/* Advance PROD and get BD pointer */
 	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
@@ -1643,13 +1730,16 @@ static int qed_alloc_mem_rxq(struct qede_dev *edev,
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
-	rxq->rx_buf_size = NET_IP_ALIGN +
-			   ETH_OVERHEAD +
-			   edev->ndev->mtu +
-			   QEDE_FW_RX_ALIGN_END;
+	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
+			   edev->ndev->mtu;
+	if (rxq->rx_buf_size > PAGE_SIZE)
+		rxq->rx_buf_size = PAGE_SIZE;
+
+	/* Segment size to split a page in multiple equal parts */
+	rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
 
 	/* Allocate the parallel driver ring for Rx buffers */
-	size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
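
For a feel of the sizing above: with a standard 1500-byte MTU, rx_buf_size comes to roughly 1522 bytes, roundup_pow_of_two() turns that into a 2048-byte segment, and each 4 KiB page then carries two Rx buffers (the numbers assume 4K pages; they are not stated in the patch):

	#include <linux/log2.h>
	#include <linux/mm.h>

	/* Hypothetical helper: how many Rx segments fit in one page */
	static inline unsigned int sketch_rx_bufs_per_page(unsigned int rx_buf_size)
	{
		return PAGE_SIZE / roundup_pow_of_two(rx_buf_size);
	}
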
@@ -1660,7 +1750,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_NEXT_PTR,
-					    NUM_RX_BDS_MAX,
+					    RX_RING_SIZE,
 					    sizeof(struct eth_rx_bd),
 					    &rxq->rx_bd_ring);
 
@@ -1671,7 +1761,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME,
 					    QED_CHAIN_MODE_PBL,
-					    NUM_RX_BDS_MAX,
+					    RX_RING_SIZE,
 					    sizeof(union eth_rx_cqe),
 					    &rxq->rx_comp_ring);
 	if (rc)
@@ -11,9 +11,11 @@
 
 #define CORE_SPQE_PAGE_SIZE_BYTES	4096
 
+#define X_FINAL_CLEANUP_AGG_INT	1
+
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	4
-#define FW_REVISION_VERSION	2
+#define FW_MINOR_VERSION	7
+#define FW_REVISION_VERSION	3
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -152,6 +154,9 @@
 /* number of queues in a PF queue group */
 #define QM_PF_QUEUE_GROUP_SIZE	8
 
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE	4
+
 /* base number of Tx PQs in the CM PQ representation.
 * should be used when storing PQ IDs in CM PQ registers and context
 */
@@ -285,6 +290,16 @@
 #define PXP_NUM_ILT_RECORDS_K2	11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
 
+#define SDM_COMP_TYPE_NONE		0
+#define SDM_COMP_TYPE_WAKE_THREAD	1
+#define SDM_COMP_TYPE_AGG_INT		2
+#define SDM_COMP_TYPE_CM		3
+#define SDM_COMP_TYPE_LOADER		4
+#define SDM_COMP_TYPE_PXP		5
+#define SDM_COMP_TYPE_INDICATE_ERROR	6
+#define SDM_COMP_TYPE_RELEASE_THREAD	7
+#define SDM_COMP_TYPE_RAM		8
+
 /******************/
 /* PBF CONSTANTS */
 /******************/
@@ -335,7 +350,7 @@ struct event_ring_entry {
 
 /* Multi function mode */
 enum mf_mode {
-	SF,
+	ERROR_MODE /* Unsupported mode */,
 	MF_OVLAN,
 	MF_NPAR,
 	MAX_MF_MODE
@@ -606,4 +621,19 @@ struct status_block {
 #define STATUS_BLOCK_ZERO_PAD3_SHIFT	24
 };
 
+struct tunnel_parsing_flags {
+	u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK			0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT			0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK	0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT	2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK		0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT	3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK	0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT	5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK		0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT	6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK		0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT		7
+};
 #endif /* __COMMON_HSI__ */
@@ -17,10 +17,8 @@
 #define ETH_MAX_RAMROD_PER_CON	8
 #define ETH_TX_BD_PAGE_SIZE_BYTES	4096
 #define ETH_RX_BD_PAGE_SIZE_BYTES	4096
-#define ETH_RX_SGE_PAGE_SIZE_BYTES	4096
 #define ETH_RX_CQE_PAGE_SIZE_BYTES	4096
 #define ETH_RX_NUM_NEXT_PAGE_BDS	2
-#define ETH_RX_NUM_NEXT_PAGE_SGES	2
 
 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT	1
 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET	18
@@ -34,7 +32,8 @@
 
 #define ETH_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
 
-#define ETH_REG_CQE_PBL_SIZE	3
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT	5
 
 /* num of MAC/VLAN filters */
 #define ETH_NUM_MAC_FILTERS	512
@@ -54,9 +53,9 @@
 
 /* TPA constants */
 #define ETH_TPA_MAX_AGGS_NUM	64
-#define ETH_TPA_CQE_START_SGL_SIZE	3
-#define ETH_TPA_CQE_CONT_SGL_SIZE	6
-#define ETH_TPA_CQE_END_SGL_SIZE	4
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE	ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE	6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE	4
 
 /* Queue Zone sizes */
 #define TSTORM_QZONE_SIZE	0
@@ -74,18 +73,18 @@ struct coalescing_timeset {
 
 struct eth_tx_1st_bd_flags {
 	u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	0
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK		0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT		1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK		0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT		2
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK		0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT	3
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK			0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT			4
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK		0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT		5
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT		0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT		2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT		3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT	4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK			0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT			5
 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK		0x1
 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT		6
 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK		0x1
@@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd {
 	__le16 vlan;
 	u8 nbds;
 	struct eth_tx_1st_bd_flags bd_flags;
-	__le16 fw_use_only;
+	__le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT	0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK		0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT		1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK		0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT		2
 };
 
 /* The parsing information data for the second tx bd of a given packet. */
 struct eth_tx_data_2nd_bd {
 	__le16 tunn_ip_size;
-	__le16 bitfields;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK	0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT	0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK	0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT	13
-	__le16 bitfields2;
+	__le16 bitfields1;
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK	0xF
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT	0
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK	0x3
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT	4
 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK	0x3
 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT	6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT	8
 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK	0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT	8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT	9
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT	10
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT	11
 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT	11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT	12
 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT	12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT	13
 #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT	13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT	14
 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT	14
-#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK	0x1
-#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT	15
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT	15
+	__le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK	0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT	0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK	0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT	13
 };
 
 /* Regular ETH Rx FP CQE. */
@@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe {
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
 	__le32 rss_hash;
-	__le16 len_on_bd;
+	__le16 len_on_first_bd;
 	u8 placement_offset;
-	u8 reserved;
-	__le16 pbl[ETH_REG_CQE_PBL_SIZE];
-	u8 reserved1[10];
+	struct tunnel_parsing_flags tunnel_pars_flags;
+	u8 bd_num;
+	u8 reserved[7];
+	u32 fw_debug;
+	u8 reserved1[3];
+	u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK		0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT		0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK	0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT	1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK		0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT	2
+};
+
+/* TPA-continue ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_cont_cqe {
+	u8 type;
+	u8 tpa_agg_index;
+	__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+	u8 reserved[5];
+	u8 reserved1;
+	__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+};
+
+/* TPA-end ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_end_cqe {
+	u8 type;
+	u8 tpa_agg_index;
+	__le16 total_packet_len;
+	u8 num_of_bds;
+	u8 end_reason;
+	__le16 num_of_coalesced_segs;
+	__le32 ts_delta;
+	__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+	u8 reserved1[3];
+	u8 reserved2;
+	__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+};
+
+/* TPA-start ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_start_cqe {
+	u8 type;
+	u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK	0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT	0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK			0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT			3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK		0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT		7
+	__le16 seg_len;
+	struct parsing_and_err_flags pars_flags;
+	__le16 vlan_tag;
+	__le32 rss_hash;
+	__le16 len_on_first_bd;
+	u8 placement_offset;
+	struct tunnel_parsing_flags tunnel_pars_flags;
+	u8 tpa_agg_index;
+	u8 header_len;
+	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+	u32 fw_debug;
 };
 
 /* The L4 pseudo checksum mode for Ethernet */
@@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe {
 	u8 type;
 	u8 ramrod_cmd_id;
 	u8 error_flag;
-	u8 reserved[27];
+	u8 reserved[25];
 	__le16 echo;
+	u8 reserved1;
+	u8 flags;
+/* for PMD mode - valid indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK		0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT	0
+/* for PMD mode - valid toggle indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK	0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT	1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK	0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT	2
 };
 
 /* union for all ETH Rx CQE types */
 union eth_rx_cqe {
 	struct eth_fast_path_rx_reg_cqe fast_path_regular;
+	struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+	struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+	struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
 	struct eth_slow_path_rx_cqe slow_path;
 };
@@ -183,15 +258,18 @@ enum eth_rx_cqe_type {
 	ETH_RX_CQE_TYPE_UNUSED,
 	ETH_RX_CQE_TYPE_REGULAR,
 	ETH_RX_CQE_TYPE_SLOW_PATH,
+	ETH_RX_CQE_TYPE_TPA_START,
+	ETH_RX_CQE_TYPE_TPA_CONT,
+	ETH_RX_CQE_TYPE_TPA_END,
 	MAX_ETH_RX_CQE_TYPE
 };
 
 /* ETH Rx producers data */
 struct eth_rx_prod_data {
 	__le16 bd_prod;
-	__le16 sge_prod;
 	__le16 cqe_prod;
 	__le16 reserved;
+	__le16 reserved1;
 };
 
 /* The first tx bd of a given packet */
@@ -211,12 +289,17 @@ struct eth_tx_2nd_bd {
 /* The parsing information data for the third tx bd of a given packet. */
 struct eth_tx_data_3rd_bd {
 	__le16 lso_mss;
-	u8 bitfields;
+	__le16 bitfields;
 #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK	0xF
 #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT	0
 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK	0xF
 #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT	4
-	u8 resereved0[3];
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK	0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT	9
+	u8 tunn_l4_hdr_start_offset_w;
+	u8 tunn_hdr_size_w;
 };
 
 /* The third tx bd of a given packet */
@@ -226,12 +309,24 @@ struct eth_tx_3rd_bd {
 	struct eth_tx_data_3rd_bd data;
 };
 
+/* Complementary information for the regular tx bd of a given packet. */
+struct eth_tx_data_bd {
+	__le16 reserved0;
+	__le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK	0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT	0
+#define ETH_TX_DATA_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_BD_RESERVED2_MASK	0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT	9
+	__le16 reserved3;
+};
+
 /* The common non-special TX BD ring element */
 struct eth_tx_bd {
 	struct regpair addr;
 	__le16 nbytes;
-	__le16 reserved0;
-	__le32 reserved1;
+	struct eth_tx_data_bd data;
 };
 
 union eth_tx_bd_types {
@@ -80,7 +80,7 @@ struct qed_dev_info {
 	u8 num_hwfns;
 
 	u8 hw_mac[ETH_ALEN];
-	bool is_mf;
+	bool is_mf_default;
 
 	/* FW version */
 	u16 fw_major;
@@ -360,6 +360,12 @@ enum DP_MODULE {
 	/* to be added...up to 0x8000000 */
 };
 
+enum qed_mf_mode {
+	QED_MF_DEFAULT,
+	QED_MF_OVLAN,
+	QED_MF_NPAR,
+};
+
 struct qed_eth_stats {
 	u64 no_buff_discards;
 	u64 packet_too_big_discard;