forked from luck/tmp_suning_uos_patched
mlx4_core: Modify driver initialization flow to accommodate SRIOV for Ethernet
1. Added module parameters sr_iov and probe_vf for controlling enablement of SRIOV mode. 2. Increased default max num-qps, num-mpts and log_num_macs to accommodate SRIOV mode 3. Added port_type_array as a module parameter to allow driver startup with ports configured as desired. In SRIOV mode, only ETH is supported, and this array is ignored; otherwise, for the case where the FW supports both port types (ETH and IB), the port_type_array parameter is used. By default, the port_type_array is set to configure both ports as IB. 4. When running in sriov mode, the master needs to initialize the ICM eq table to hold the eq's for itself and also for all the slaves. 5. mlx4_set_port_mask() now invoked from mlx4_init_hca, instead of in mlx4_dev_cap. 6. Introduced sriov VF (slave) device startup/teardown logic (mainly procedures mlx4_init_slave, mlx4_slave_exit, mlx4_slave_cap, and flow modifications in __mlx4_init_one, mlx4_init_hca, and mlx4_setup_hca). VFs obtain their startup information from the PF (master) device via the comm channel. 7. In SRIOV mode (both PF and VF), MSI_X must be enabled, or the driver aborts loading the device. 8. Do not allow setting port type via sysfs when running in SRIOV mode. 9. mlx4_get_ownership: Currently, only one PF is supported by the driver. If the HCA is burned with FW which enables more than one PF, only one of the PFs is allowed to run. The first one up grabs a FW ownership semaphore -- all other PFs will find that semaphore taken, and the driver will not allow them to run. Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il> Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il> Signed-off-by: Liran Liss <liranl@mellanox.co.il> Signed-off-by: Marcel Apfelbaum <marcela@mellanox.co.il> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
d81c7186aa
commit
ab9c17a009
|
@ -257,7 +257,7 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
|
|||
return err;
|
||||
}
|
||||
|
||||
static int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
|
||||
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
|
||||
unsigned long timeout)
|
||||
{
|
||||
if (mlx4_priv(dev)->cmd.use_events)
|
||||
|
@ -1390,6 +1390,153 @@ void mlx4_master_comm_channel(struct work_struct *work)
|
|||
mlx4_warn(dev, "Failed to arm comm channel events\n");
|
||||
}
|
||||
|
||||
static int sync_toggles(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
int wr_toggle;
|
||||
int rd_toggle;
|
||||
unsigned long end;
|
||||
|
||||
wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
|
||||
end = jiffies + msecs_to_jiffies(5000);
|
||||
|
||||
while (time_before(jiffies, end)) {
|
||||
rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
|
||||
if (rd_toggle == wr_toggle) {
|
||||
priv->cmd.comm_toggle = rd_toggle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/*
|
||||
* we could reach here if for example the previous VM using this
|
||||
* function misbehaved and left the channel with unsynced state. We
|
||||
* should fix this here and give this VM a chance to use a properly
|
||||
* synced channel
|
||||
*/
|
||||
mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
|
||||
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
|
||||
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
|
||||
priv->cmd.comm_toggle = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx4_multi_func_init(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_slave_state *s_state;
|
||||
int i, err, port;
|
||||
|
||||
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
|
||||
&priv->mfunc.vhcr_dma,
|
||||
GFP_KERNEL);
|
||||
if (!priv->mfunc.vhcr) {
|
||||
mlx4_err(dev, "Couldn't allocate vhcr.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (mlx4_is_master(dev))
|
||||
priv->mfunc.comm =
|
||||
ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
|
||||
priv->fw.comm_base, MLX4_COMM_PAGESIZE);
|
||||
else
|
||||
priv->mfunc.comm =
|
||||
ioremap(pci_resource_start(dev->pdev, 2) +
|
||||
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
|
||||
if (!priv->mfunc.comm) {
|
||||
mlx4_err(dev, "Couldn't map communication vector.\n");
|
||||
goto err_vhcr;
|
||||
}
|
||||
|
||||
if (mlx4_is_master(dev)) {
|
||||
priv->mfunc.master.slave_state =
|
||||
kzalloc(dev->num_slaves *
|
||||
sizeof(struct mlx4_slave_state), GFP_KERNEL);
|
||||
if (!priv->mfunc.master.slave_state)
|
||||
goto err_comm;
|
||||
|
||||
for (i = 0; i < dev->num_slaves; ++i) {
|
||||
s_state = &priv->mfunc.master.slave_state[i];
|
||||
s_state->last_cmd = MLX4_COMM_CMD_RESET;
|
||||
__raw_writel((__force u32) 0,
|
||||
&priv->mfunc.comm[i].slave_write);
|
||||
__raw_writel((__force u32) 0,
|
||||
&priv->mfunc.comm[i].slave_read);
|
||||
mmiowb();
|
||||
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
|
||||
s_state->vlan_filter[port] =
|
||||
kzalloc(sizeof(struct mlx4_vlan_fltr),
|
||||
GFP_KERNEL);
|
||||
if (!s_state->vlan_filter[port]) {
|
||||
if (--port)
|
||||
kfree(s_state->vlan_filter[port]);
|
||||
goto err_slaves;
|
||||
}
|
||||
INIT_LIST_HEAD(&s_state->mcast_filters[port]);
|
||||
}
|
||||
spin_lock_init(&s_state->lock);
|
||||
}
|
||||
|
||||
memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
|
||||
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
|
||||
INIT_WORK(&priv->mfunc.master.comm_work,
|
||||
mlx4_master_comm_channel);
|
||||
INIT_WORK(&priv->mfunc.master.slave_event_work,
|
||||
mlx4_gen_slave_eqe);
|
||||
INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
|
||||
mlx4_master_handle_slave_flr);
|
||||
spin_lock_init(&priv->mfunc.master.slave_state_lock);
|
||||
priv->mfunc.master.comm_wq =
|
||||
create_singlethread_workqueue("mlx4_comm");
|
||||
if (!priv->mfunc.master.comm_wq)
|
||||
goto err_slaves;
|
||||
|
||||
if (mlx4_init_resource_tracker(dev))
|
||||
goto err_thread;
|
||||
|
||||
sema_init(&priv->cmd.slave_sem, 1);
|
||||
err = mlx4_ARM_COMM_CHANNEL(dev);
|
||||
if (err) {
|
||||
mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
|
||||
err);
|
||||
goto err_resource;
|
||||
}
|
||||
|
||||
} else {
|
||||
err = sync_toggles(dev);
|
||||
if (err) {
|
||||
mlx4_err(dev, "Couldn't sync toggles\n");
|
||||
goto err_comm;
|
||||
}
|
||||
|
||||
sema_init(&priv->cmd.slave_sem, 1);
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_resource:
|
||||
mlx4_free_resource_tracker(dev);
|
||||
err_thread:
|
||||
flush_workqueue(priv->mfunc.master.comm_wq);
|
||||
destroy_workqueue(priv->mfunc.master.comm_wq);
|
||||
err_slaves:
|
||||
while (--i) {
|
||||
for (port = 1; port <= MLX4_MAX_PORTS; port++)
|
||||
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
|
||||
}
|
||||
kfree(priv->mfunc.master.slave_state);
|
||||
err_comm:
|
||||
iounmap(priv->mfunc.comm);
|
||||
err_vhcr:
|
||||
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
|
||||
priv->mfunc.vhcr,
|
||||
priv->mfunc.vhcr_dma);
|
||||
priv->mfunc.vhcr = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int mlx4_cmd_init(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
@ -1425,6 +1572,27 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
int i, port;
|
||||
|
||||
if (mlx4_is_master(dev)) {
|
||||
flush_workqueue(priv->mfunc.master.comm_wq);
|
||||
destroy_workqueue(priv->mfunc.master.comm_wq);
|
||||
for (i = 0; i < dev->num_slaves; i++) {
|
||||
for (port = 1; port <= MLX4_MAX_PORTS; port++)
|
||||
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
|
||||
}
|
||||
kfree(priv->mfunc.master.slave_state);
|
||||
iounmap(priv->mfunc.comm);
|
||||
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
|
||||
priv->mfunc.vhcr,
|
||||
priv->mfunc.vhcr_dma);
|
||||
priv->mfunc.vhcr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
|
|
@ -1071,7 +1071,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|||
|
||||
/* UAR attributes */
|
||||
|
||||
MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
|
||||
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
|
||||
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
|
||||
|
||||
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
|
||||
|
@ -1084,6 +1084,72 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|||
return err;
|
||||
}
|
||||
|
||||
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
|
||||
struct mlx4_init_hca_param *param)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
__be32 *outbox;
|
||||
int err;
|
||||
|
||||
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
outbox = mailbox->buf;
|
||||
|
||||
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
|
||||
MLX4_CMD_QUERY_HCA,
|
||||
MLX4_CMD_TIME_CLASS_B,
|
||||
!mlx4_is_slave(dev));
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
|
||||
|
||||
/* QPC/EEC/CQC/EQC/RDMARC attributes */
|
||||
|
||||
MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
|
||||
MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
|
||||
MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
|
||||
MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
|
||||
MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
|
||||
MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
|
||||
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
|
||||
|
||||
/* multicast attributes */
|
||||
|
||||
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
|
||||
MLX4_GET(param->log_mc_entry_sz, outbox,
|
||||
INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
|
||||
MLX4_GET(param->log_mc_hash_sz, outbox,
|
||||
INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
|
||||
MLX4_GET(param->log_mc_table_sz, outbox,
|
||||
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
|
||||
|
||||
/* TPT attributes */
|
||||
|
||||
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
|
||||
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
|
||||
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
|
||||
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
|
||||
|
||||
/* UAR attributes */
|
||||
|
||||
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
|
||||
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
|
||||
|
||||
out:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
|
|
|
@ -161,6 +161,7 @@ struct mlx4_init_hca_param {
|
|||
u8 log_mc_table_sz;
|
||||
u8 log_mpt_sz;
|
||||
u8 log_uar_sz;
|
||||
u8 uar_page_sz; /* log pg sz in 4k chunks */
|
||||
};
|
||||
|
||||
struct mlx4_init_ib_param {
|
||||
|
@ -197,6 +198,7 @@ int mlx4_RUN_FW(struct mlx4_dev *dev);
|
|||
int mlx4_QUERY_FW(struct mlx4_dev *dev);
|
||||
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
|
||||
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
|
||||
int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
|
||||
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
|
||||
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
|
||||
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -49,6 +49,7 @@
|
|||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#define DRV_NAME "mlx4_core"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DRV_VERSION "1.0"
|
||||
#define DRV_RELDATE "July 14, 2011"
|
||||
|
||||
|
@ -957,10 +958,15 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
|
|||
|
||||
int mlx4_cmd_init(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
|
||||
int mlx4_multi_func_init(struct mlx4_dev *dev);
|
||||
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
|
||||
int mlx4_cmd_use_events(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
|
||||
|
||||
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
|
||||
unsigned long timeout);
|
||||
|
||||
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
|
||||
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
|
||||
|
||||
|
|
|
@ -225,4 +225,6 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
|
|||
|
||||
u32 mlx4_comm_get_version(void);
|
||||
|
||||
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
|
||||
|
||||
#endif /* MLX4_CMD_H */
|
||||
|
|
|
@ -489,6 +489,7 @@ struct mlx4_dev {
|
|||
struct radix_tree_root qp_table_tree;
|
||||
u8 rev_id;
|
||||
char board_id[MLX4_BOARD_ID_LEN];
|
||||
int num_vfs;
|
||||
};
|
||||
|
||||
struct mlx4_init_port_param {
|
||||
|
|
Loading…
Reference in New Issue
Block a user