Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ipath: Workaround problem of errormask register being overwritten
  IB/ipath: Fix some issues with buffer cancel and sendctrl register update
  IB/ipath: Use faster put_tid_2 routine after initialization
  IB/ipath: Remove unsafe fastrcvint code from interrupt handler
  IB/ehca: Move extern declarations from .c files to .h files
  IB/mlx4: Whitespace fix
  IB/ehca: Fix include order to better match kernel style
  mlx4_core: Remove kfree() in mlx4_mr_alloc() error flow
  RDMA/amso1100: Initialize the wait_queue_head_t in the c2_qp structure
This commit is contained in:
commit 4dcf39c6cc
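The RDMA/amso1100 fix below is the simplest of the batch: c2_alloc_qp() set up the QP but never initialized the wait_queue_head_t embedded in struct c2_qp, so the teardown path slept on an uninitialized wait queue. A minimal sketch of the pattern, using hypothetical names rather than the driver's real structures:

#include <linux/wait.h>

/* Hypothetical QP-like object mirroring the c2_qp pattern (not the real struct). */
struct example_qp {
	int refcount;
	wait_queue_head_t wait;	/* destroy path sleeps here until refcount hits 0 */
};

/* Allocation path: the head must be initialized before any sleeper or waker touches it. */
static void example_qp_setup(struct example_qp *qp)
{
	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);	/* the call the amso1100 fix adds */
}

/* Teardown path: without the init above, this sleeps on garbage memory. */
static void example_qp_wait_idle(struct example_qp *qp)
{
	wait_event(qp->wait, qp->refcount == 0);
}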
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -506,6 +506,7 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
 	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
 	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
+	init_waitqueue_head(&qp->wait);
 
 	/* Initialize the SQ MQ */
 	q_size = be32_to_cpu(reply->sq_depth);
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -322,6 +322,7 @@ extern int ehca_static_rate;
 extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
+extern int ehca_mr_largepage;
 
 struct ipzu_queue_resp {
 	u32 qe_size; /* queue entry size */
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -40,10 +40,10 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <rdma/ib_umem.h>
-
 #include <asm/current.h>
 
+#include <rdma/ib_umem.h>
+
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
 #include "hcp_if.h"
@@ -64,8 +64,6 @@ enum ehca_mr_pgsize {
 	EHCA_MR_PGSIZE16M = 0x1000000L
 };
 
-extern int ehca_mr_largepage;
-
 static u32 ehca_encode_hwpage_size(u32 pgsize)
 {
 	u32 idx = 0;
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -88,7 +88,6 @@ int ehca_dealloc_pd(struct ib_pd *pd)
 	u32 cur_pid = current->tgid;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	int i, leftovers = 0;
-	extern struct kmem_cache *small_qp_cache;
 	struct ipz_small_queue_page *page, *tmp;
 
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -758,7 +758,6 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
 			     const u64 logical_address_of_page,
 			     const u64 count)
 {
-	extern int ehca_debug_level;
 	u64 ret;
 
 	if (unlikely(ehca_debug_level >= 2)) {
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -54,6 +54,8 @@
 struct ehca_pd;
 struct ipz_small_queue_page;
 
+extern struct kmem_cache *small_qp_cache;
+
 /* struct generic ehca page */
 struct ipz_page {
 	u8 entries[EHCA_PAGESIZE];
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -100,8 +100,7 @@ struct infinipath_stats {
 	__u64 sps_hwerrs;
 	/* number of times IB link changed state unexpectedly */
 	__u64 sps_iblink;
-	/* kernel receive interrupts that didn't read intstat */
-	__u64 sps_fastrcvint;
+	__u64 sps_unused; /* was fastrcvint, no longer implemented */
 	/* number of kernel (port0) packets received */
 	__u64 sps_port0pkts;
 	/* number of "ethernet" packets sent by driver */
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -740,7 +740,7 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
 	 * pioavail updates to memory to stop.
 	 */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 sendorig & ~IPATH_S_PIOBUFAVAILUPD);
+			 sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
 	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 			 dd->ipath_sendctrl);
@@ -1614,7 +1614,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
  * it's safer to always do it.
  * PIOAvail bits are updated by the chip as if normal send had happened.
  */
-void ipath_cancel_sends(struct ipath_devdata *dd)
+void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
 {
 	ipath_dbg("Cancelling all in-progress send buffers\n");
 	dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */
@@ -1627,6 +1627,9 @@ void ipath_cancel_sends(struct ipath_devdata *dd)
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	ipath_disarm_piobufs(dd, 0,
 		(unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
+	if (restore_sendctrl) /* else done by caller later */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 dd->ipath_sendctrl);
 
 	/* and again, be sure all have hit the chip */
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
@@ -1655,7 +1658,7 @@ static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 	/* flush all queued sends when going to DOWN or INIT, to be sure that
 	 * they don't block MAD packets */
 	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
-		ipath_cancel_sends(dd);
+		ipath_cancel_sends(dd, 1);
 
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 			 dd->ipath_ibcctrl | which);
@@ -2000,7 +2003,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 
 	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
 			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-	ipath_cancel_sends(dd);
+	ipath_cancel_sends(dd, 0);
 
 	/* disable IBC */
 	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -321,6 +321,8 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
 	  << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
 
 static int ipath_pe_txe_recover(struct ipath_devdata *);
+static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
+			       u32, unsigned long);
 
 /**
  * ipath_pe_handle_hwerrors - display hardware errors.
@@ -555,8 +557,11 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
 		ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
 			      dd->ipath_majrev, dd->ipath_minrev);
 		ret = 1;
-	} else
+	} else {
 		ret = 0;
+		if (dd->ipath_minrev >= 2)
+			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
+	}
 
 	return ret;
 }
@@ -1220,7 +1225,7 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
 		   port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
 
 	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
 				 tidinv);
 
 	tidbase = (u64 __iomem *)
@@ -1229,7 +1234,7 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
 		   port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
 
 	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-		ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
 				 tidinv);
 }
 
@@ -1395,10 +1400,11 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
 	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
 	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
 	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
-	if (dd->ipath_minrev >= 2)
-		dd->ipath_f_put_tid = ipath_pe_put_tid_2;
-	else
-		dd->ipath_f_put_tid = ipath_pe_put_tid;
+	/*
+	 * this may get changed after we read the chip revision,
+	 * but we start with the safe version for all revs
+	 */
+	dd->ipath_f_put_tid = ipath_pe_put_tid;
 	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
 	dd->ipath_f_setextled = ipath_setup_pe_setextled;
 	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -782,7 +782,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 * Follows early_init because some chips have to initialize
 	 * PIO buffers in early_init to avoid false parity errors.
 	 */
-	ipath_cancel_sends(dd);
+	ipath_cancel_sends(dd, 0);
 
 	/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
 	 * done after early_init */
@@ -851,13 +851,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
 			 dd->ipath_hwerrmask);
 
-	dd->ipath_maskederrs = dd->ipath_ignorederrs;
 	/* clear all */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
 	/* enable errors that are masked, at least this first time. */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 			 ~dd->ipath_maskederrs);
-	/* clear any interrups up to this point (ints still not enabled) */
+	dd->ipath_errormask = ipath_read_kreg64(dd,
+		dd->ipath_kregs->kr_errormask);
+	/* clear any interrupts up to this point (ints still not enabled) */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
 	/*
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -303,7 +303,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 		 * Flush all queued sends when link went to DOWN or INIT,
 		 * to be sure that they don't block SMA and other MAD packets
 		 */
-		ipath_cancel_sends(dd);
+		ipath_cancel_sends(dd, 1);
 	}
 	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
 	    lstate == IPATH_IBSTATE_ACTIVE) {
@@ -517,10 +517,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 
 	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
 
-	/*
-	 * don't report errors that are masked (includes those always
-	 * ignored)
-	 */
+	/* don't report errors that are masked */
 	errs &= ~dd->ipath_maskederrs;
 
 	/* do these first, they are most important */
@@ -566,19 +563,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		 * ones on this particular interrupt, which also isn't great
 		 */
 		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+		dd->ipath_errormask &= ~dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-				 ~dd->ipath_maskederrs);
+			dd->ipath_errormask);
 		s_iserr = ipath_decode_err(msg, sizeof msg,
-					   (dd->ipath_maskederrs & ~dd->
-					    ipath_ignorederrs));
+			dd->ipath_maskederrs);
 
-		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
+		if (dd->ipath_maskederrs &
 			~(INFINIPATH_E_RRCVEGRFULL |
 			INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
 			ipath_dev_err(dd, "Temporarily disabling "
 			    "error(s) %llx reporting; too frequent (%s)\n",
-				(unsigned long long) (dd->ipath_maskederrs &
-				~dd->ipath_ignorederrs), msg);
+				(unsigned long long)dd->ipath_maskederrs,
+				msg);
 		else {
 			/*
 			 * rcvegrfull and rcvhdrqfull are "normal",
@@ -793,19 +790,22 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
 
+	/* also disable interrupts; errormask is sometimes overwriten */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
+
 	/*
 	 * clear all sends, because they have may been
 	 * completed by usercode while in freeze mode, and
 	 * therefore would not be sent, and eventually
 	 * might cause the process to run out of bufs
 	 */
-	ipath_cancel_sends(dd);
+	ipath_cancel_sends(dd, 0);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
 			 dd->ipath_control);
 
 	/* ensure pio avail updates continue */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~IPATH_S_PIOBUFAVAILUPD);
+		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 			 dd->ipath_sendctrl);
@@ -817,7 +817,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
 		/* deal with 6110 chip bug */
 		im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
-		val = ipath_read_kreg64(dd, 0x1000+(im*sizeof(u64)));
+		val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im);
 		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
 			= le64_to_cpu(val);
 	}
@@ -832,7 +832,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
 		E_SPKT_ERRS_IGNORE);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-		~dd->ipath_maskederrs);
+		dd->ipath_errormask);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
 }
 
@@ -1002,7 +1003,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 	u32 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
-	u32 oldhead, curtail;
 	static unsigned unexpected = 0;
 	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
 		 (1U<<INFINIPATH_I_RCVURG_SHIFT);
@@ -1035,36 +1035,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 		goto bail;
 	}
 
-	/*
-	 * We try to avoid reading the interrupt status register, since
-	 * that's a PIO read, and stalls the processor for up to about
-	 * ~0.25 usec. The idea is that if we processed a port0 packet,
-	 * we blindly clear the port 0 receive interrupt bits, and nothing
-	 * else, then return. If other interrupts are pending, the chip
-	 * will re-interrupt us as soon as we write the intclear register.
-	 * We then won't process any more kernel packets (if not the 2nd
-	 * time, then the 3rd or 4th) and we'll then handle the other
-	 * interrupts. We clear the interrupts first so that we don't
-	 * lose intr for later packets that arrive while we are processing.
-	 */
-	oldhead = dd->ipath_port0head;
-	curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-	if (oldhead != curtail) {
-		if (dd->ipath_flags & IPATH_GPIO_INTR) {
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-					 (u64) (1 << IPATH_GPIO_PORT0_BIT));
-			istat = port0rbits | INFINIPATH_I_GPIO;
-		}
-		else
-			istat = port0rbits;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-		ipath_kreceive(dd);
-		if (oldhead != dd->ipath_port0head) {
-			ipath_stats.sps_fastrcvint++;
-			goto done;
-		}
-	}
-
 	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
 
 	if (unlikely(!istat)) {
@@ -1225,7 +1195,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 		handle_layer_pioavail(dd);
 	}
 
-done:
 	ret = IRQ_HANDLED;
 
 bail:
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -261,18 +261,10 @@ struct ipath_devdata {
 	 * limiting of hwerror reporting
 	 */
 	ipath_err_t ipath_lasthwerror;
-	/*
-	 * errors masked because they occur too fast, also includes errors
-	 * that are always ignored (ipath_ignorederrs)
-	 */
+	/* errors masked because they occur too fast */
 	ipath_err_t ipath_maskederrs;
 	/* time in jiffies at which to re-enable maskederrs */
 	unsigned long ipath_unmasktime;
-	/*
-	 * errors always ignored (masked), at least for a given
-	 * chip/device, because they are wrong or not useful
-	 */
-	ipath_err_t ipath_ignorederrs;
 	/* count of egrfull errors, combined for all ports */
 	u64 ipath_last_tidfull;
 	/* for ipath_qcheck() */
@@ -436,6 +428,7 @@ struct ipath_devdata {
 	u64 ipath_lastibcstat;
 	/* hwerrmask shadow */
 	ipath_err_t ipath_hwerrmask;
+	ipath_err_t ipath_errormask; /* errormask shadow */
 	/* interrupt config reg shadow */
 	u64 ipath_intconfig;
 	/* kr_sendpiobufbase value */
@@ -683,7 +676,7 @@ int ipath_unordered_wc(void);
 
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
 			  unsigned cnt);
-void ipath_cancel_sends(struct ipath_devdata *);
+void ipath_cancel_sends(struct ipath_devdata *, int);
 
 int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
 void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -196,6 +196,45 @@ static void ipath_qcheck(struct ipath_devdata *dd)
 	}
 }
 
+static void ipath_chk_errormask(struct ipath_devdata *dd)
+{
+	static u32 fixed;
+	u32 ctrl;
+	unsigned long errormask;
+	unsigned long hwerrs;
+
+	if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
+		return;
+
+	errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
+
+	if (errormask == dd->ipath_errormask)
+		return;
+	fixed++;
+
+	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
+		dd->ipath_errormask);
+
+	if ((hwerrs & dd->ipath_hwerrmask) ||
+		(ctrl & INFINIPATH_C_FREEZEMODE)) {
+		/* force re-interrupt of pending events, just in case */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
+		dev_info(&dd->pcidev->dev,
+			"errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
+			fixed, errormask, (unsigned long)dd->ipath_errormask,
+			ctrl, hwerrs);
+	} else
+		ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
+			fixed, errormask,
+			(unsigned long)dd->ipath_errormask);
+}
+
+
 /**
  * ipath_get_faststats - get word counters from chip before they overflow
  * @opaque - contains a pointer to the infinipath device ipath_devdata
@@ -251,14 +290,13 @@ void ipath_get_faststats(unsigned long opaque)
 		dd->ipath_lasterror = 0;
 	if (dd->ipath_lasthwerror)
 		dd->ipath_lasthwerror = 0;
-	if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
+	if (dd->ipath_maskederrs
 	    && time_after(jiffies, dd->ipath_unmasktime)) {
 		char ebuf[256];
 		int iserr;
 		iserr = ipath_decode_err(ebuf, sizeof ebuf,
-			(dd->ipath_maskederrs & ~dd->
-			 ipath_ignorederrs));
-		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
+			dd->ipath_maskederrs);
+		if (dd->ipath_maskederrs &
 			~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
 			INFINIPATH_E_PKTERRS ))
 			ipath_dev_err(dd, "Re-enabling masked errors "
@@ -278,9 +316,12 @@ void ipath_get_faststats(unsigned long opaque)
 			ipath_cdbg(ERRPKT, "Re-enabling packet"
 					" problem interrupt (%s)\n", ebuf);
 		}
-		dd->ipath_maskederrs = dd->ipath_ignorederrs;
+
+		/* re-enable masked errors */
+		dd->ipath_errormask |= dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-				 ~dd->ipath_maskederrs);
+			dd->ipath_errormask);
+		dd->ipath_maskederrs = 0;
 	}
 
 	/* limit qfull messages to ~one per minute per port */
@@ -294,6 +335,7 @@ void ipath_get_faststats(unsigned long opaque)
 		}
 	}
 
+	ipath_chk_errormask(dd);
 done:
 	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
 }
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1209,7 +1209,6 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
 	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
 	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
 	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
-
 }
 
 static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -255,10 +255,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 	int err;
 
 	index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-	if (index == -1) {
-		err = -ENOMEM;
-		goto err;
-	}
+	if (index == -1)
+		return -ENOMEM;
 
 	mr->iova = iova;
 	mr->size = size;
@@ -269,15 +267,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
-		goto err_index;
-
-	return 0;
-
-err_index:
-	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
-
-err:
-	kfree(mr);
+		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
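The mlx4_core hunks above follow the usual kernel ownership convention: mlx4_mr_alloc() does not allocate 'mr' (its callers do; in mlx4_ib it is embedded in a larger structure), so freeing it on the error path was a bug. A minimal sketch of the convention, with hypothetical names standing in for the mlx4 helpers:

#include <linux/errno.h>

/* Hypothetical stand-ins for mlx4_bitmap_alloc()/mlx4_bitmap_free()/mlx4_mtt_init(). */
static int example_index_alloc(void)      { return 0; }
static void example_index_free(int index) { (void)index; }
static int example_hw_init(void)          { return 0; }

struct example_mr {
	int index;
};

/*
 * The caller owns 'mr', so on failure this function releases only the
 * index it acquired itself and never frees 'mr' -- the kfree(mr) the
 * commit removes violated that rule.
 */
static int example_mr_alloc(struct example_mr *mr)
{
	int err;

	mr->index = example_index_alloc();
	if (mr->index == -1)
		return -ENOMEM;

	err = example_hw_init();
	if (err)
		example_index_free(mr->index);

	return err;
}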