author    Roland Dreier <rolandd@cisco.com>    2005-09-13 10:41:03 -0700
committer Roland Dreier <rolandd@cisco.com>    2005-09-18 22:02:37 -0700
commit    d6cff021e24515255b296d399ec517a68bf2ed08 (patch)
tree      f3d21aca027cb7065522629c00f65fc3a8fb1d96 /drivers
parent    bb4a7f0da75ce9f3e933880428d39b2aa1f16961 (diff)
[PATCH] IB/mthca: fix posting of first work request
Fix posting of the first WQE for mem-free HCAs: we need to link to the
previous WQE even in that case.  While we're at it, simplify the code for
Tavor-mode HCAs: we don't really need the conditional test there either,
since we can similarly always link to the previous WQE.

Based on Michael S. Tsirkin's analogous fix for userspace libmthca.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
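The idea behind the fix is the same in the QP and SRQ paths: instead of starting with a NULL "last WQE" pointer and guarding the chaining writes with "if (prev_wqe)", initialize the pointer to the final WQE in the ring, so that even the first posted work request has a valid predecessor to link from. Below is a minimal, hypothetical C sketch of that pattern; struct ring, ring_init() and ring_post() are illustrative names only, not mthca driver code.

#include <stdint.h>

struct entry {
	uint32_t next_index;	/* stands in for the nda_op/ee_nds link in a real WQE */
	uint32_t payload;
};

struct ring {
	struct entry *entries;
	unsigned int size;	/* number of slots; assumed to be a power of two */
	unsigned int head;	/* count of entries ever posted */
	struct entry *last;	/* most recently posted entry */
};

static void ring_init(struct ring *r, struct entry *storage, unsigned int size)
{
	r->entries = storage;
	r->size    = size;
	r->head    = 0;
	/*
	 * The pattern this patch adopts: point "last" at the final slot
	 * rather than NULL, so ring_post() can always update
	 * last->next_index without a NULL check.
	 */
	r->last    = &storage[size - 1];
}

static void ring_post(struct ring *r, uint32_t payload)
{
	unsigned int ind = r->head & (r->size - 1);
	struct entry *e  = &r->entries[ind];

	e->payload = payload;

	/* No "if (r->last)" needed: last is valid even on the first post. */
	r->last->next_index = ind;

	r->last = e;
	r->head++;
}

With last never NULL, the chaining write needs no special case for an empty queue, which is why the if (prev_wqe) / if (likely(prev_wqe)) tests in the hunks below can be dropped. (The sketch ignores the wmb() that the real driver needs between the two halves of the link update.)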
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_qp.c  | 48
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_srq.c | 14
2 files changed, 28 insertions(+), 34 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2c883c1fd53..eaa52185763 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
 	wq->last_comp = wq->max - 1;
 	wq->head = 0;
 	wq->tail = 0;
-	wq->last = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		}
 	}
 
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
 	return 0;
 }
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (prev_wqe) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
 		if (!size0) {
 			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		qp->wrid[ind] = wr->wr_id;
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0)
 			size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0) {
 			size0 = size;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef1..fe06cc0df93 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -189,7 +189,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	srq->max = attr->max_wr;
 	srq->max_gs = attr->max_sge;
-	srq->last = NULL;
 	srq->counter = 0;
 
 	if (mthca_is_memfree(dev))
@@ -264,6 +263,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	srq->first_free = 0;
 	srq->last_free = srq->max - 1;
+	srq->last = get_wqe(srq, srq->max - 1);
 
 	return 0;
@@ -446,13 +446,11 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << srq->wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << srq->wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD);
 
 		srq->wrid[ind] = wr->wr_id;
 		srq->first_free = next_ind;