From 1c69fc2a9012e160c8d459f63df74a6b01db8322 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 6 Feb 2008 21:07:54 -0800 Subject: IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf We use struct mlx4_buf for kernel QP, CQ and SRQ buffers, and the code to look up an entry is duplicated in get_cqe_from_buf() and the QP and SRQ versions of get_wqe(). Factor this out into mlx4_buf_offset(). This will also make it easier to switch over to using vmap() for buffers. Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/cq.c | 8 +------- drivers/infiniband/hw/mlx4/qp.c | 6 +----- drivers/infiniband/hw/mlx4/srq.c | 8 +------- include/linux/mlx4/device.h | 8 ++++++++ 4 files changed, 11 insertions(+), 19 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 7950aa6e818..8ac7b973f87 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -64,13 +64,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) { - int offset = n * sizeof (struct mlx4_cqe); - - if (buf->buf.nbufs == 1) - return buf->buf.u.direct.buf + offset; - else - return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); + return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe)); } static void *get_cqe(struct mlx4_ib_cq *cq, int n) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8cba9c532e6..376db730bc7 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -96,11 +96,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) static void *get_wqe(struct mlx4_ib_qp *qp, int offset) { - if (qp->buf.nbufs == 1) - return qp->buf.u.direct.buf + offset; - else - return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); + return mlx4_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index e7e9a3d0dac..beaa3b06cf5 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -38,13 +38,7 @@ static void *get_wqe(struct mlx4_ib_srq *srq, int n) { - int offset = n << srq->msrq.wqe_shift; - - if (srq->buf.nbufs == 1) - return srq->buf.u.direct.buf + offset; - else - return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); + return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); } static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 222815d91c4..a0afa7511a3 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -308,6 +308,14 @@ struct mlx4_init_port_param { int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); +static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) +{ + if (buf->nbufs == 1) + return buf->u.direct.buf + offset; + else + return buf->u.page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); -- cgit From 313abe55a87bc10e55d00f337d609e17ad5f8c9a Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 28 Jan 2008 10:40:51 +0200 Subject: mlx4_core: For 64-bit systems, vmap() kernel 
queue buffers Since kernel virtual memory is not a problem on 64-bit systems, there is no reason to use our own 2-layer page mapping scheme for large kernel queue buffers on such systems. Instead, map the page list to a single virtually contiguous buffer with vmap(), so that we can access buffer memory via direct indexing. Signed-off-by: Michael S. Tsirkin Signed-off-by: Jack Morgenstein Signed-off-by: Roland Dreier --- drivers/net/mlx4/alloc.c | 16 ++++++++++++++++ include/linux/mlx4/device.h | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index b226e019bc8..2da2c2ec1f2 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c @@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; ++i) + pages[i] = virt_to_page(buf->u.page_list[i].buf); + buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->u.direct.buf) + goto err_free; + } } return 0; @@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, buf->u.direct.map); else { + if (BITS_PER_LONG == 64) + vunmap(buf->u.direct.buf); + for (i = 0; i < buf->nbufs; ++i) if (buf->u.page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a0afa7511a3..631607788f8 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -189,7 +189,7 @@ struct mlx4_buf_list { }; struct mlx4_buf { - union { + struct { struct mlx4_buf_list direct; struct mlx4_buf_list *page_list; } u; @@ -310,7 +310,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { - if (buf->nbufs == 1) + if (BITS_PER_LONG == 64 || buf->nbufs == 1) return buf->u.direct.buf + offset; else return buf->u.page_list[offset >> PAGE_SHIFT].buf + (offset & (PAGE_SIZE - 1)); -- cgit From b57aacfa7a95328f469d0360e49289b023c47e9e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 6 Feb 2008 21:17:59 -0800 Subject: mlx4_core: Clean up struct mlx4_buf Now that struct mlx4_buf.u is a struct instead of a union because of the vmap() changes, there's no point in having a struct at all. So move .direct and .page_list directly into struct mlx4_buf and get rid of a bunch of unnecessary ".u"s.
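The vmap() technique used in the 64-bit patch above can also be shown in isolation. The sketch below is a minimal, hypothetical kernel snippet, not taken from any patch in this series; in the mlx4 code the pages come from dma_alloc_coherent() rather than alloc_page(), but the mapping step is the same. It stitches separately allocated pages into one contiguous kernel virtual range so the buffer can then be addressed with plain offsets.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Allocate nr separate pages and map them into one contiguous kernel VA range. */
static void *example_vmap_buffer(struct page **pages, int nr)
{
        int i;

        for (i = 0; i < nr; ++i) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto err_free;
        }

        /* VM_MAP + PAGE_KERNEL give an ordinary, cacheable kernel mapping. */
        return vmap(pages, nr, VM_MAP, PAGE_KERNEL);

err_free:
        while (--i >= 0)
                __free_page(pages[i]);
        return NULL;
}

A mapping created this way is torn down with vunmap() before the individual pages are released, which is the same order the patched mlx4_buf_free() above uses.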
Signed-off-by: Roland Dreier --- drivers/net/mlx4/alloc.c | 40 ++++++++++++++++++++-------------------- drivers/net/mlx4/mr.c | 4 ++-- include/linux/mlx4/device.h | 10 ++++------ 3 files changed, 26 insertions(+), 28 deletions(-) diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 2da2c2ec1f2..521dc0322ee 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c @@ -116,40 +116,40 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, buf->nbufs = 1; buf->npages = 1; buf->page_shift = get_order(size) + PAGE_SHIFT; - buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev, + buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, &t, GFP_KERNEL); - if (!buf->u.direct.buf) + if (!buf->direct.buf) return -ENOMEM; - buf->u.direct.map = t; + buf->direct.map = t; while (t & ((1 << buf->page_shift) - 1)) { --buf->page_shift; buf->npages *= 2; } - memset(buf->u.direct.buf, 0, size); + memset(buf->direct.buf, 0, size); } else { int i; buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; - buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list, + buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, GFP_KERNEL); - if (!buf->u.page_list) + if (!buf->page_list) return -ENOMEM; for (i = 0; i < buf->nbufs; ++i) { - buf->u.page_list[i].buf = + buf->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL); - if (!buf->u.page_list[i].buf) + if (!buf->page_list[i].buf) goto err_free; - buf->u.page_list[i].map = t; + buf->page_list[i].map = t; - memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); + memset(buf->page_list[i].buf, 0, PAGE_SIZE); } if (BITS_PER_LONG == 64) { @@ -158,10 +158,10 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, if (!pages) goto err_free; for (i = 0; i < buf->nbufs; ++i) - pages[i] = virt_to_page(buf->u.page_list[i].buf); - buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); kfree(pages); - if (!buf->u.direct.buf) + if (!buf->direct.buf) goto err_free; } } @@ -180,18 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) int i; if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, - buf->u.direct.map); + dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, + buf->direct.map); else { if (BITS_PER_LONG == 64) - vunmap(buf->u.direct.buf); + vunmap(buf->direct.buf); for (i = 0; i < buf->nbufs; ++i) - if (buf->u.page_list[i].buf) + if (buf->page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - buf->u.page_list[i].buf, - buf->u.page_list[i].map); - kfree(buf->u.page_list); + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); } } EXPORT_SYMBOL_GPL(mlx4_buf_free); diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 9c9e308d091..679dfdb6807 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, for (i = 0; i < buf->npages; ++i) if (buf->nbufs == 1) - page_list[i] = buf->u.direct.map + (i << buf->page_shift); + page_list[i] = buf->direct.map + (i << buf->page_shift); else - page_list[i] = buf->u.page_list[i].map; + page_list[i] = buf->page_list[i].map; err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 
631607788f8..4210ac4a8bc 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -189,10 +189,8 @@ struct mlx4_buf_list { }; struct mlx4_buf { - struct { - struct mlx4_buf_list direct; - struct mlx4_buf_list *page_list; - } u; + struct mlx4_buf_list direct; + struct mlx4_buf_list *page_list; int nbufs; int npages; int page_shift; @@ -311,9 +309,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { if (BITS_PER_LONG == 64 || buf->nbufs == 1) - return buf->u.direct.buf + offset; + return buf->direct.buf + offset; else - return buf->u.page_list[offset >> PAGE_SHIFT].buf + + return buf->page_list[offset >> PAGE_SHIFT].buf + (offset & (PAGE_SIZE - 1)); } -- cgit From ea54b10c7773007e173da31fe7adcc049da33331 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 28 Jan 2008 10:40:59 +0200 Subject: IB/mlx4: Use multiple WQ blocks to post smaller send WQEs ConnectX HCA supports shrinking WQEs, so that a single work request can be made of multiple units of wqe_shift. This way, WRs can differ in size, and do not have to be a power of 2 in size, saving memory and speeding up send WR posting. Unfortunately, if we do this then the wqe_index field in CQEs can't be used to look up the WR ID anymore, so our implementation does this only if selective signaling is off. Further, on 32-bit platforms, we can't use vmap() to make the QP buffer virtually contiguous. Thus we have to use constant-sized WRs to make sure a WR is always fully within a single page-sized chunk. Finally, we use WRs with the NOP opcode to avoid wrapping around the queue buffer in the middle of posting a WR, and we set the NoErrorCompletion bit to avoid getting completions with error for NOP WRs. However, NEC is only supported starting with firmware 2.2.232, so we use constant-sized WRs for older firmware. And, since MLX QPs only support SEND, we use constant-sized WRs in this case. When stamping during NOP posting, do the stamping after setting the NOP WQE valid bit. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Jack Morgenstein Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/cq.c | 12 +- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 + drivers/infiniband/hw/mlx4/qp.c | 210 +++++++++++++++++++++++++++++------ include/linux/mlx4/device.h | 5 + include/linux/mlx4/qp.h | 4 + 5 files changed, 197 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 8ac7b973f87..7360bbafbe8 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -326,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR; + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && + is_send)) { + printk(KERN_WARNING "Completion for NOP opcode detected!\n"); + return -EINVAL; + } + if (!*cur_qp || (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { /* @@ -348,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, if (is_send) { wq = &(*cur_qp)->sq; - wqe_ctr = be16_to_cpu(cqe->wqe_index); - wq->tail += (u16) (wqe_ctr - (u16) wq->tail); + if (!(*cur_qp)->sq_signal_bits) { + wqe_ctr = be16_to_cpu(cqe->wqe_index); + wq->tail += (u16) (wqe_ctr - (u16) wq->tail); + } wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; } else if ((*cur_qp)->ibqp.srq) { diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 28697653a37..3726e451a32 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -120,6 +120,8 @@ struct mlx4_ib_qp { u32 doorbell_qpn; __be32 sq_signal_bits; + unsigned sq_next_wqe; + int sq_max_wqes_per_wr; int sq_spare_wqes; struct mlx4_ib_wq sq; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 376db730bc7..958e205b6d7 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -30,6 +30,8 @@ * SOFTWARE. */ +#include + #include #include @@ -111,16 +113,87 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) /* * Stamp a SQ WQE so that it is invalid if prefetched by marking the - * first four bytes of every 64 byte chunk with 0xffffffff, except for - * the very first chunk of the WQE. + * first four bytes of every 64 byte chunk with + * 0x7FFFFFF | (invalid_ownership_value << 31). + * + * When the max work request size is less than or equal to the WQE + * basic block size, as an optimization, we can stamp all WQEs with + * 0xffffffff, and skip the very first chunk of each WQE. */ -static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) +static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) { - u32 *wqe = get_send_wqe(qp, n); + u32 *wqe; int i; + int s; + int ind; + void *buf; + __be32 stamp; + + s = roundup(size, 1U << qp->sq.wqe_shift); + if (qp->sq_max_wqes_per_wr > 1) { + for (i = 0; i < s; i += 64) { + ind = (i >> qp->sq.wqe_shift) + n; + stamp = ind & qp->sq.wqe_cnt ? 
cpu_to_be32(0x7fffffff) : + cpu_to_be32(0xffffffff); + buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); + wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); + *wqe = stamp; + } + } else { + buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); + for (i = 64; i < s; i += 64) { + wqe = buf + i; + *wqe = 0xffffffff; + } + } +} + +static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) +{ + struct mlx4_wqe_ctrl_seg *ctrl; + struct mlx4_wqe_inline_seg *inl; + void *wqe; + int s; + + ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); + s = sizeof(struct mlx4_wqe_ctrl_seg); + + if (qp->ibqp.qp_type == IB_QPT_UD) { + struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; + struct mlx4_av *av = (struct mlx4_av *)dgram->av; + memset(dgram, 0, sizeof *dgram); + av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); + s += sizeof(struct mlx4_wqe_datagram_seg); + } + + /* Pad the remainder of the WQE with an inline data segment. */ + if (size > s) { + inl = wqe + s; + inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); + } + ctrl->srcrb_flags = 0; + ctrl->fence_size = size / 16; + /* + * Make sure descriptor is fully written before setting ownership bit + * (because HW can start executing as soon as we do). + */ + wmb(); + + ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) | + (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); - for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16) - wqe[i] = 0xffffffff; + stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); +} + +/* Post NOP WQE to prevent wrap-around in the middle of WR */ +static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) +{ + unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); + if (unlikely(s < qp->sq_max_wqes_per_wr)) { + post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); + ind += s; + } + return ind; } static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) @@ -237,6 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum ib_qp_type type, struct mlx4_ib_qp *qp) { + int s; + /* Sanity check SQ size before proceeding */ if (cap->max_send_wr > dev->dev->caps.max_wqes || cap->max_send_sge > dev->dev->caps.max_sq_sg || @@ -252,20 +327,74 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; - qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge * - sizeof (struct mlx4_wqe_data_seg), - cap->max_inline_data + - sizeof (struct mlx4_wqe_inline_seg)) + - send_wqe_overhead(type))); - qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) / - sizeof (struct mlx4_wqe_data_seg); + s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), + cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + + send_wqe_overhead(type); /* - * We need to leave 2 KB + 1 WQE of headroom in the SQ to - * allow HW to prefetch. + * Hermon supports shrinking WQEs, such that a single work + * request can include multiple units of 1 << wqe_shift. This + * way, work requests can differ in size, and do not have to + * be a power of 2 in size, saving memory and speeding up send + * WR posting. Unfortunately, if we do this then the + * wqe_index field in CQEs can't be used to look up the WR ID + * anymore, so we do this only if selective signaling is off. 
+ * + * Further, on 32-bit platforms, we can't use vmap() to make + * the QP buffer virtually contigious. Thus we have to use + * constant-sized WRs to make sure a WR is always fully within + * a single page-sized chunk. + * + * Finally, we use NOP work requests to pad the end of the + * work queue, to avoid wrap-around in the middle of WR. We + * set NEC bit to avoid getting completions with error for + * these NOP WRs, but since NEC is only supported starting + * with firmware 2.2.232, we use constant-sized WRs for older + * firmware. + * + * And, since MLX QPs only support SEND, we use constant-sized + * WRs in this case. + * + * We look for the smallest value of wqe_shift such that the + * resulting number of wqes does not exceed device + * capabilities. + * + * We set WQE size to at least 64 bytes, this way stamping + * invalidates each WQE. */ - qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1; - qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes); + if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && + qp->sq_signal_bits && BITS_PER_LONG == 64 && + type != IB_QPT_SMI && type != IB_QPT_GSI) + qp->sq.wqe_shift = ilog2(64); + else + qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); + + for (;;) { + if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz) + return -EINVAL; + + qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); + + /* + * We need to leave 2 KB + 1 WR of headroom in the SQ to + * allow HW to prefetch. + */ + qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; + qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * + qp->sq_max_wqes_per_wr + + qp->sq_spare_wqes); + + if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) + break; + + if (qp->sq_max_wqes_per_wr <= 1) + return -EINVAL; + + ++qp->sq.wqe_shift; + } + + qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) - + send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg); qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); @@ -277,7 +406,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, qp->sq.offset = 0; } - cap->max_send_wr = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes; + cap->max_send_wr = qp->sq.max_post = + (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; cap->max_send_sge = qp->sq.max_gs; /* We don't support inline sends for kernel QPs (yet) */ cap->max_inline_data = 0; @@ -323,6 +453,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; + qp->sq_next_wqe = 0; + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); + else + qp->sq_signal_bits = 0; err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); if (err) @@ -413,11 +549,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); - if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) - qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); - else - qp->sq_signal_bits = 0; - qp->mqp.event = mlx4_ib_qp_event; return 0; @@ -912,7 +1043,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, ctrl = get_send_wqe(qp, i); ctrl->owner_opcode = cpu_to_be32(1 << 31); - stamp_send_wqe(qp, i); + stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); } } @@ -965,6 +1096,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; + qp->sq_next_wqe = 0; if 
(!ibqp->srq) *qp->db.db = 0; } @@ -1274,13 +1406,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, unsigned long flags; int nreq; int err = 0; - int ind; - int size; + unsigned ind; + int uninitialized_var(stamp); + int uninitialized_var(size); int i; spin_lock_irqsave(&qp->sq.lock, flags); - ind = qp->sq.head; + ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { @@ -1296,7 +1429,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); - qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id; + qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; ctrl->srcrb_flags = (wr->send_flags & IB_SEND_SIGNALED ? @@ -1409,16 +1542,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); + stamp = ind + qp->sq_spare_wqes; + ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); + /* * We can improve latency by not stamping the last * send queue WQE until after ringing the doorbell, so * only stamp here if there are still more WQEs to post. + * + * Same optimization applies to padding with NOP wqe + * in case of WQE shrinking (used to prevent wrap-around + * in the middle of WR). */ - if (wr->next) - stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) & - (qp->sq.wqe_cnt - 1)); + if (wr->next) { + stamp_send_wqe(qp, stamp, size * 16); + ind = pad_wraparound(qp, ind); + } - ++ind; } out: @@ -1440,8 +1580,10 @@ out: */ mmiowb(); - stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) & - (qp->sq.wqe_cnt - 1)); + stamp_send_wqe(qp, stamp, size * 16); + + ind = pad_wraparound(qp, ind); + qp->sq_next_wqe = ind; } spin_unlock_irqrestore(&qp->sq.lock, flags); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 4210ac4a8bc..6cdf813cd47 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -133,6 +133,11 @@ enum { MLX4_STAT_RATE_OFFSET = 5 }; +static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) +{ + return (major << 32) | (minor << 16) | subminor; +} + struct mlx4_caps { u64 fw_ver; int num_ports; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 3968b943259..09a2230923f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -154,7 +154,11 @@ struct mlx4_qp_context { u32 reserved5[10]; }; +/* Which firmware version adds support for NEC (NoErrorCompletion) bit */ +#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) + enum { + MLX4_WQE_CTRL_NEC = 1 << 29, MLX4_WQE_CTRL_FENCE = 1 << 6, MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, MLX4_WQE_CTRL_SOLICITED = 1 << 1, -- cgit From eb14032f9eb595621270f3269f40094adb3144e8 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 30 Jan 2008 18:30:46 +0200 Subject: IPoIB: Add high DMA feature flag All current InfiniBand devices can handle all DMA addresses, and it's hard to imagine anyone would be silly enough to build a new device that couldn't. Therefore, enable the NETIF_F_HIGHDMA feature for IPoIB. This has no effect for now, but is needed when we enable gather/scatter support and checksum stateless offloads. 
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 09f5371137a..f96477a8ca5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -965,7 +965,9 @@ static void ipoib_setup(struct net_device *dev) dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; dev->tx_queue_len = ipoib_sendq_size * 2; - dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; + dev->features = (NETIF_F_VLAN_CHALLENGED | + NETIF_F_LLTX | + NETIF_F_HIGHDMA); /* MTU will be reset when mcast join happens */ dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; -- cgit From 7143740d26098aca84ecc7376ccfe2c58fd0412e Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 30 Jan 2008 18:30:53 +0200 Subject: IPoIB: Add send gather support This patch acts as a preparation for using checksum offload for IB devices capable of inserting/verifying checksum in IP packets. The patch does not actually turn on NETIF_F_SG - we defer that to the patches adding checksum offload capabilities. We only add support for send gathers for datagram mode, since existing HW does not support checksum offload on connected QPs. Signed-off-by: Michael S. Tsirkin Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib.h | 4 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 10 ++-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 89 +++++++++++++++++++++++------- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 10 +++- 4 files changed, 83 insertions(+), 30 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index fe250c60607..f9b7caa5414 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -143,7 +143,7 @@ struct ipoib_rx_buf { struct ipoib_tx_buf { struct sk_buff *skb; - u64 mapping; + u64 mapping[MAX_SKB_FRAGS + 1]; }; struct ib_cm_id; @@ -296,7 +296,7 @@ struct ipoib_dev_priv { struct ipoib_tx_buf *tx_ring; unsigned tx_head; unsigned tx_tail; - struct ib_sge tx_sge; + struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; struct ib_send_wr tx_wr; unsigned tx_outstanding; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 1818f958c25..7dd2ec473d2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -634,8 +634,8 @@ static inline int post_send(struct ipoib_dev_priv *priv, { struct ib_send_wr *bad_wr; - priv->tx_sge.addr = addr; - priv->tx_sge.length = len; + priv->tx_sge[0].addr = addr; + priv->tx_sge[0].length = len; priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; @@ -676,7 +676,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ return; } - tx_req->mapping = addr; + tx_req->mapping[0] = addr; if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), addr, skb->len))) { @@ -715,7 +715,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &tx->tx_ring[wr_id]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE); /* FIXME: is this right? Shouldn't we only increment on success? 
*/ ++dev->stats.tx_packets; @@ -1110,7 +1110,7 @@ timeout: while ((int) p->tx_tail - (int) p->tx_head < 0) { tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, + ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(tx_req->skb); ++p->tx_tail; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 52bc2bd5799..9d3e778dc56 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -239,6 +239,54 @@ repost: "for buf %d\n", wr_id); } +static int ipoib_dma_map_tx(struct ib_device *ca, + struct ipoib_tx_buf *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + u64 *mapping = tx_req->mapping; + int i; + + mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) + return -EIO; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping[i + 1] = ib_dma_map_page(ca, frag->page, + frag->page_offset, frag->size, + DMA_TO_DEVICE); + if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1]))) + goto partial_error; + } + return 0; + +partial_error: + ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + + for (; i > 0; --i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE); + } + return -EIO; +} + +static void ipoib_dma_unmap_tx(struct ib_device *ca, + struct ipoib_tx_buf *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + u64 *mapping = tx_req->mapping; + int i; + + ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + ib_dma_unmap_page(ca, mapping[i + 1], frag->size, + DMA_TO_DEVICE); + } +} + static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -257,8 +305,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &priv->tx_ring[wr_id]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, - tx_req->skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv->ca, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; @@ -341,16 +388,23 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) static inline int post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 qpn, - u64 addr, int len) + u64 *mapping, int headlen, + skb_frag_t *frags, + int nr_frags) { struct ib_send_wr *bad_wr; + int i; - priv->tx_sge.addr = addr; - priv->tx_sge.length = len; - - priv->tx_wr.wr_id = wr_id; - priv->tx_wr.wr.ud.remote_qpn = qpn; - priv->tx_wr.wr.ud.ah = address; + priv->tx_sge[0].addr = mapping[0]; + priv->tx_sge[0].length = headlen; + for (i = 0; i < nr_frags; ++i) { + priv->tx_sge[i + 1].addr = mapping[i + 1]; + priv->tx_sge[i + 1].length = frags[i].size; + } + priv->tx_wr.num_sge = nr_frags + 1; + priv->tx_wr.wr_id = wr_id; + priv->tx_wr.wr.ud.remote_qpn = qpn; + priv->tx_wr.wr.ud.ah = address; return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); } @@ -360,7 +414,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_tx_buf *tx_req; - u64 addr; if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to 
send, dropping\n", @@ -383,20 +436,19 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, */ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; - addr = ib_dma_map_single(priv->ca, skb->data, skb->len, - DMA_TO_DEVICE); - if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } - tx_req->mapping = addr; if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), - address->ah, qpn, addr, skb->len))) { + address->ah, qpn, + tx_req->mapping, skb_headlen(skb), + skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) { ipoib_warn(priv, "post_send failed\n"); ++dev->stats.tx_errors; - ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; @@ -615,10 +667,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; - ib_dma_unmap_single(priv->ca, - tx_req->mapping, - tx_req->skb->len, - DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 433e99ac227..a3aeb911f02 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -157,6 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) }; int ret, size; + int i; priv->pd = ib_alloc_pd(priv->ca); if (IS_ERR(priv->pd)) { @@ -191,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) init_attr.send_cq = priv->cq; init_attr.recv_cq = priv->cq; + if (dev->features & NETIF_F_SG) + init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; + priv->qp = ib_create_qp(priv->pd, &init_attr); if (IS_ERR(priv->qp)) { printk(KERN_WARNING "%s: failed to create QP\n", ca->name); @@ -201,11 +205,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; - priv->tx_sge.lkey = priv->mr->lkey; + for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) + priv->tx_sge[i].lkey = priv->mr->lkey; priv->tx_wr.opcode = IB_WR_SEND; - priv->tx_wr.sg_list = &priv->tx_sge; - priv->tx_wr.num_sge = 1; + priv->tx_wr.sg_list = priv->tx_sge; priv->tx_wr.send_flags = IB_SEND_SIGNALED; return 0; -- cgit From e0605d9199b462454f2f2e5ca01810255a6d5cfa Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 30 Jan 2008 18:30:57 +0200 Subject: IB/core: Add IP checksum offload support Add a device capability to show when it can handle checksum offload. Also add a send flag for inserting checksums and a csum_ok field to the completion record. 
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- include/rdma/ib_verbs.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index cfbd38fe299..a5a7f9678ab 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -95,7 +95,15 @@ enum ib_device_cap_flags { IB_DEVICE_N_NOTIFY_CQ = (1<<14), IB_DEVICE_ZERO_STAG = (1<<15), IB_DEVICE_SEND_W_INV = (1<<16), - IB_DEVICE_MEM_WINDOW = (1<<17) + IB_DEVICE_MEM_WINDOW = (1<<17), + /* + * Devices should set IB_DEVICE_UD_IP_SUM if they support + * insertion of UDP and TCP checksum on outgoing UD IPoIB + * messages and can verify the validity of checksum for + * incoming messages. Setting this flag implies that the + * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. + */ + IB_DEVICE_UD_IP_CSUM = (1<<18), }; enum ib_atomic_cap { @@ -431,6 +439,7 @@ struct ib_wc { u8 sl; u8 dlid_path_bits; u8 port_num; /* valid only for DR SMPs on switches */ + int csum_ok; }; enum ib_cq_notify_flags { @@ -615,7 +624,8 @@ enum ib_send_flags { IB_SEND_FENCE = 1, IB_SEND_SIGNALED = (1<<1), IB_SEND_SOLICITED = (1<<2), - IB_SEND_INLINE = (1<<3) + IB_SEND_INLINE = (1<<3), + IB_SEND_IP_CSUM = (1<<4) }; struct ib_sge { -- cgit From 5128bdc97a1018aacac2550cf73bda61041cc3b8 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 8 Feb 2008 14:47:26 -0800 Subject: IB/core: Remove unused struct ib_device.flags member Avoid confusion about what it might mean, since it's never initialized. Signed-off-by: Roland Dreier --- include/rdma/ib_verbs.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a5a7f9678ab..701e7b40560 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -900,8 +900,6 @@ struct ib_device { int *pkey_tbl_len; int *gid_tbl_len; - u32 flags; - int num_comp_vectors; struct iw_cm_verbs *iwcm; -- cgit
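As a closing illustration of the send queue sizing search introduced in the "IB/mlx4: Use multiple WQ blocks to post smaller send WQEs" patch earlier in this series, the following standalone userspace sketch mirrors that loop with made-up numbers; the device limits and the work request size are illustrative assumptions, not values read from real hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int max_sq_desc_sz = 1008;     /* assumed device limit: bytes per descriptor */
        unsigned int max_wqes = 16384;          /* assumed device limit: WQEs per QP */
        unsigned int max_send_wr = 1000;        /* what the consumer asked for */
        unsigned int s = 208;                   /* bytes needed by the largest WR (segments + overhead) */
        unsigned int wqe_shift = 6;             /* shrinking-WQE case: start at 64-byte basic blocks */
        unsigned int wqes_per_wr, spare_wqes, wqe_cnt;

        for (;;) {
                if ((1U << wqe_shift) > max_sq_desc_sz)
                        return 1;       /* cannot satisfy the request at all */

                wqes_per_wr = DIV_ROUND_UP(s, 1U << wqe_shift);
                /* 2 KB plus one full WR of headroom so the HCA can prefetch safely */
                spare_wqes = (2048 >> wqe_shift) + wqes_per_wr;
                wqe_cnt = roundup_pow_of_two(max_send_wr * wqes_per_wr + spare_wqes);

                if (wqe_cnt <= max_wqes)
                        break;
                if (wqes_per_wr <= 1)
                        return 1;       /* already one basic block per WR; cannot shrink further */
                ++wqe_shift;            /* use larger basic blocks, i.e. fewer units per WR */
        }

        /* For these numbers: wqe_shift=6, wqes_per_wr=4, wqe_cnt=4096, max_post=1015 */
        printf("wqe_shift=%u wqes_per_wr=%u wqe_cnt=%u max_post=%u\n",
               wqe_shift, wqes_per_wr, wqe_cnt,
               (wqe_cnt - spare_wqes) / wqes_per_wr);
        return 0;
}

When work requests are selectively signaled, on older firmware, on 32-bit kernels, or for the special MLX (SMI/GSI) QPs, the driver instead starts from wqe_shift = ilog2(roundup_pow_of_two(s)), so each work request occupies exactly one WQE and the loop keeps the pre-patch behaviour.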