From 767e468c06fc0e88f95881c1056437688b37c7c6 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:43:14 +0100 Subject: sfc: Replace net_dev->priv with netdev_priv(net_dev) Use of the net_device::priv field is deprecated. Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5e8374ab28e..2a09101f67e 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -367,7 +367,7 @@ inline int efx_xmit(struct efx_nic *efx, */ int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { - struct efx_nic *efx = net_dev->priv; + struct efx_nic *efx = netdev_priv(net_dev); return efx_xmit(efx, &efx->tx_queue[0], skb); } -- cgit From 60ac10658c2e234cf7bc27e0930e324c6c6fcf61 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:44:59 +0100 Subject: sfc: Use separate hardware TX queues to select checksum generation Checksum generation is an attribute of our hardware TX queues, not TX descriptors. We previously used a single queue and turned checksum generation on or off as requested through ethtool. However, this can result in regenerating checksums in raw packets that should not be modified. We now create 2 hardware TX queues with checksum generation on or off. They are presented to the net core as one queue since it does not know how to select between them. The self-test verifies that a bad checksum is unaltered on the queue with checksum generation off. Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 2a09101f67e..e5e0bab313e 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -368,7 +368,14 @@ inline int efx_xmit(struct efx_nic *efx, int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - return efx_xmit(efx, &efx->tx_queue[0], skb); + struct efx_tx_queue *tx_queue; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM]; + else + tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; + + return efx_xmit(efx, tx_queue, skb); } void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) @@ -412,26 +419,21 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) /* Allocate software ring */ txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); - if (!tx_queue->buffer) { - rc = -ENOMEM; - goto fail1; - } + if (!tx_queue->buffer) + return -ENOMEM; for (i = 0; i <= efx->type->txd_ring_mask; ++i) tx_queue->buffer[i].continuation = 1; /* Allocate hardware ring */ rc = falcon_probe_tx(tx_queue); if (rc) - goto fail2; + goto fail; return 0; - fail2: + fail: kfree(tx_queue->buffer); tx_queue->buffer = NULL; - fail1: - tx_queue->used = 0; - return rc; } @@ -494,7 +496,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) kfree(tx_queue->buffer); tx_queue->buffer = NULL; - tx_queue->used = 0; } -- cgit From 5988b63a53e120a9db4439d4512f4c1b17e7170e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:46:36 +0100 Subject: sfc: Don't leak PCI DMA maps in the TSO code when the queue fills up Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- 
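Note (illustration only, not part of this patch): the leak occurs when efx_tx_queue_insert() runs out of descriptor-ring space partway through a fragment. Ownership of a fragment's DMA mapping only passes to a TX buffer once the whole fragment has been pushed, so on this early exit the tso_state input cursor (the ifc fields used below; a later patch in this series renames them) still holds a live mapping, and the error path has to release it before backing out the descriptors already queued:

	if (state.ifc.unmap_len)	/* mapping the ring never took over */
		pci_unmap_page(tx_queue->efx->pci_dev, state.ifc.unmap_addr,
			       state.ifc.unmap_len, PCI_DMA_TODEVICE);
	efx_enqueue_unwind(tx_queue);
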
drivers/net/sfc/tx.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index e5e0bab313e..0452ea6937a 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -1096,6 +1096,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, efx_stop_queue(tx_queue->efx); unwind: + /* Free the DMA mapping we were in the process of writing out */ + if (state.ifc.unmap_len) + pci_unmap_page(tx_queue->efx->pci_dev, state.ifc.unmap_addr, + state.ifc.unmap_len, PCI_DMA_TODEVICE); + efx_enqueue_unwind(tx_queue); return rc2; } -- cgit From ecbd95c17c221913cc3c5776051b2fa8b3b97316 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:46:40 +0100 Subject: sfc: Use pci_map_single() to map the skb header when doing TSO Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 152 ++++++++++++++++++++++++++++----------------------- 1 file changed, 83 insertions(+), 69 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 0452ea6937a..11127757c05 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -287,9 +287,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, } /* Free the fragment we were mid-way through pushing */ - if (unmap_len) - pci_unmap_page(pci_dev, unmap_addr, unmap_len, - PCI_DMA_TODEVICE); + if (unmap_len) { + if (unmap_single) + pci_unmap_single(pci_dev, unmap_addr, unmap_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(pci_dev, unmap_addr, unmap_len, + PCI_DMA_TODEVICE); + } return rc; } @@ -561,8 +566,7 @@ struct tso_state { /* DMA address and length of the whole fragment */ unsigned int unmap_len; dma_addr_t unmap_addr; - struct page *page; - unsigned page_off; + unsigned int unmap_single; } ifc; struct { @@ -686,18 +690,14 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) * @tx_queue: Efx TX queue * @dma_addr: DMA address of fragment * @len: Length of fragment - * @skb: Only non-null for end of last segment - * @end_of_packet: True if last fragment in a packet - * @unmap_addr: DMA address of fragment for unmapping - * @unmap_len: Only set this in last segment of a fragment + * @final_buffer: The final buffer inserted into the queue * * Push descriptors onto the TX queue. Return 0 on success or 1 if * @tx_queue full. 
*/ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned len, - const struct sk_buff *skb, int end_of_packet, - dma_addr_t unmap_addr, unsigned unmap_len) + struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; @@ -725,8 +725,10 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->type->txd_ring_mask - 1 - fill_level; - if (unlikely(q_space-- <= 0)) + if (unlikely(q_space-- <= 0)) { + *final_buffer = NULL; return 1; + } smp_mb(); --tx_queue->stopped; } @@ -766,10 +768,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(!len); buffer->len = len; - buffer->skb = skb; - buffer->continuation = !end_of_packet; - buffer->unmap_addr = unmap_addr; - buffer->unmap_len = unmap_len; + *final_buffer = buffer; return 0; } @@ -817,9 +816,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) buffer->len = 0; buffer->continuation = 1; if (buffer->unmap_len) { - pci_unmap_page(tx_queue->efx->pci_dev, - buffer->unmap_addr, - buffer->unmap_len, PCI_DMA_TODEVICE); + if (buffer->unmap_single) + pci_unmap_single(tx_queue->efx->pci_dev, + buffer->unmap_addr, + buffer->unmap_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(tx_queue->efx->pci_dev, + buffer->unmap_addr, + buffer->unmap_len, + PCI_DMA_TODEVICE); buffer->unmap_len = 0; } } @@ -846,31 +852,40 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) st->packet_space = st->p.full_packet_size; st->remaining_len = skb->len - st->p.header_length; + st->ifc.unmap_len = 0; + st->ifc.unmap_single = 0; } - -/** - * tso_get_fragment - record fragment details and map for DMA - * @st: TSO state - * @efx: Efx NIC - * @data: Pointer to fragment data - * @len: Length of fragment - * - * Record fragment details and map for DMA. Return 0 on success, or - * -%ENOMEM if DMA mapping fails. 
- */ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, - int len, struct page *page, int page_off) + skb_frag_t *frag) { + st->ifc.unmap_addr = pci_map_page(efx->pci_dev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { + st->ifc.unmap_single = 0; + st->ifc.unmap_len = frag->size; + st->ifc.len = frag->size; + st->ifc.dma_addr = st->ifc.unmap_addr; + return 0; + } + return -ENOMEM; +} + +static inline int +tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, + const struct sk_buff *skb) +{ + int hl = st->p.header_length; + int len = skb_headlen(skb) - hl; - st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, - len, PCI_DMA_TODEVICE); + st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, + len, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { + st->ifc.unmap_single = 1; st->ifc.unmap_len = len; st->ifc.len = len; st->ifc.dma_addr = st->ifc.unmap_addr; - st->ifc.page = page; - st->ifc.page_off = page_off; return 0; } return -ENOMEM; @@ -891,7 +906,7 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { - + struct efx_tx_buffer *buffer; int n, end_of_packet, rc; if (st->ifc.len == 0) @@ -907,16 +922,25 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, st->packet_space -= n; st->remaining_len -= n; st->ifc.len -= n; - st->ifc.page_off += n; - end_of_packet = st->remaining_len == 0 || st->packet_space == 0; - rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, - st->remaining_len ? NULL : skb, - end_of_packet, st->ifc.unmap_addr, - st->ifc.len ? 0 : st->ifc.unmap_len); + rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, &buffer); + if (likely(rc == 0)) { + if (st->remaining_len == 0) + /* Transfer ownership of the skb */ + buffer->skb = skb; - st->ifc.dma_addr += n; + end_of_packet = st->remaining_len == 0 || st->packet_space == 0; + buffer->continuation = !end_of_packet; + if (st->ifc.len == 0) { + /* Transfer ownership of the pci mapping */ + buffer->unmap_len = st->ifc.unmap_len; + buffer->unmap_single = st->ifc.unmap_single; + st->ifc.unmap_len = 0; + } + } + + st->ifc.dma_addr += n; return rc; } @@ -1008,9 +1032,9 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, const struct sk_buff *skb) { + struct efx_nic *efx = tx_queue->efx; int frag_i, rc, rc2 = NETDEV_TX_OK; struct tso_state state; - skb_frag_t *f; /* Verify TSO is safe - these checks should never fail. */ efx_tso_check_safe(skb); @@ -1026,25 +1050,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; - f = &skb_shinfo(skb)->frags[frag_i]; - rc = tso_get_fragment(&state, tx_queue->efx, - f->size, f->page, f->page_offset); + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } else { - /* It may look like this code fragment assumes that the - * skb->data portion does not cross a page boundary, but - * that is not the case. It is guaranteed to be direct - * mapped memory, and therefore is physically contiguous, - * and so DMA will work fine. kmap_atomic() on this region - * will just return the direct mapping, so that will work - * too. 
- */ - int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); - int hl = state.p.header_length; - rc = tso_get_fragment(&state, tx_queue->efx, - skb_headlen(skb) - hl, - virt_to_page(skb->data), page_off + hl); + rc = tso_get_head_fragment(&state, efx, skb); if (rc) goto mem_err; frag_i = -1; @@ -1063,9 +1074,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, if (++frag_i >= skb_shinfo(skb)->nr_frags) /* End of payload reached. */ break; - f = &skb_shinfo(skb)->frags[frag_i]; - rc = tso_get_fragment(&state, tx_queue->efx, - f->size, f->page, f->page_offset); + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } @@ -1083,8 +1093,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, return NETDEV_TX_OK; mem_err: - EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" - " error\n"); + EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); dev_kfree_skb_any((struct sk_buff *)skb); goto unwind; @@ -1093,13 +1102,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Stop the queue if it wasn't stopped before. */ if (tx_queue->stopped == 1) - efx_stop_queue(tx_queue->efx); + efx_stop_queue(efx); unwind: /* Free the DMA mapping we were in the process of writing out */ - if (state.ifc.unmap_len) - pci_unmap_page(tx_queue->efx->pci_dev, state.ifc.unmap_addr, - state.ifc.unmap_len, PCI_DMA_TODEVICE); + if (state.ifc.unmap_len) { + if (state.ifc.unmap_single) + pci_unmap_single(efx->pci_dev, state.ifc.unmap_addr, + state.ifc.unmap_len, PCI_DMA_TODEVICE); + else + pci_unmap_page(efx->pci_dev, state.ifc.unmap_addr, + state.ifc.unmap_len, PCI_DMA_TODEVICE); + } efx_enqueue_unwind(tx_queue); return rc2; -- cgit From cc12dac2e512c2b6185ed91899e09e9910630315 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:46:43 +0100 Subject: sfc: Reduce the size of struct efx_tx_buffer Remove unmap_addr since it can be calculated from dma_addr, len and unmap_len. This saves 4-16 bytes. 
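
An editorial illustration of the recalculation (the expression is the one the diff below introduces): the buffer that owns the unmapping is always the last descriptor cut from a mapped region, so dma_addr + len is the end of that region and unmap_len reaches back to its start:

	dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
				 buffer->unmap_len);
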
Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 11127757c05..5f01371baaf 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -64,12 +64,14 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; + dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - + buffer->unmap_len); if (buffer->unmap_single) - pci_unmap_single(pci_dev, buffer->unmap_addr, - buffer->unmap_len, PCI_DMA_TODEVICE); + pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, + PCI_DMA_TODEVICE); else - pci_unmap_page(pci_dev, buffer->unmap_addr, - buffer->unmap_len, PCI_DMA_TODEVICE); + pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, + PCI_DMA_TODEVICE); buffer->unmap_len = 0; buffer->unmap_single = 0; } @@ -233,7 +235,6 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, } while (len); /* Transfer ownership of the unmapping to the final buffer */ - buffer->unmap_addr = unmap_addr; buffer->unmap_single = unmap_single; buffer->unmap_len = unmap_len; unmap_len = 0; @@ -805,6 +806,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; + dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { @@ -816,15 +818,15 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) buffer->len = 0; buffer->continuation = 1; if (buffer->unmap_len) { + unmap_addr = (buffer->dma_addr + buffer->len - + buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(tx_queue->efx->pci_dev, - buffer->unmap_addr, - buffer->unmap_len, + unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(tx_queue->efx->pci_dev, - buffer->unmap_addr, - buffer->unmap_len, + unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; } -- cgit From dc8cfa55da8c21e0b3290c29677a9d05c0a3e595 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:46:50 +0100 Subject: sfc: Use explicit bool for boolean variables, parameters and return values Replace (cond ? 1 : 0) with cond or !!cond as appropriate, and (cond ? 0 : 1) with !cond. Remove some redundant boolean temporaries. Rename one field that looks like a flag but isn't. 
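
Schematic before/after pairs for the three conversions named above (identifiers are illustrative, not lifted from the driver):

	buffer->unmap_single = single ? 1 : 0;	/* before */
	buffer->unmap_single = single;		/* after: cond */

	buffer->continuation = eop ? 0 : 1;	/* before */
	buffer->continuation = !eop;		/* after: !cond */

	stopped = atomic_read(&count) ? 1 : 0;	/* before */
	stopped = !!atomic_read(&count);	/* after: !!cond for non-bool values */
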
Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5f01371baaf..51429b6a4de 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -73,7 +73,7 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; - buffer->unmap_single = 0; + buffer->unmap_single = false; } if (buffer->skb) { @@ -150,7 +150,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; - unsigned unmap_single; + bool unmap_single; int q_space, i = 0; int rc = NETDEV_TX_OK; @@ -169,7 +169,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, * since this is more efficient on machines with sparse * memory. */ - unmap_single = 1; + unmap_single = true; dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); /* Process all fragments */ @@ -215,7 +215,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); @@ -248,14 +248,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, page_offset = fragment->page_offset; i++; /* Map for DMA */ - unmap_single = 0; + unmap_single = false; dma_addr = pci_map_page(pci_dev, page, page_offset, len, PCI_DMA_TODEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; - buffer->continuation = 0; + buffer->continuation = false; /* Pass off to hardware */ falcon_push_buffers(tx_queue); @@ -326,7 +326,7 @@ static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } efx_dequeue_buffer(tx_queue, buffer); - buffer->continuation = 1; + buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; @@ -428,7 +428,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) if (!tx_queue->buffer) return -ENOMEM; for (i = 0; i <= efx->type->txd_ring_mask; ++i) - tx_queue->buffer[i].continuation = 1; + tx_queue->buffer[i].continuation = true; /* Allocate hardware ring */ rc = falcon_probe_tx(tx_queue); @@ -469,7 +469,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->efx->type->txd_ring_mask]; efx_dequeue_buffer(tx_queue, buffer); - buffer->continuation = 1; + buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; @@ -567,7 +567,7 @@ struct tso_state { /* DMA address and length of the whole fragment */ unsigned int unmap_len; dma_addr_t unmap_addr; - unsigned int unmap_single; + bool unmap_single; } ifc; struct { @@ -746,7 +746,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->dma_addr = dma_addr; @@ -792,7 +792,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, 
EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->len = len; buffer->dma_addr = tsoh->dma_addr; @@ -816,7 +816,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->skb); buffer->len = 0; - buffer->continuation = 1; + buffer->continuation = true; if (buffer->unmap_len) { unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); @@ -855,7 +855,7 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) st->packet_space = st->p.full_packet_size; st->remaining_len = skb->len - st->p.header_length; st->ifc.unmap_len = 0; - st->ifc.unmap_single = 0; + st->ifc.unmap_single = false; } static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, @@ -865,7 +865,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, frag->page_offset, frag->size, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { - st->ifc.unmap_single = 0; + st->ifc.unmap_single = false; st->ifc.unmap_len = frag->size; st->ifc.len = frag->size; st->ifc.dma_addr = st->ifc.unmap_addr; @@ -884,7 +884,7 @@ tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, len, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { - st->ifc.unmap_single = 1; + st->ifc.unmap_single = true; st->ifc.unmap_len = len; st->ifc.len = len; st->ifc.dma_addr = st->ifc.unmap_addr; -- cgit From 23d9e60b1ddc67ffedd77161ecff4895708088a4 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:47:02 +0100 Subject: sfc: Cleaned up struct tso_state fields Squashed nested structures. Renamed remaining_len to out_len, ifc.len to in_len, header_length to header_len. Moved ipv4_id into the group of output variables where it belongs. Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 161 ++++++++++++++++++++++++--------------------------- 1 file changed, 75 insertions(+), 86 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 51429b6a4de..550856fab16 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -540,46 +540,37 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) /** * struct tso_state - TSO state for an SKB - * @remaining_len: Bytes of data we've yet to segment + * @out_len: Remaining length in current segment * @seqnum: Current sequence number + * @ipv4_id: Current IPv4 ID, host endian * @packet_space: Remaining space in current packet - * @ifc: Input fragment cursor. - * Where we are in the current fragment of the incoming SKB. These - * values get updated in place when we split a fragment over - * multiple packets. - * @p: Parameters. - * These values are set once at the start of the TSO send and do - * not get changed as the routine progresses. + * @dma_addr: DMA address of current position + * @in_len: Remaining length in current SKB fragment + * @unmap_len: Length of SKB fragment + * @unmap_addr: DMA address of SKB fragment + * @unmap_single: DMA single vs page mapping flag + * @header_len: Number of bytes of header + * @full_packet_size: Number of bytes to put in each outgoing segment * * The state used during segmentation. 
It is put into this data structure * just to make it easy to pass into inline functions. */ struct tso_state { - unsigned remaining_len; + /* Output position */ + unsigned out_len; unsigned seqnum; + unsigned ipv4_id; unsigned packet_space; - struct { - /* DMA address of current position */ - dma_addr_t dma_addr; - /* Remaining length */ - unsigned int len; - /* DMA address and length of the whole fragment */ - unsigned int unmap_len; - dma_addr_t unmap_addr; - bool unmap_single; - } ifc; - - struct { - /* The number of bytes of header */ - unsigned int header_length; - - /* The number of bytes to put in each outgoing segment. */ - int full_packet_size; - - /* Current IPv4 ID, host endian. */ - unsigned ipv4_id; - } p; + /* Input position */ + dma_addr_t dma_addr; + unsigned in_len; + unsigned unmap_len; + dma_addr_t unmap_addr; + bool unmap_single; + + unsigned header_len; + int full_packet_size; }; @@ -840,35 +831,34 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) /* All ethernet/IP/TCP headers combined size is TCP header size * plus offset of TCP header relative to start of packet. */ - st->p.header_length = ((tcp_hdr(skb)->doff << 2u) - + PTR_DIFF(tcp_hdr(skb), skb->data)); - st->p.full_packet_size = (st->p.header_length - + skb_shinfo(skb)->gso_size); + st->header_len = ((tcp_hdr(skb)->doff << 2u) + + PTR_DIFF(tcp_hdr(skb), skb->data)); + st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; - st->p.ipv4_id = ntohs(ip_hdr(skb)->id); + st->ipv4_id = ntohs(ip_hdr(skb)->id); st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); - st->packet_space = st->p.full_packet_size; - st->remaining_len = skb->len - st->p.header_length; - st->ifc.unmap_len = 0; - st->ifc.unmap_single = false; + st->packet_space = st->full_packet_size; + st->out_len = skb->len - st->header_len; + st->unmap_len = 0; + st->unmap_single = false; } static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, skb_frag_t *frag) { - st->ifc.unmap_addr = pci_map_page(efx->pci_dev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { - st->ifc.unmap_single = false; - st->ifc.unmap_len = frag->size; - st->ifc.len = frag->size; - st->ifc.dma_addr = st->ifc.unmap_addr; + st->unmap_addr = pci_map_page(efx->pci_dev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { + st->unmap_single = false; + st->unmap_len = frag->size; + st->in_len = frag->size; + st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; @@ -878,16 +868,16 @@ static inline int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, const struct sk_buff *skb) { - int hl = st->p.header_length; + int hl = st->header_len; int len = skb_headlen(skb) - hl; - st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, - len, PCI_DMA_TODEVICE); - if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { - st->ifc.unmap_single = true; - st->ifc.unmap_len = len; - st->ifc.len = len; - st->ifc.dma_addr = st->ifc.unmap_addr; + st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, + len, PCI_DMA_TODEVICE); + if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { + st->unmap_single = true; + st->unmap_len = len; + st->in_len = len; + st->dma_addr = st->unmap_addr; return 0; } return 
-ENOMEM; @@ -911,38 +901,38 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer; int n, end_of_packet, rc; - if (st->ifc.len == 0) + if (st->in_len == 0) return 0; if (st->packet_space == 0) return 0; - EFX_BUG_ON_PARANOID(st->ifc.len <= 0); + EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); - n = min(st->ifc.len, st->packet_space); + n = min(st->in_len, st->packet_space); st->packet_space -= n; - st->remaining_len -= n; - st->ifc.len -= n; + st->out_len -= n; + st->in_len -= n; - rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, &buffer); + rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); if (likely(rc == 0)) { - if (st->remaining_len == 0) + if (st->out_len == 0) /* Transfer ownership of the skb */ buffer->skb = skb; - end_of_packet = st->remaining_len == 0 || st->packet_space == 0; + end_of_packet = st->out_len == 0 || st->packet_space == 0; buffer->continuation = !end_of_packet; - if (st->ifc.len == 0) { + if (st->in_len == 0) { /* Transfer ownership of the pci mapping */ - buffer->unmap_len = st->ifc.unmap_len; - buffer->unmap_single = st->ifc.unmap_single; - st->ifc.unmap_len = 0; + buffer->unmap_len = st->unmap_len; + buffer->unmap_single = st->unmap_single; + st->unmap_len = 0; } } - st->ifc.dma_addr += n; + st->dma_addr += n; return rc; } @@ -967,7 +957,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, u8 *header; /* Allocate a DMA-mapped header buffer. */ - if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { + if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { if (tx_queue->tso_headers_free == NULL) { if (efx_tsoh_block_alloc(tx_queue)) return -1; @@ -978,7 +968,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, tsoh->unmap_len = 0; } else { tx_queue->tso_long_headers++; - tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); + tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); if (unlikely(!tsoh)) return -1; } @@ -988,33 +978,32 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); /* Copy and update the headers. */ - memcpy(header, skb->data, st->p.header_length); + memcpy(header, skb->data, st->header_len); tsoh_th->seq = htonl(st->seqnum); st->seqnum += skb_shinfo(skb)->gso_size; - if (st->remaining_len > skb_shinfo(skb)->gso_size) { + if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ - ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); + ip_length = st->full_packet_size - ETH_HDR_LEN(skb); tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. */ - ip_length = (st->p.header_length - ETH_HDR_LEN(skb) - + st->remaining_len); + ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } tsoh_iph->tot_len = htons(ip_length); /* Linux leaves suitable gaps in the IP ID space for us to fill. */ - tsoh_iph->id = htons(st->p.ipv4_id); - st->p.ipv4_id++; + tsoh_iph->id = htons(st->ipv4_id); + st->ipv4_id++; st->packet_space = skb_shinfo(skb)->gso_size; ++tx_queue->tso_packets; /* Form a descriptor for this header. 
*/ - efx_tso_put_header(tx_queue, tsoh, st->p.header_length); + efx_tso_put_header(tx_queue, tsoh, st->header_len); return 0; } @@ -1048,7 +1037,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Assume that skb header area contains exactly the headers, and * all payload is in the frag list. */ - if (skb_headlen(skb) == state.p.header_length) { + if (skb_headlen(skb) == state.header_len) { /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; @@ -1072,7 +1061,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, goto stop; /* Move onto the next fragment? */ - if (state.ifc.len == 0) { + if (state.in_len == 0) { if (++frag_i >= skb_shinfo(skb)->nr_frags) /* End of payload reached. */ break; @@ -1108,13 +1097,13 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, unwind: /* Free the DMA mapping we were in the process of writing out */ - if (state.ifc.unmap_len) { - if (state.ifc.unmap_single) - pci_unmap_single(efx->pci_dev, state.ifc.unmap_addr, - state.ifc.unmap_len, PCI_DMA_TODEVICE); + if (state.unmap_len) { + if (state.unmap_single) + pci_unmap_single(efx->pci_dev, state.unmap_addr, + state.unmap_len, PCI_DMA_TODEVICE); else - pci_unmap_page(efx->pci_dev, state.ifc.unmap_addr, - state.ifc.unmap_len, PCI_DMA_TODEVICE); + pci_unmap_page(efx->pci_dev, state.unmap_addr, + state.unmap_len, PCI_DMA_TODEVICE); } efx_enqueue_unwind(tx_queue); -- cgit From 4d566063a799231b99d9a21128634ea78b89ab72 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:47:12 +0100 Subject: sfc: Removed forced inlining of long functions gcc will automatically inline static functions with only one caller, and may inline other functions depending on the kernel configuration and size of the intermediate code. Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 47 +++++++++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 24 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 550856fab16..0e9889ca68f 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -47,7 +47,7 @@ void efx_stop_queue(struct efx_nic *efx) * We want to be able to nest calls to netif_stop_queue(), since each * channel can have an individual stop on the queue. */ -inline void efx_wake_queue(struct efx_nic *efx) +void efx_wake_queue(struct efx_nic *efx) { local_bh_disable(); if (atomic_dec_and_lock(&efx->netif_stop_count, @@ -59,8 +59,8 @@ inline void efx_wake_queue(struct efx_nic *efx) local_bh_enable(); } -static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer) +static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; @@ -110,8 +110,8 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue); static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh); -static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer) +static void efx_tsoh_free(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) { if (buffer->tsoh) { if (likely(!buffer->tsoh->unmap_len)) { @@ -138,8 +138,8 @@ static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, * Returns NETDEV_TX_OK or NETDEV_TX_BUSY * You must hold netif_tx_lock() to call this function. 
*/ -static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb) +static int efx_enqueue_skb(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct pci_dev *pci_dev = efx->pci_dev; @@ -305,8 +305,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, * This removes packets from the TX queue, up to and including the * specified index. */ -static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, - unsigned int index) +static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, + unsigned int index) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; @@ -578,7 +578,7 @@ struct tso_state { * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. */ -static inline void efx_tso_check_safe(const struct sk_buff *skb) +static void efx_tso_check_safe(const struct sk_buff *skb) { EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != @@ -772,8 +772,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ -static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh, unsigned len) +static void efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh, unsigned len) { struct efx_tx_buffer *buffer; @@ -826,7 +826,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) /* Parse the SKB header and initialise state. */ -static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) +static void tso_start(struct tso_state *st, const struct sk_buff *skb) { /* All ethernet/IP/TCP headers combined size is TCP header size * plus offset of TCP header relative to start of packet. @@ -848,8 +848,8 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) st->unmap_single = false; } -static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, - skb_frag_t *frag) +static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, + skb_frag_t *frag) { st->unmap_addr = pci_map_page(efx->pci_dev, frag->page, frag->page_offset, frag->size, @@ -864,9 +864,8 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, return -ENOMEM; } -static inline int -tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, - const struct sk_buff *skb) +static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, + const struct sk_buff *skb) { int hl = st->header_len; int len = skb_headlen(skb) - hl; @@ -894,9 +893,9 @@ tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space in @tx_queue. */ -static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb, - struct tso_state *st) +static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) { struct efx_tx_buffer *buffer; int n, end_of_packet, rc; @@ -946,9 +945,9 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, * Generate a new header and prepare for the new packet. Return 0 on * success, or -1 if failed to alloc header. 
*/ -static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb, - struct tso_state *st) +static int tso_start_new_packet(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) { struct efx_tso_header *tsoh; struct iphdr *tsoh_iph; -- cgit From 740847dab16b1a410a0f833df2bf21c8ed8265f3 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:48:23 +0100 Subject: sfc: Enable TSO for 802.1q VLAN devices Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 0e9889ca68f..a3a3eddd30b 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -105,7 +105,7 @@ struct efx_tso_header { }; static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb); + struct sk_buff *skb); static void efx_fini_tso(struct efx_tx_queue *tx_queue); static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh); @@ -139,7 +139,7 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue, * You must hold netif_tx_lock() to call this function. */ static int efx_enqueue_skb(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb) + struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct pci_dev *pci_dev = efx->pci_dev; @@ -578,11 +578,24 @@ struct tso_state { * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. */ -static void efx_tso_check_safe(const struct sk_buff *skb) +static void efx_tso_check_safe(struct sk_buff *skb) { - EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); + __be16 protocol = skb->protocol; + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != - skb->protocol); + protocol); + if (protocol == htons(ETH_P_8021Q)) { + /* Find the encapsulated protocol; reset network header + * and transport header based on that. */ + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + skb_set_network_header(skb, sizeof(*veh)); + if (protocol == htons(ETH_P_IP)) + skb_set_transport_header(skb, sizeof(*veh) + + 4 * ip_hdr(skb)->ihl); + } + + EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP)); EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > @@ -1020,7 +1033,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, * %NETDEV_TX_OK or %NETDEV_TX_BUSY. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb) + struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; int frag_i, rc, rc2 = NETDEV_TX_OK; -- cgit From bc3c90a2b70652b87cde8de65275d6f41d0f24c3 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:48:46 +0100 Subject: sfc: Remove some unreachable error paths Some functions return an error code which is always 0. Change their return types to void and simplify their callers accordingly. 
Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index a3a3eddd30b..cdee7c200d6 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -443,7 +443,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) return rc; } -int efx_init_tx_queue(struct efx_tx_queue *tx_queue) +void efx_init_tx_queue(struct efx_tx_queue *tx_queue) { EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); @@ -454,7 +454,7 @@ int efx_init_tx_queue(struct efx_tx_queue *tx_queue) BUG_ON(tx_queue->stopped); /* Set up TX descriptor ring */ - return falcon_init_tx(tx_queue); + falcon_init_tx(tx_queue); } void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) -- cgit From 13e9ab11430c4bdc4b6bb97e3d3821ebdc043712 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Sep 2008 12:50:28 +0100 Subject: sfc: Use CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS where appropriate For some buffers we use a starting offset of either NET_IP_ALIGN or 0 depending on whether we believe the architecture supports efficient access to unaligned words. There is now a config macro specifying whether this is the case, so check that rather than checking for specific architectures. Signed-off-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/net/sfc/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/sfc/tx.c') diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index cdee7c200d6..da3e9ff339f 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -516,7 +516,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) /* Number of bytes inserted at the start of a TSO header buffer, * similar to NET_IP_ALIGN. */ -#if defined(__i386__) || defined(__x86_64__) +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define TSOH_OFFSET 0 #else #define TSOH_OFFSET NET_IP_ALIGN -- cgit
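
A closing illustration for the 802.1q change earlier in this series (the code mirrors that patch's hunk in efx_tso_check_safe()): a VLAN tag inserts 4 bytes between the source MAC and the EtherType, so the encapsulated protocol and the real network/transport header offsets come from struct vlan_ethhdr (18 bytes) rather than struct ethhdr (14):

	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	__be16 protocol = veh->h_vlan_encapsulated_proto;

	skb_set_network_header(skb, sizeof(*veh));
	if (protocol == htons(ETH_P_IP))
		skb_set_transport_header(skb, sizeof(*veh) +
					 4 * ip_hdr(skb)->ihl);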