/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
30 #include "vmxnet3_int.h"
32 char vmxnet3_driver_name[] = "vmxnet3";
33 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
37 * Last entry must be all 0s
39 static const struct pci_device_id vmxnet3_pciid_table[] = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
44 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46 static int enable_mq = 1;
49 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
52 * Enable/Disable the given intr
55 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
57 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
62 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
64 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
69 * Enable/Disable all intrs used by the device
72 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
76 for (i = 0; i < adapter->intr.num_intrs; i++)
77 vmxnet3_enable_intr(adapter, i);
78 adapter->shared->devRead.intrConf.intrCtrl &=
79 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
84 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
88 adapter->shared->devRead.intrConf.intrCtrl |=
89 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
90 for (i = 0; i < adapter->intr.num_intrs; i++)
91 vmxnet3_disable_intr(adapter, i);
96 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
98 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
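/*
 * Note (illustrative): the subqueue index is recovered by pointer arithmetic;
 * for tq == &adapter->tx_queue[2], tq - adapter->tx_queue evaluates to 2, so
 * the helpers above map a tx queue structure back to its netdev subqueue.
 */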
135 * Check the link state. This may start or stop the tx queue.
138 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
144 spin_lock_irqsave(&adapter->cmd_lock, flags);
145 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
146 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
147 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
149 adapter->link_speed = ret >> 16;
150 if (ret & 1) { /* Link is up. */
151 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
152 adapter->link_speed);
153 netif_carrier_on(adapter->netdev);
156 for (i = 0; i < adapter->num_tx_queues; i++)
157 vmxnet3_tq_start(&adapter->tx_queue[i],
161 netdev_info(adapter->netdev, "NIC Link is Down\n");
162 netif_carrier_off(adapter->netdev);
165 for (i = 0; i < adapter->num_tx_queues; i++)
166 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
172 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
176 u32 events = le32_to_cpu(adapter->shared->ecr);
180 vmxnet3_ack_events(adapter, events);
182 /* Check if link state has changed */
183 if (events & VMXNET3_ECR_LINK)
184 vmxnet3_check_link(adapter, true);
186 /* Check if there is an error on xmit/recv queues */
187 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
188 spin_lock_irqsave(&adapter->cmd_lock, flags);
189 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
190 VMXNET3_CMD_GET_QUEUE_STATUS);
191 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
193 for (i = 0; i < adapter->num_tx_queues; i++)
194 if (adapter->tqd_start[i].status.stopped)
195 dev_err(&adapter->netdev->dev,
196 "%s: tq[%d] error 0x%x\n",
197 adapter->netdev->name, i, le32_to_cpu(
198 adapter->tqd_start[i].status.error));
199 for (i = 0; i < adapter->num_rx_queues; i++)
200 if (adapter->rqd_start[i].status.stopped)
201 dev_err(&adapter->netdev->dev,
202 "%s: rq[%d] error 0x%x\n",
203 adapter->netdev->name, i,
204 adapter->rqd_start[i].status.error);
206 schedule_work(&adapter->work);
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to read from and write to the shared ABI correctly.
 * The general technique is: double-word bitfields are defined in the opposite
 * order for big-endian architectures. Before the driver reads them, the whole
 * double word is translated using le32_to_cpu; similarly, after the driver
 * writes into the bitfields, cpu_to_le32 translates the double words back
 * into the required format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the functions below.
 */
223 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
224 struct Vmxnet3_RxDesc *dstDesc)
226 u32 *src = (u32 *)srcDesc + 2;
227 u32 *dst = (u32 *)dstDesc + 2;
228 dstDesc->addr = le64_to_cpu(srcDesc->addr);
229 *dst = le32_to_cpu(*src);
230 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
233 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
234 struct Vmxnet3_TxDesc *dstDesc)
237 u32 *src = (u32 *)(srcDesc + 1);
238 u32 *dst = (u32 *)(dstDesc + 1);
240 /* Working backwards so that the gen bit is set at the end. */
241 for (i = 2; i > 0; i--) {
244 *dst = cpu_to_le32(*src);
249 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
250 struct Vmxnet3_RxCompDesc *dstDesc)
253 u32 *src = (u32 *)srcDesc;
254 u32 *dst = (u32 *)dstDesc;
255 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
256 *dst = le32_to_cpu(*src);
/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	return (temp & mask) >> pos;
}
275 #endif /* __BIG_ENDIAN_BITFIELD */
277 #ifdef __BIG_ENDIAN_BITFIELD
279 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
280 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
281 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
282 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
283 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
284 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
285 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
286 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
287 VMXNET3_TCD_GEN_SIZE)
288 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
289 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
290 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
292 vmxnet3_RxCompToCPU((rcd), (tmp)); \
294 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
296 vmxnet3_RxDescToCPU((rxd), (tmp)); \
301 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
302 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
303 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
304 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
305 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
306 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
308 #endif /* __BIG_ENDIAN_BITFIELD */
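/*
 * Illustrative helper (not used by the driver): the accessors above let code
 * test descriptor ownership identically on little- and big-endian hosts. A
 * hypothetical check for "this tx descriptor carries the ring's current
 * generation" could look like:
 */
static inline bool
vmxnet3_txd_gen_matches(const union Vmxnet3_GenericDesc *gd,
			const struct vmxnet3_cmd_ring *ring)
{
	/* VMXNET3_TXDESC_GET_GEN() hides the endian-specific bitfield layout */
	return VMXNET3_TXDESC_GET_GEN(&gd->txd) == ring->gen;
}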
312 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
313 struct pci_dev *pdev)
315 if (tbi->map_type == VMXNET3_MAP_SINGLE)
316 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
318 else if (tbi->map_type == VMXNET3_MAP_PAGE)
319 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
322 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
324 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
329 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
330 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
335 /* no out of order completion */
336 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
337 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
339 skb = tq->buf_info[eop_idx].skb;
341 tq->buf_info[eop_idx].skb = NULL;
343 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
345 while (tq->tx_ring.next2comp != eop_idx) {
346 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
		/* update next2comp without holding tx_lock. Since we are
		 * marking more, not fewer, tx ring entries available, the
		 * worst case is that the tx routine incorrectly re-queues a
		 * packet due to insufficient tx ring entries.
		 */
354 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
358 dev_kfree_skb_any(skb);
364 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
365 struct vmxnet3_adapter *adapter)
368 union Vmxnet3_GenericDesc *gdesc;
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
372 /* Prevent any &gdesc->tcd field from being (speculatively)
373 * read before (&gdesc->tcd)->gen is read.
377 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
378 &gdesc->tcd), tq, adapter->pdev,
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
386 spin_lock(&tq->tx_lock);
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
390 netif_carrier_ok(adapter->netdev))) {
391 vmxnet3_tq_wake(tq, adapter);
393 spin_unlock(&tq->tx_lock);
400 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
401 struct vmxnet3_adapter *adapter)
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
406 struct vmxnet3_tx_buf_info *tbi;
408 tbi = tq->buf_info + tq->tx_ring.next2comp;
410 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
412 dev_kfree_skb_any(tbi->skb);
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
418 /* sanity check, verify all buffers are indeed unmapped and freed */
419 for (i = 0; i < tq->tx_ring.size; i++) {
420 BUG_ON(tq->buf_info[i].skb != NULL ||
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
424 tq->tx_ring.gen = VMXNET3_INIT_GEN;
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
427 tq->comp_ring.gen = VMXNET3_INIT_GEN;
428 tq->comp_ring.next2proc = 0;
433 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
434 struct vmxnet3_adapter *adapter)
436 if (tq->tx_ring.base) {
437 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
438 sizeof(struct Vmxnet3_TxDesc),
439 tq->tx_ring.base, tq->tx_ring.basePA);
440 tq->tx_ring.base = NULL;
442 if (tq->data_ring.base) {
443 dma_free_coherent(&adapter->pdev->dev,
444 tq->data_ring.size * tq->txdata_desc_size,
445 tq->data_ring.base, tq->data_ring.basePA);
446 tq->data_ring.base = NULL;
448 if (tq->comp_ring.base) {
449 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
450 sizeof(struct Vmxnet3_TxCompDesc),
451 tq->comp_ring.base, tq->comp_ring.basePA);
452 tq->comp_ring.base = NULL;
459 /* Destroy all tx queues */
461 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
465 for (i = 0; i < adapter->num_tx_queues; i++)
466 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
471 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
472 struct vmxnet3_adapter *adapter)
476 /* reset the tx ring contents to 0 and reset the tx ring states */
477 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
478 sizeof(struct Vmxnet3_TxDesc));
479 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->tx_ring.gen = VMXNET3_INIT_GEN;
482 memset(tq->data_ring.base, 0,
483 tq->data_ring.size * tq->txdata_desc_size);
485 /* reset the tx comp ring contents to 0 and reset comp ring states */
486 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
487 sizeof(struct Vmxnet3_TxCompDesc));
488 tq->comp_ring.next2proc = 0;
489 tq->comp_ring.gen = VMXNET3_INIT_GEN;
491 /* reset the bookkeeping data */
492 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
493 for (i = 0; i < tq->tx_ring.size; i++)
494 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
496 /* stats are not reset */
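/*
 * Note (illustrative): every ring starts out with gen == VMXNET3_INIT_GEN,
 * and the fill helper (vmxnet3_cmd_ring_adv_next2fill()) flips the ring's gen
 * bit each time next2fill wraps back to 0, so a descriptor whose gen field
 * matches the ring's current gen is one produced in the current pass over
 * the ring.
 */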
501 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
502 struct vmxnet3_adapter *adapter)
504 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
505 tq->comp_ring.base || tq->buf_info);
507 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
508 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
509 &tq->tx_ring.basePA, GFP_KERNEL);
510 if (!tq->tx_ring.base) {
511 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
515 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
516 tq->data_ring.size * tq->txdata_desc_size,
517 &tq->data_ring.basePA, GFP_KERNEL);
518 if (!tq->data_ring.base) {
519 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
523 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
524 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
525 &tq->comp_ring.basePA, GFP_KERNEL);
526 if (!tq->comp_ring.base) {
527 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
531 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
533 dev_to_node(&adapter->pdev->dev));
540 vmxnet3_tq_destroy(tq, adapter);
545 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
549 for (i = 0; i < adapter->num_tx_queues; i++)
550 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx descriptors. Stop after @num_to_alloc
 * buffers are allocated or allocation fails.
 */
560 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
561 int num_to_alloc, struct vmxnet3_adapter *adapter)
563 int num_allocated = 0;
564 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
565 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
568 while (num_allocated <= num_to_alloc) {
569 struct vmxnet3_rx_buf_info *rbi;
570 union Vmxnet3_GenericDesc *gd;
572 rbi = rbi_base + ring->next2fill;
573 gd = ring->base + ring->next2fill;
575 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
576 if (rbi->skb == NULL) {
577 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
580 if (unlikely(rbi->skb == NULL)) {
581 rq->stats.rx_buf_alloc_failure++;
585 rbi->dma_addr = dma_map_single(
587 rbi->skb->data, rbi->len,
589 if (dma_mapping_error(&adapter->pdev->dev,
591 dev_kfree_skb_any(rbi->skb);
593 rq->stats.rx_buf_alloc_failure++;
597 /* rx buffer skipped by the device */
599 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
601 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
602 rbi->len != PAGE_SIZE);
604 if (rbi->page == NULL) {
605 rbi->page = alloc_page(GFP_ATOMIC);
606 if (unlikely(rbi->page == NULL)) {
607 rq->stats.rx_buf_alloc_failure++;
610 rbi->dma_addr = dma_map_page(
612 rbi->page, 0, PAGE_SIZE,
614 if (dma_mapping_error(&adapter->pdev->dev,
618 rq->stats.rx_buf_alloc_failure++;
622 /* rx buffers skipped by the device */
624 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
627 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
628 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
633 if (num_allocated == num_to_alloc)
636 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
638 vmxnet3_cmd_ring_adv_next2fill(ring);
641 netdev_dbg(adapter->netdev,
642 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
643 num_allocated, ring->next2fill, ring->next2comp);
645 /* so that the device can distinguish a full ring and an empty ring */
646 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
648 return num_allocated;
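/*
 * Usage sketch (illustrative): at init time vmxnet3_rq_init() fills each rx
 * ring with
 *
 *	vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, adapter);
 *
 * passing size - 1 so that next2fill never catches up with next2comp and the
 * device can still tell a full ring from an empty one.
 */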
653 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
654 struct vmxnet3_rx_buf_info *rbi)
656 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
658 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
660 __skb_frag_set_page(frag, rbi->page);
661 skb_frag_off_set(frag, 0);
662 skb_frag_size_set(frag, rcd->len);
663 skb->data_len += rcd->len;
664 skb->truesize += PAGE_SIZE;
665 skb_shinfo(skb)->nr_frags++;
670 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
671 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
672 struct vmxnet3_adapter *adapter)
675 unsigned long buf_offset;
677 union Vmxnet3_GenericDesc *gdesc;
678 struct vmxnet3_tx_buf_info *tbi = NULL;
680 BUG_ON(ctx->copy_size > skb_headlen(skb));
682 /* use the previous gen bit for the SOP desc */
683 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
685 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
686 gdesc = ctx->sop_txd; /* both loops below can be skipped */
688 /* no need to map the buffer if headers are copied */
689 if (ctx->copy_size) {
690 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
691 tq->tx_ring.next2fill *
692 tq->txdata_desc_size);
693 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
694 ctx->sop_txd->dword[3] = 0;
696 tbi = tq->buf_info + tq->tx_ring.next2fill;
697 tbi->map_type = VMXNET3_MAP_NONE;
699 netdev_dbg(adapter->netdev,
700 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
701 tq->tx_ring.next2fill,
702 le64_to_cpu(ctx->sop_txd->txd.addr),
703 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
704 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
706 /* use the right gen for non-SOP desc */
707 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
710 /* linear part can use multiple tx desc if it's big */
711 len = skb_headlen(skb) - ctx->copy_size;
712 buf_offset = ctx->copy_size;
716 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
720 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
721 /* spec says that for TxDesc.len, 0 == 2^14 */
724 tbi = tq->buf_info + tq->tx_ring.next2fill;
725 tbi->map_type = VMXNET3_MAP_SINGLE;
726 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
727 skb->data + buf_offset, buf_size,
729 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
734 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
735 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
737 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
738 gdesc->dword[2] = cpu_to_le32(dw2);
741 netdev_dbg(adapter->netdev,
742 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
743 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
744 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
745 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
746 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
749 buf_offset += buf_size;
752 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
753 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
757 len = skb_frag_size(frag);
759 tbi = tq->buf_info + tq->tx_ring.next2fill;
760 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
764 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
765 /* spec says that for TxDesc.len, 0 == 2^14 */
767 tbi->map_type = VMXNET3_MAP_PAGE;
768 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
769 buf_offset, buf_size,
771 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
776 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
777 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
779 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
780 gdesc->dword[2] = cpu_to_le32(dw2);
783 netdev_dbg(adapter->netdev,
784 "txd[%u]: 0x%llx %u %u\n",
785 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
786 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
787 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
788 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
791 buf_offset += buf_size;
795 ctx->eop_txd = gdesc;
797 /* set the last buf_info for the pkt */
799 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
805 /* Init all tx queues */
807 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
811 for (i = 0; i < adapter->num_tx_queues; i++)
812 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
/*
 * Parse relevant protocol headers:
 *      For a TSO pkt, relevant headers are L2/3/4, including options.
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt.
 *
 * Returns:
 *    -1:  error happened during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 */
834 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
835 struct vmxnet3_tx_ctx *ctx,
836 struct vmxnet3_adapter *adapter)
840 if (ctx->mss) { /* TSO */
841 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
842 ctx->l4_offset = skb_inner_transport_offset(skb);
843 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
844 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
846 ctx->l4_offset = skb_transport_offset(skb);
847 ctx->l4_hdr_size = tcp_hdrlen(skb);
848 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
851 if (skb->ip_summed == CHECKSUM_PARTIAL) {
852 /* For encap packets, skb_checksum_start_offset refers
853 * to inner L4 offset. Thus, below works for encap as
854 * well as non-encap case
856 ctx->l4_offset = skb_checksum_start_offset(skb);
858 if (VMXNET3_VERSION_GE_4(adapter) &&
859 skb->encapsulation) {
860 struct iphdr *iph = inner_ip_hdr(skb);
862 if (iph->version == 4) {
863 protocol = iph->protocol;
865 const struct ipv6hdr *ipv6h;
867 ipv6h = inner_ipv6_hdr(skb);
868 protocol = ipv6h->nexthdr;
872 const struct iphdr *iph = ip_hdr(skb);
874 protocol = iph->protocol;
875 } else if (ctx->ipv6) {
876 const struct ipv6hdr *ipv6h;
878 ipv6h = ipv6_hdr(skb);
879 protocol = ipv6h->nexthdr;
885 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
889 ctx->l4_hdr_size = sizeof(struct udphdr);
892 ctx->l4_hdr_size = 0;
896 ctx->copy_size = min(ctx->l4_offset +
897 ctx->l4_hdr_size, skb->len);
900 ctx->l4_hdr_size = 0;
901 /* copy as much as allowed */
902 ctx->copy_size = min_t(unsigned int,
903 tq->txdata_desc_size,
907 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
908 ctx->copy_size = skb->len;
910 /* make sure headers are accessible directly */
911 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
915 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
916 tq->stats.oversized_hdr++;
/*
 * Copy relevant protocol headers to the transmit ring:
 *      For a TSO pkt, relevant headers are L2/3/4, including options.
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt.
 *
 * Note that this requires vmxnet3_parse_hdr to be called first to set the
 * appropriate bits in ctx.
 */
937 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
938 struct vmxnet3_tx_ctx *ctx,
939 struct vmxnet3_adapter *adapter)
941 struct Vmxnet3_TxDataDesc *tdd;
943 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
944 tq->tx_ring.next2fill *
945 tq->txdata_desc_size);
947 memcpy(tdd->data, skb->data, ctx->copy_size);
948 netdev_dbg(adapter->netdev,
949 "copy %u bytes to dataRing[%u]\n",
950 ctx->copy_size, tq->tx_ring.next2fill);
955 vmxnet3_prepare_inner_tso(struct sk_buff *skb,
956 struct vmxnet3_tx_ctx *ctx)
958 struct tcphdr *tcph = inner_tcp_hdr(skb);
959 struct iphdr *iph = inner_ip_hdr(skb);
961 if (iph->version == 4) {
963 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
966 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
968 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
974 vmxnet3_prepare_tso(struct sk_buff *skb,
975 struct vmxnet3_tx_ctx *ctx)
977 struct tcphdr *tcph = tcp_hdr(skb);
980 struct iphdr *iph = ip_hdr(skb);
983 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
985 } else if (ctx->ipv6) {
986 tcp_v6_gso_csum_prep(skb);
990 static int txd_estimate(const struct sk_buff *skb)
992 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
995 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
996 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
998 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
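/*
 * Worked example (illustrative, assuming VMXNET3_MAX_TX_BUF_SIZE is 16384):
 * a non-TSO skb with an 18000-byte linear area and one 3000-byte frag needs
 * VMXNET3_TXD_NEEDED(18000) + 1 + VMXNET3_TXD_NEEDED(3000) = 2 + 1 + 1 = 4
 * descriptors; the extra "+ 1" leaves room for the SOP descriptor that points
 * at any headers copied into the tx data ring.
 */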
/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
1017 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1018 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1023 int tx_num_deferred;
1024 unsigned long flags;
1025 struct vmxnet3_tx_ctx ctx;
1026 union Vmxnet3_GenericDesc *gdesc;
1027 #ifdef __BIG_ENDIAN_BITFIELD
1028 /* Use temporary descriptor to avoid touching bits multiple times */
1029 union Vmxnet3_GenericDesc tempTxDesc;
1032 count = txd_estimate(skb);
1034 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1035 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1037 ctx.mss = skb_shinfo(skb)->gso_size;
1039 if (skb_header_cloned(skb)) {
1040 if (unlikely(pskb_expand_head(skb, 0, 0,
1041 GFP_ATOMIC) != 0)) {
1042 tq->stats.drop_tso++;
1045 tq->stats.copy_skb_header++;
1047 if (skb->encapsulation) {
1048 vmxnet3_prepare_inner_tso(skb, &ctx);
1050 vmxnet3_prepare_tso(skb, &ctx);
1053 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1055 /* non-tso pkts must not use more than
1056 * VMXNET3_MAX_TXD_PER_PKT entries
1058 if (skb_linearize(skb) != 0) {
1059 tq->stats.drop_too_many_frags++;
1062 tq->stats.linearized++;
1064 /* recalculate the # of descriptors to use */
1065 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1069 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1071 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1072 /* hdrs parsed, check against other limits */
1074 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1075 VMXNET3_MAX_TX_BUF_SIZE)) {
1076 tq->stats.drop_oversized_hdr++;
1080 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1081 if (unlikely(ctx.l4_offset +
1083 VMXNET3_MAX_CSUM_OFFSET)) {
1084 tq->stats.drop_oversized_hdr++;
1090 tq->stats.drop_hdr_inspect_err++;
1094 spin_lock_irqsave(&tq->tx_lock, flags);
1096 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1097 tq->stats.tx_ring_full++;
1098 netdev_dbg(adapter->netdev,
1099 "tx queue stopped on %s, next2comp %u"
1100 " next2fill %u\n", adapter->netdev->name,
1101 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1103 vmxnet3_tq_stop(tq, adapter);
1104 spin_unlock_irqrestore(&tq->tx_lock, flags);
1105 return NETDEV_TX_BUSY;
1109 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1111 /* fill tx descs related to addr & len */
1112 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1113 goto unlock_drop_pkt;
1115 /* setup the EOP desc */
1116 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1118 /* setup the SOP desc */
1119 #ifdef __BIG_ENDIAN_BITFIELD
1120 gdesc = &tempTxDesc;
1121 gdesc->dword[2] = ctx.sop_txd->dword[2];
1122 gdesc->dword[3] = ctx.sop_txd->dword[3];
1124 gdesc = ctx.sop_txd;
1126 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1128 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1129 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1130 gdesc->txd.om = VMXNET3_OM_ENCAP;
1131 gdesc->txd.msscof = ctx.mss;
1133 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1136 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1137 gdesc->txd.om = VMXNET3_OM_TSO;
1138 gdesc->txd.msscof = ctx.mss;
1140 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1142 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1143 if (VMXNET3_VERSION_GE_4(adapter) &&
1144 skb->encapsulation) {
1145 gdesc->txd.hlen = ctx.l4_offset +
1147 gdesc->txd.om = VMXNET3_OM_ENCAP;
1148 gdesc->txd.msscof = 0; /* Reserved */
1150 gdesc->txd.hlen = ctx.l4_offset;
1151 gdesc->txd.om = VMXNET3_OM_CSUM;
1152 gdesc->txd.msscof = ctx.l4_offset +
1157 gdesc->txd.msscof = 0;
1161 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1162 tx_num_deferred += num_pkts;
1164 if (skb_vlan_tag_present(skb)) {
1166 gdesc->txd.tci = skb_vlan_tag_get(skb);
1169 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1170 * all other writes to &gdesc->txd.
1174 /* finally flips the GEN bit of the SOP desc. */
1175 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1177 #ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
1181 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1182 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1183 gdesc = ctx.sop_txd;
1185 netdev_dbg(adapter->netdev,
1186 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1188 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1189 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1191 spin_unlock_irqrestore(&tq->tx_lock, flags);
1193 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1194 tq->shared->txNumDeferred = 0;
1195 VMXNET3_WRITE_BAR0_REG(adapter,
1196 VMXNET3_REG_TXPROD + tq->qid * 8,
1197 tq->tx_ring.next2fill);
1200 return NETDEV_TX_OK;
1203 spin_unlock_irqrestore(&tq->tx_lock, flags);
1205 tq->stats.drop_total++;
1206 dev_kfree_skb_any(skb);
1207 return NETDEV_TX_OK;
1212 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1214 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1216 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1217 return vmxnet3_tq_xmit(skb,
1218 &adapter->tx_queue[skb->queue_mapping],
1224 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1225 struct sk_buff *skb,
1226 union Vmxnet3_GenericDesc *gdesc)
1228 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1229 if (gdesc->rcd.v4 &&
1230 (le32_to_cpu(gdesc->dword[3]) &
1231 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1232 skb->ip_summed = CHECKSUM_UNNECESSARY;
1233 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1234 !(le32_to_cpu(gdesc->dword[0]) &
1235 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1236 WARN_ON_ONCE(gdesc->rcd.frg &&
1237 !(le32_to_cpu(gdesc->dword[0]) &
1238 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1239 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1240 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1241 skb->ip_summed = CHECKSUM_UNNECESSARY;
1242 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1243 !(le32_to_cpu(gdesc->dword[0]) &
1244 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1245 WARN_ON_ONCE(gdesc->rcd.frg &&
1246 !(le32_to_cpu(gdesc->dword[0]) &
1247 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1249 if (gdesc->rcd.csum) {
1250 skb->csum = htons(gdesc->rcd.csum);
1251 skb->ip_summed = CHECKSUM_PARTIAL;
1253 skb_checksum_none_assert(skb);
1257 skb_checksum_none_assert(skb);
1263 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1264 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1266 rq->stats.drop_err++;
1268 rq->stats.drop_fcs++;
1270 rq->stats.drop_total++;
1273 * We do not unmap and chain the rx buffer to the skb.
1274 * We basically pretend this buffer is not used and will be recycled
1275 * by vmxnet3_rq_alloc_rx_buf()
1279 * ctx->skb may be NULL if this is the first and the only one
1283 dev_kfree_skb_irq(ctx->skb);
1290 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1291 union Vmxnet3_GenericDesc *gdesc)
1297 struct vlan_ethhdr *veth;
1299 struct ipv6hdr *ipv6;
1302 BUG_ON(gdesc->rcd.tcp == 0);
1304 maplen = skb_headlen(skb);
1305 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1308 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1309 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1310 hlen = sizeof(struct vlan_ethhdr);
1312 hlen = sizeof(struct ethhdr);
1314 hdr.eth = eth_hdr(skb);
1315 if (gdesc->rcd.v4) {
1316 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1317 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1319 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1320 hlen = hdr.ipv4->ihl << 2;
1321 hdr.ptr += hdr.ipv4->ihl << 2;
1322 } else if (gdesc->rcd.v6) {
1323 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1324 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1326 /* Use an estimated value, since we also need to handle
1329 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1330 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1331 hlen = sizeof(struct ipv6hdr);
1332 hdr.ptr += sizeof(struct ipv6hdr);
		/* Non-IP pkt, don't estimate header length */
1338 if (hlen + sizeof(struct tcphdr) > maplen)
1341 return (hlen + (hdr.tcp->doff << 2));
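/*
 * Example (illustrative): for an IPv4/TCP frame with a 20-byte IP header and
 * a 32-byte TCP header (doff == 8), this returns 20 + 32 = 52; the LRO
 * completion path uses that value to derive gso_size when the device does
 * not report an mss.
 */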
1345 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1346 struct vmxnet3_adapter *adapter, int quota)
1348 static const u32 rxprod_reg[2] = {
1349 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1352 bool skip_page_frags = false;
1353 struct Vmxnet3_RxCompDesc *rcd;
1354 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1355 u16 segCnt = 0, mss = 0;
1356 #ifdef __BIG_ENDIAN_BITFIELD
1357 struct Vmxnet3_RxDesc rxCmdDesc;
1358 struct Vmxnet3_RxCompDesc rxComp;
1360 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1362 while (rcd->gen == rq->comp_ring.gen) {
1363 struct vmxnet3_rx_buf_info *rbi;
1364 struct sk_buff *skb, *new_skb = NULL;
1365 struct page *new_page = NULL;
1366 dma_addr_t new_dma_addr;
1368 struct Vmxnet3_RxDesc *rxd;
1370 struct vmxnet3_cmd_ring *ring = NULL;
1371 if (num_pkts >= quota) {
1372 /* we may stop even before we see the EOP desc of
1378 /* Prevent any rcd field from being (speculatively) read before
1383 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1384 rcd->rqID != rq->dataRingQid);
1386 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1387 ring = rq->rx_ring + ring_idx;
1388 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1390 rbi = rq->buf_info[ring_idx] + idx;
1392 BUG_ON(rxd->addr != rbi->dma_addr ||
1393 rxd->len != rbi->len);
1395 if (unlikely(rcd->eop && rcd->err)) {
1396 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1400 if (rcd->sop) { /* first buf of the pkt */
1401 bool rxDataRingUsed;
1404 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1405 (rcd->rqID != rq->qid &&
1406 rcd->rqID != rq->dataRingQid));
1408 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1409 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1411 if (unlikely(rcd->len == 0)) {
1412 /* Pretend the rx buffer is skipped. */
1413 BUG_ON(!(rcd->sop && rcd->eop));
1414 netdev_dbg(adapter->netdev,
1415 "rxRing[%u][%u] 0 length\n",
1420 skip_page_frags = false;
1421 ctx->skb = rbi->skb;
1424 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1425 len = rxDataRingUsed ? rcd->len : rbi->len;
1426 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1428 if (new_skb == NULL) {
				/* Skb allocation failed, do not hand this
				 * skb over to the stack. Reuse it. Drop the
				 * existing pkt.
				 */
1432 rq->stats.rx_buf_alloc_failure++;
1434 rq->stats.drop_total++;
1435 skip_page_frags = true;
1439 if (rxDataRingUsed) {
1442 BUG_ON(rcd->len > rq->data_ring.desc_size);
1445 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1446 memcpy(new_skb->data,
1447 &rq->data_ring.base[sz], rcd->len);
1449 ctx->skb = rbi->skb;
1452 dma_map_single(&adapter->pdev->dev,
1453 new_skb->data, rbi->len,
1455 if (dma_mapping_error(&adapter->pdev->dev,
1457 dev_kfree_skb(new_skb);
					/* Skb allocation failed, do not
					 * hand this skb over to the stack.
					 * Reuse it. Drop the existing pkt.
					 */
1462 rq->stats.rx_buf_alloc_failure++;
1464 rq->stats.drop_total++;
1465 skip_page_frags = true;
1469 dma_unmap_single(&adapter->pdev->dev,
1474 /* Immediate refill */
1476 rbi->dma_addr = new_dma_addr;
1477 rxd->addr = cpu_to_le64(rbi->dma_addr);
1478 rxd->len = rbi->len;
1482 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1483 (adapter->netdev->features & NETIF_F_RXHASH)) {
1484 enum pkt_hash_types hash_type;
1486 switch (rcd->rssType) {
1487 case VMXNET3_RCD_RSS_TYPE_IPV4:
1488 case VMXNET3_RCD_RSS_TYPE_IPV6:
1489 hash_type = PKT_HASH_TYPE_L3;
1491 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1492 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1493 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1494 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1495 hash_type = PKT_HASH_TYPE_L4;
1498 hash_type = PKT_HASH_TYPE_L3;
1501 skb_set_hash(ctx->skb,
1502 le32_to_cpu(rcd->rssHash),
1506 skb_put(ctx->skb, rcd->len);
1508 if (VMXNET3_VERSION_GE_2(adapter) &&
1509 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1510 struct Vmxnet3_RxCompDescExt *rcdlro;
1511 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1513 segCnt = rcdlro->segCnt;
1514 WARN_ON_ONCE(segCnt == 0);
1516 if (unlikely(segCnt <= 1))
1522 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1524 /* non SOP buffer must be type 1 in most cases */
1525 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1526 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1528 /* If an sop buffer was dropped, skip all
1529 * following non-sop fragments. They will be reused.
1531 if (skip_page_frags)
1535 new_page = alloc_page(GFP_ATOMIC);
1536 /* Replacement page frag could not be allocated.
1537 * Reuse this page. Drop the pkt and free the
1538 * skb which contained this page as a frag. Skip
1539 * processing all the following non-sop frags.
1541 if (unlikely(!new_page)) {
1542 rq->stats.rx_buf_alloc_failure++;
1543 dev_kfree_skb(ctx->skb);
1545 skip_page_frags = true;
1548 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1552 if (dma_mapping_error(&adapter->pdev->dev,
1555 rq->stats.rx_buf_alloc_failure++;
1556 dev_kfree_skb(ctx->skb);
1558 skip_page_frags = true;
1562 dma_unmap_page(&adapter->pdev->dev,
1563 rbi->dma_addr, rbi->len,
1566 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1568 /* Immediate refill */
1569 rbi->page = new_page;
1570 rbi->dma_addr = new_dma_addr;
1571 rxd->addr = cpu_to_le64(rbi->dma_addr);
1572 rxd->len = rbi->len;
1579 u32 mtu = adapter->netdev->mtu;
1580 skb->len += skb->data_len;
1582 vmxnet3_rx_csum(adapter, skb,
1583 (union Vmxnet3_GenericDesc *)rcd);
1584 skb->protocol = eth_type_trans(skb, adapter->netdev);
1586 !(adapter->netdev->features & NETIF_F_LRO))
1589 if (segCnt != 0 && mss != 0) {
1590 skb_shinfo(skb)->gso_type = rcd->v4 ?
1591 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1592 skb_shinfo(skb)->gso_size = mss;
1593 skb_shinfo(skb)->gso_segs = segCnt;
1594 } else if (segCnt != 0 || skb->len > mtu) {
1597 hlen = vmxnet3_get_hdr_len(adapter, skb,
1598 (union Vmxnet3_GenericDesc *)rcd);
1602 skb_shinfo(skb)->gso_type =
1603 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1605 skb_shinfo(skb)->gso_segs = segCnt;
1606 skb_shinfo(skb)->gso_size =
1607 DIV_ROUND_UP(skb->len -
1610 skb_shinfo(skb)->gso_size = mtu - hlen;
1614 if (unlikely(rcd->ts))
1615 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1617 if (adapter->netdev->features & NETIF_F_LRO)
1618 netif_receive_skb(skb);
1620 napi_gro_receive(&rq->napi, skb);
1627 /* device may have skipped some rx descs */
1628 ring->next2comp = idx;
1629 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1630 ring = rq->rx_ring + ring_idx;
1632 /* Ensure that the writes to rxd->gen bits will be observed
1633 * after all other writes to rxd objects.
1637 while (num_to_alloc) {
1638 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1642 /* Recv desc is ready to be used by the device */
1643 rxd->gen = ring->gen;
1644 vmxnet3_cmd_ring_adv_next2fill(ring);
1648 /* if needed, update the register */
1649 if (unlikely(rq->shared->updateRxProd)) {
1650 VMXNET3_WRITE_BAR0_REG(adapter,
1651 rxprod_reg[ring_idx] + rq->qid * 8,
1655 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1656 vmxnet3_getRxComp(rcd,
1657 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1665 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1666 struct vmxnet3_adapter *adapter)
1669 struct Vmxnet3_RxDesc *rxd;
1671 /* ring has already been cleaned up */
1672 if (!rq->rx_ring[0].base)
1675 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1676 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1677 #ifdef __BIG_ENDIAN_BITFIELD
1678 struct Vmxnet3_RxDesc rxDesc;
1680 vmxnet3_getRxDesc(rxd,
1681 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1683 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1684 rq->buf_info[ring_idx][i].skb) {
1685 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1686 rxd->len, DMA_FROM_DEVICE);
1687 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1688 rq->buf_info[ring_idx][i].skb = NULL;
1689 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1690 rq->buf_info[ring_idx][i].page) {
1691 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1692 rxd->len, DMA_FROM_DEVICE);
1693 put_page(rq->buf_info[ring_idx][i].page);
1694 rq->buf_info[ring_idx][i].page = NULL;
1698 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1699 rq->rx_ring[ring_idx].next2fill =
1700 rq->rx_ring[ring_idx].next2comp = 0;
1703 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1704 rq->comp_ring.next2proc = 0;
1709 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1713 for (i = 0; i < adapter->num_rx_queues; i++)
1714 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1718 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1719 struct vmxnet3_adapter *adapter)
1724 /* all rx buffers must have already been freed */
1725 for (i = 0; i < 2; i++) {
1726 if (rq->buf_info[i]) {
1727 for (j = 0; j < rq->rx_ring[i].size; j++)
1728 BUG_ON(rq->buf_info[i][j].page != NULL);
1733 for (i = 0; i < 2; i++) {
1734 if (rq->rx_ring[i].base) {
1735 dma_free_coherent(&adapter->pdev->dev,
1737 * sizeof(struct Vmxnet3_RxDesc),
1738 rq->rx_ring[i].base,
1739 rq->rx_ring[i].basePA);
1740 rq->rx_ring[i].base = NULL;
1744 if (rq->data_ring.base) {
1745 dma_free_coherent(&adapter->pdev->dev,
1746 rq->rx_ring[0].size * rq->data_ring.desc_size,
1747 rq->data_ring.base, rq->data_ring.basePA);
1748 rq->data_ring.base = NULL;
1751 if (rq->comp_ring.base) {
1752 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1753 * sizeof(struct Vmxnet3_RxCompDesc),
1754 rq->comp_ring.base, rq->comp_ring.basePA);
1755 rq->comp_ring.base = NULL;
1758 kfree(rq->buf_info[0]);
1759 rq->buf_info[0] = NULL;
1760 rq->buf_info[1] = NULL;
1764 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1768 for (i = 0; i < adapter->num_rx_queues; i++) {
1769 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1771 if (rq->data_ring.base) {
1772 dma_free_coherent(&adapter->pdev->dev,
1773 (rq->rx_ring[0].size *
1774 rq->data_ring.desc_size),
1776 rq->data_ring.basePA);
1777 rq->data_ring.base = NULL;
1778 rq->data_ring.desc_size = 0;
1784 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1785 struct vmxnet3_adapter *adapter)
1789 /* initialize buf_info */
1790 for (i = 0; i < rq->rx_ring[0].size; i++) {
1792 /* 1st buf for a pkt is skbuff */
1793 if (i % adapter->rx_buf_per_pkt == 0) {
1794 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1795 rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
1797 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1798 rq->buf_info[0][i].len = PAGE_SIZE;
1801 for (i = 0; i < rq->rx_ring[1].size; i++) {
1802 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1803 rq->buf_info[1][i].len = PAGE_SIZE;
1806 /* reset internal state and allocate buffers for both rings */
1807 for (i = 0; i < 2; i++) {
1808 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1810 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1811 sizeof(struct Vmxnet3_RxDesc));
1812 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1814 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
		/* the 1st ring must have at least 1 rx buffer */
1819 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1821 /* reset the comp ring */
1822 rq->comp_ring.next2proc = 0;
1823 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1824 sizeof(struct Vmxnet3_RxCompDesc));
1825 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1828 rq->rx_ctx.skb = NULL;
1830 /* stats are not reset */
1836 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1840 for (i = 0; i < adapter->num_rx_queues; i++) {
1841 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1842 if (unlikely(err)) {
1843 dev_err(&adapter->netdev->dev, "%s: failed to "
1844 "initialize rx queue%i\n",
1845 adapter->netdev->name, i);
1855 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1859 struct vmxnet3_rx_buf_info *bi;
1861 for (i = 0; i < 2; i++) {
1863 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1864 rq->rx_ring[i].base = dma_alloc_coherent(
1865 &adapter->pdev->dev, sz,
1866 &rq->rx_ring[i].basePA,
1868 if (!rq->rx_ring[i].base) {
1869 netdev_err(adapter->netdev,
1870 "failed to allocate rx ring %d\n", i);
1875 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1876 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1877 rq->data_ring.base =
1878 dma_alloc_coherent(&adapter->pdev->dev, sz,
1879 &rq->data_ring.basePA,
1881 if (!rq->data_ring.base) {
1882 netdev_err(adapter->netdev,
1883 "rx data ring will be disabled\n");
1884 adapter->rxdataring_enabled = false;
1887 rq->data_ring.base = NULL;
1888 rq->data_ring.desc_size = 0;
1891 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1892 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1893 &rq->comp_ring.basePA,
1895 if (!rq->comp_ring.base) {
1896 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1900 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
1901 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
1902 dev_to_node(&adapter->pdev->dev));
1906 rq->buf_info[0] = bi;
1907 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1912 vmxnet3_rq_destroy(rq, adapter);
1918 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1922 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1924 for (i = 0; i < adapter->num_rx_queues; i++) {
1925 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1926 if (unlikely(err)) {
1927 dev_err(&adapter->netdev->dev,
1928 "%s: failed to create rx queue%i\n",
1929 adapter->netdev->name, i);
1934 if (!adapter->rxdataring_enabled)
1935 vmxnet3_rq_destroy_all_rxdataring(adapter);
1939 vmxnet3_rq_destroy_all(adapter);
1944 /* Multiple queue aware polling function for tx and rx */
1947 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1949 int rcd_done = 0, i;
1950 if (unlikely(adapter->shared->ecr))
1951 vmxnet3_process_events(adapter);
1952 for (i = 0; i < adapter->num_tx_queues; i++)
1953 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1955 for (i = 0; i < adapter->num_rx_queues; i++)
1956 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1963 vmxnet3_poll(struct napi_struct *napi, int budget)
1965 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1966 struct vmxnet3_rx_queue, napi);
1969 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1971 if (rxd_done < budget) {
1972 napi_complete_done(napi, rxd_done);
1973 vmxnet3_enable_all_intrs(rx_queue->adapter);
1979 * NAPI polling function for MSI-X mode with multiple Rx queues
1980 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1984 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1986 struct vmxnet3_rx_queue *rq = container_of(napi,
1987 struct vmxnet3_rx_queue, napi);
1988 struct vmxnet3_adapter *adapter = rq->adapter;
1991 /* When sharing interrupt with corresponding tx queue, process
1992 * tx completions in that queue as well
1994 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1995 struct vmxnet3_tx_queue *tq =
1996 &adapter->tx_queue[rq - adapter->rx_queue];
1997 vmxnet3_tq_tx_complete(tq, adapter);
2000 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2002 if (rxd_done < budget) {
2003 napi_complete_done(napi, rxd_done);
2004 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2010 #ifdef CONFIG_PCI_MSI
2013 * Handle completion interrupts on tx queues
2014 * Returns whether or not the intr is handled
2018 vmxnet3_msix_tx(int irq, void *data)
2020 struct vmxnet3_tx_queue *tq = data;
2021 struct vmxnet3_adapter *adapter = tq->adapter;
2023 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2024 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
	/* Handle the case where only one irq is allocated for all tx queues */
2027 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2029 for (i = 0; i < adapter->num_tx_queues; i++) {
2030 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2031 vmxnet3_tq_tx_complete(txq, adapter);
2034 vmxnet3_tq_tx_complete(tq, adapter);
2036 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2043 * Handle completion interrupts on rx queues. Returns whether or not the
2048 vmxnet3_msix_rx(int irq, void *data)
2050 struct vmxnet3_rx_queue *rq = data;
2051 struct vmxnet3_adapter *adapter = rq->adapter;
2053 /* disable intr if needed */
2054 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2055 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2056 napi_schedule(&rq->napi);
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */
2075 vmxnet3_msix_event(int irq, void *data)
2077 struct net_device *dev = data;
2078 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2080 /* disable intr if needed */
2081 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2082 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2084 if (adapter->shared->ecr)
2085 vmxnet3_process_events(adapter);
2087 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2092 #endif /* CONFIG_PCI_MSI */
2095 /* Interrupt handler for vmxnet3 */
2097 vmxnet3_intr(int irq, void *dev_id)
2099 struct net_device *dev = dev_id;
2100 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2102 if (adapter->intr.type == VMXNET3_IT_INTX) {
2103 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2104 if (unlikely(icr == 0))
2110 /* disable intr if needed */
2111 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2112 vmxnet3_disable_all_intrs(adapter);
2114 napi_schedule(&adapter->rx_queue[0].napi);
2119 #ifdef CONFIG_NET_POLL_CONTROLLER
2121 /* netpoll callback. */
2123 vmxnet3_netpoll(struct net_device *netdev)
2125 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2127 switch (adapter->intr.type) {
2128 #ifdef CONFIG_PCI_MSI
2129 case VMXNET3_IT_MSIX: {
2131 for (i = 0; i < adapter->num_rx_queues; i++)
2132 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2136 case VMXNET3_IT_MSI:
2138 vmxnet3_intr(0, adapter->netdev);
2143 #endif /* CONFIG_NET_POLL_CONTROLLER */
2146 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2148 struct vmxnet3_intr *intr = &adapter->intr;
2152 #ifdef CONFIG_PCI_MSI
2153 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2154 for (i = 0; i < adapter->num_tx_queues; i++) {
2155 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2156 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2157 adapter->netdev->name, vector);
2159 intr->msix_entries[vector].vector,
2161 adapter->tx_queue[i].name,
2162 &adapter->tx_queue[i]);
2164 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2165 adapter->netdev->name, vector);
2168 dev_err(&adapter->netdev->dev,
2169 "Failed to request irq for MSIX, %s, "
2171 adapter->tx_queue[i].name, err);
		/* Handle the case where only 1 MSIx was allocated for
		 * all tx queues */
2177 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2178 for (; i < adapter->num_tx_queues; i++)
2179 adapter->tx_queue[i].comp_ring.intr_idx
2184 adapter->tx_queue[i].comp_ring.intr_idx
2188 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2191 for (i = 0; i < adapter->num_rx_queues; i++) {
2192 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2193 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2194 adapter->netdev->name, vector);
2196 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2197 adapter->netdev->name, vector);
2198 err = request_irq(intr->msix_entries[vector].vector,
2200 adapter->rx_queue[i].name,
2201 &(adapter->rx_queue[i]));
2203 netdev_err(adapter->netdev,
2204 "Failed to request irq for MSIX, "
2206 adapter->rx_queue[i].name, err);
2210 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2213 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2214 adapter->netdev->name, vector);
2215 err = request_irq(intr->msix_entries[vector].vector,
2216 vmxnet3_msix_event, 0,
2217 intr->event_msi_vector_name, adapter->netdev);
2218 intr->event_intr_idx = vector;
2220 } else if (intr->type == VMXNET3_IT_MSI) {
2221 adapter->num_rx_queues = 1;
2222 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2223 adapter->netdev->name, adapter->netdev);
2226 adapter->num_rx_queues = 1;
2227 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2228 IRQF_SHARED, adapter->netdev->name,
2230 #ifdef CONFIG_PCI_MSI
2233 intr->num_intrs = vector + 1;
2235 netdev_err(adapter->netdev,
2236 "Failed to request irq (intr type:%d), error %d\n",
2239 /* Number of rx queues will not change after this */
2240 for (i = 0; i < adapter->num_rx_queues; i++) {
2241 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2243 rq->qid2 = i + adapter->num_rx_queues;
2244 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2247 /* init our intr settings */
2248 for (i = 0; i < intr->num_intrs; i++)
2249 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2250 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2251 adapter->intr.event_intr_idx = 0;
2252 for (i = 0; i < adapter->num_tx_queues; i++)
2253 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2254 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2257 netdev_info(adapter->netdev,
2258 "intr type %u, mode %u, %u vectors allocated\n",
2259 intr->type, intr->mask_mode, intr->num_intrs);
2267 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2269 struct vmxnet3_intr *intr = &adapter->intr;
2270 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2272 switch (intr->type) {
2273 #ifdef CONFIG_PCI_MSI
2274 case VMXNET3_IT_MSIX:
2278 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2279 for (i = 0; i < adapter->num_tx_queues; i++) {
2280 free_irq(intr->msix_entries[vector++].vector,
2281 &(adapter->tx_queue[i]));
2282 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2287 for (i = 0; i < adapter->num_rx_queues; i++) {
2288 free_irq(intr->msix_entries[vector++].vector,
2289 &(adapter->rx_queue[i]));
2292 free_irq(intr->msix_entries[vector].vector,
2294 BUG_ON(vector >= intr->num_intrs);
2298 case VMXNET3_IT_MSI:
2299 free_irq(adapter->pdev->irq, adapter->netdev);
2301 case VMXNET3_IT_INTX:
2302 free_irq(adapter->pdev->irq, adapter->netdev);
2311 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2313 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2316 /* allow untagged pkts */
2317 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2319 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2320 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
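/*
 * Illustrative note: vfTable is a 4096-bit VLAN bitmap stored as 32-bit
 * words, so enabling e.g. VID 100 sets bit 4 of word 3 (100 == 3 * 32 + 4),
 * while the VID 0 entry set above lets untagged frames through.
 */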
2325 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2327 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2329 if (!(netdev->flags & IFF_PROMISC)) {
2330 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2331 unsigned long flags;
2333 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2334 spin_lock_irqsave(&adapter->cmd_lock, flags);
2335 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2336 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2337 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2340 set_bit(vid, adapter->active_vlans);
2347 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2349 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2351 if (!(netdev->flags & IFF_PROMISC)) {
2352 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2353 unsigned long flags;
2355 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2356 spin_lock_irqsave(&adapter->cmd_lock, flags);
2357 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2358 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2359 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2362 clear_bit(vid, adapter->active_vlans);
2369 vmxnet3_copy_mc(struct net_device *netdev)
2372 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2374 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2376 /* We may be called with BH disabled */
2377 buf = kmalloc(sz, GFP_ATOMIC);
2379 struct netdev_hw_addr *ha;
2382 netdev_for_each_mc_addr(ha, netdev)
2383 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2392 vmxnet3_set_mc(struct net_device *netdev)
2394 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2395 unsigned long flags;
2396 struct Vmxnet3_RxFilterConf *rxConf =
2397 &adapter->shared->devRead.rxFilterConf;
2398 u8 *new_table = NULL;
2399 dma_addr_t new_table_pa = 0;
2400 bool new_table_pa_valid = false;
2401 u32 new_mode = VMXNET3_RXM_UCAST;
2403 if (netdev->flags & IFF_PROMISC) {
2404 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2405 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2407 new_mode |= VMXNET3_RXM_PROMISC;
2409 vmxnet3_restore_vlan(adapter);
2412 if (netdev->flags & IFF_BROADCAST)
2413 new_mode |= VMXNET3_RXM_BCAST;
2415 if (netdev->flags & IFF_ALLMULTI)
2416 new_mode |= VMXNET3_RXM_ALL_MULTI;
2418 if (!netdev_mc_empty(netdev)) {
2419 new_table = vmxnet3_copy_mc(netdev);
2421 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2423 rxConf->mfTableLen = cpu_to_le16(sz);
2424 new_table_pa = dma_map_single(
2425 &adapter->pdev->dev,
2429 if (!dma_mapping_error(&adapter->pdev->dev,
2431 new_mode |= VMXNET3_RXM_MCAST;
2432 new_table_pa_valid = true;
2433 rxConf->mfTablePA = cpu_to_le64(
2437 if (!new_table_pa_valid) {
2439 "failed to copy mcast list, setting ALL_MULTI\n");
2440 new_mode |= VMXNET3_RXM_ALL_MULTI;
2444 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2445 rxConf->mfTableLen = 0;
2446 rxConf->mfTablePA = 0;
2449 spin_lock_irqsave(&adapter->cmd_lock, flags);
2450 if (new_mode != rxConf->rxMode) {
2451 rxConf->rxMode = cpu_to_le32(new_mode);
2452 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2453 VMXNET3_CMD_UPDATE_RX_MODE);
2454 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2455 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2458 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2459 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2460 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2462 if (new_table_pa_valid)
2463 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2464 rxConf->mfTableLen, DMA_TO_DEVICE);
2469 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2473 for (i = 0; i < adapter->num_rx_queues; i++)
2474 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2479 * Set up driver_shared based on settings in adapter.
2483 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2485 struct Vmxnet3_DriverShared *shared = adapter->shared;
2486 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2487 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2488 struct Vmxnet3_TxQueueConf *tqc;
2489 struct Vmxnet3_RxQueueConf *rqc;
2492 memset(shared, 0, sizeof(*shared));
2494 /* driver settings */
2495 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2496 devRead->misc.driverInfo.version = cpu_to_le32(
2497 VMXNET3_DRIVER_VERSION_NUM);
2498 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2499 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2500 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2501 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2502 *((u32 *)&devRead->misc.driverInfo.gos));
2503 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2504 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2506 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2507 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2509 /* set up feature flags */
2510 if (adapter->netdev->features & NETIF_F_RXCSUM)
2511 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2513 if (adapter->netdev->features & NETIF_F_LRO) {
2514 devRead->misc.uptFeatures |= UPT1_F_LRO;
2515 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2517 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2518 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2520 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2521 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2522 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2524 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2525 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2526 devRead->misc.queueDescLen = cpu_to_le32(
2527 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2528 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2530 /* tx queue settings */
2531 devRead->misc.numTxQueues = adapter->num_tx_queues;
2532 for (i = 0; i < adapter->num_tx_queues; i++) {
2533 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2534 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2535 tqc = &adapter->tqd_start[i].conf;
2536 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2537 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2538 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2539 tqc->ddPA = cpu_to_le64(~0ULL);
2540 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2541 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2542 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2543 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2544 tqc->ddLen = cpu_to_le32(0);
2545 tqc->intrIdx = tq->comp_ring.intr_idx;
2548 /* rx queue settings */
2549 devRead->misc.numRxQueues = adapter->num_rx_queues;
2550 for (i = 0; i < adapter->num_rx_queues; i++) {
2551 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2552 rqc = &adapter->rqd_start[i].conf;
2553 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2554 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2555 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2556 rqc->ddPA = cpu_to_le64(~0ULL);
2557 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2558 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2559 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2560 rqc->ddLen = cpu_to_le32(0);
2561 rqc->intrIdx = rq->comp_ring.intr_idx;
2562 if (VMXNET3_VERSION_GE_3(adapter)) {
2563 rqc->rxDataRingBasePA =
2564 cpu_to_le64(rq->data_ring.basePA);
2565 rqc->rxDataRingDescSize =
2566 cpu_to_le16(rq->data_ring.desc_size);
2571 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2574 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2576 devRead->misc.uptFeatures |= UPT1_F_RSS;
2577 devRead->misc.numRxQueues = adapter->num_rx_queues;
2578 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2579 UPT1_RSS_HASH_TYPE_IPV4 |
2580 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2581 UPT1_RSS_HASH_TYPE_IPV6;
2582 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2583 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2584 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2585 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2587 for (i = 0; i < rssConf->indTableSize; i++)
2588 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2589 i, adapter->num_rx_queues);
2591 devRead->rssConfDesc.confVer = 1;
2592 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2593 devRead->rssConfDesc.confPA =
2594 cpu_to_le64(adapter->rss_conf_pa);
2597 #endif /* VMXNET3_RSS */
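/* Interrupt settings: devices older than version 6, or version 6 without
 * extended queues, use the legacy intrConf block; otherwise the extended
 * intrConfExt block in devReadExt is used.
 */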
2600 if (!VMXNET3_VERSION_GE_6(adapter) ||
2601 !adapter->queuesExtEnabled) {
2602 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2604 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2605 for (i = 0; i < adapter->intr.num_intrs; i++)
2606 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2608 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2609 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2611 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2613 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2614 for (i = 0; i < adapter->intr.num_intrs; i++)
2615 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2617 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2618 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2621 /* rx filter settings */
2622 devRead->rxFilterConf.rxMode = 0;
2623 vmxnet3_restore_vlan(adapter);
2624 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2626 /* the rest are already zeroed */
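/*
 * Program the interrupt coalescing scheme.  Only devices of version 3 or
 * newer support this; in the default mode the current scheme is read back
 * from the device instead of being set.
 */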
2630 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2632 struct Vmxnet3_DriverShared *shared = adapter->shared;
2633 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2634 unsigned long flags;
2636 if (!VMXNET3_VERSION_GE_3(adapter))
2639 spin_lock_irqsave(&adapter->cmd_lock, flags);
2640 cmdInfo->varConf.confVer = 1;
2641 cmdInfo->varConf.confLen =
2642 cpu_to_le32(sizeof(*adapter->coal_conf));
2643 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2645 if (adapter->default_coal_mode) {
2646 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2647 VMXNET3_CMD_GET_COALESCE);
2649 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2650 VMXNET3_CMD_SET_COALESCE);
2653 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2657 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2659 struct Vmxnet3_DriverShared *shared = adapter->shared;
2660 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2661 unsigned long flags;
2663 if (!VMXNET3_VERSION_GE_4(adapter))
2666 spin_lock_irqsave(&adapter->cmd_lock, flags);
2668 if (adapter->default_rss_fields) {
2669 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2670 VMXNET3_CMD_GET_RSS_FIELDS);
2671 adapter->rss_fields =
2672 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2674 cmdInfo->setRssFields = adapter->rss_fields;
2675 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2676 VMXNET3_CMD_SET_RSS_FIELDS);
2677 /* Not all requested RSS may get applied, so get and
2678 * cache what was actually applied.
2680 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2681 VMXNET3_CMD_GET_RSS_FIELDS);
2682 adapter->rss_fields =
2683 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2686 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
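/*
 * Activate the device: initialize the tx/rx rings, request IRQs, hand the
 * shared memory area to the device via DSAL/DSAH, issue ACTIVATE_DEV, then
 * publish the rx producer indices and enable interrupts.
 */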
2690 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2694 unsigned long flags;
2696 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2697 " ring sizes %u %u %u\n", adapter->netdev->name,
2698 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2699 adapter->tx_queue[0].tx_ring.size,
2700 adapter->rx_queue[0].rx_ring[0].size,
2701 adapter->rx_queue[0].rx_ring[1].size);
2703 vmxnet3_tq_init_all(adapter);
2704 err = vmxnet3_rq_init_all(adapter);
2706 netdev_err(adapter->netdev,
2707 "Failed to init rx queue: error %d\n", err);
2711 err = vmxnet3_request_irqs(adapter);
2713 netdev_err(adapter->netdev,
2714 "Failed to set up irqs: error %d\n", err);
2718 vmxnet3_setup_driver_shared(adapter);
2720 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2721 adapter->shared_pa));
2722 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2723 adapter->shared_pa));
2724 spin_lock_irqsave(&adapter->cmd_lock, flags);
2725 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2726 VMXNET3_CMD_ACTIVATE_DEV);
2727 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2728 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2731 netdev_err(adapter->netdev,
2732 "Failed to activate dev: error %u\n", ret);
2737 vmxnet3_init_coalesce(adapter);
2738 vmxnet3_init_rssfields(adapter);
2740 for (i = 0; i < adapter->num_rx_queues; i++) {
2741 VMXNET3_WRITE_BAR0_REG(adapter,
2742 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2743 adapter->rx_queue[i].rx_ring[0].next2fill);
2744 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2745 (i * VMXNET3_REG_ALIGN)),
2746 adapter->rx_queue[i].rx_ring[1].next2fill);
2749 /* Apply the rx filter settings last. */
2750 vmxnet3_set_mc(adapter->netdev);
2753 * Check link state when first activating device. It will start the
2754 * tx queue if the link is up.
2756 vmxnet3_check_link(adapter, true);
2757 netif_tx_wake_all_queues(adapter->netdev);
2758 for (i = 0; i < adapter->num_rx_queues; i++)
2759 napi_enable(&adapter->rx_queue[i].napi);
2760 vmxnet3_enable_all_intrs(adapter);
2761 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2765 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2766 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2767 vmxnet3_free_irqs(adapter);
2770 /* free up buffers we allocated */
2771 vmxnet3_rq_cleanup_all(adapter);
2777 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2779 unsigned long flags;
2780 spin_lock_irqsave(&adapter->cmd_lock, flags);
2781 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2782 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
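/*
 * Quiesce the device and tear down NAPI, the tx/rx rings and the IRQs.
 * Does nothing if the device is already quiesced.
 */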
2787 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2790 unsigned long flags;
2791 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2795 spin_lock_irqsave(&adapter->cmd_lock, flags);
2796 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2797 VMXNET3_CMD_QUIESCE_DEV);
2798 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2799 vmxnet3_disable_all_intrs(adapter);
2801 for (i = 0; i < adapter->num_rx_queues; i++)
2802 napi_disable(&adapter->rx_queue[i].napi);
2803 netif_tx_disable(adapter->netdev);
2804 adapter->link_speed = 0;
2805 netif_carrier_off(adapter->netdev);
2807 vmxnet3_tq_cleanup_all(adapter);
2808 vmxnet3_rq_cleanup_all(adapter);
2809 vmxnet3_free_irqs(adapter);
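/* MAC bytes 0-3 are programmed through VMXNET3_REG_MACL, bytes 4-5 through
 * the low 16 bits of VMXNET3_REG_MACH.
 */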
2815 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
2820 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2822 tmp = (mac[5] << 8) | mac[4];
2823 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2828 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2830 struct sockaddr *addr = p;
2831 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2833 dev_addr_set(netdev, addr->sa_data);
2834 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2840 /* ==================== initialization and cleanup routines ============ */
2843 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2846 unsigned long mmio_start, mmio_len;
2847 struct pci_dev *pdev = adapter->pdev;
2849 err = pci_enable_device(pdev);
2851 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2855 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2856 vmxnet3_driver_name);
2859 "Failed to request region for adapter: error %d\n", err);
2860 goto err_enable_device;
2863 pci_set_master(pdev);
2865 mmio_start = pci_resource_start(pdev, 0);
2866 mmio_len = pci_resource_len(pdev, 0);
2867 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2868 if (!adapter->hw_addr0) {
2869 dev_err(&pdev->dev, "Failed to map bar0\n");
2874 mmio_start = pci_resource_start(pdev, 1);
2875 mmio_len = pci_resource_len(pdev, 1);
2876 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2877 if (!adapter->hw_addr1) {
2878 dev_err(&pdev->dev, "Failed to map bar1\n");
2885 iounmap(adapter->hw_addr0);
2887 pci_release_selected_regions(pdev, (1 << 2) - 1);
2889 pci_disable_device(pdev);
2895 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2897 BUG_ON(!adapter->pdev);
2899 iounmap(adapter->hw_addr0);
2900 iounmap(adapter->hw_addr1);
2901 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2902 pci_disable_device(adapter->pdev);
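/*
 * Choose skb_buf_size and rx_buf_per_pkt for the current MTU, then round
 * the rx ring sizes so that ring0 stays a multiple of
 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN.
 */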
2907 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2909 size_t sz, i, ring0_size, ring1_size, comp_size;
2910 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2911 VMXNET3_MAX_ETH_HDR_SIZE) {
2912 adapter->skb_buf_size = adapter->netdev->mtu +
2913 VMXNET3_MAX_ETH_HDR_SIZE;
2914 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2915 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2917 adapter->rx_buf_per_pkt = 1;
2919 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2920 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2921 VMXNET3_MAX_ETH_HDR_SIZE;
2922 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2926 * for simplicity, force the ring0 size to be a multiple of
2927 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2929 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
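/* (x + sz - 1) / sz * sz rounds x up to the next multiple of sz. */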
2930 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2931 ring0_size = (ring0_size + sz - 1) / sz * sz;
2932 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2934 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2935 ring1_size = (ring1_size + sz - 1) / sz * sz;
2936 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2938 comp_size = ring0_size + ring1_size;
2940 for (i = 0; i < adapter->num_rx_queues; i++) {
2941 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2943 rq->rx_ring[0].size = ring0_size;
2944 rq->rx_ring[1].size = ring1_size;
2945 rq->comp_ring.size = comp_size;
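/*
 * Create all tx and rx queues with the requested ring sizes.  A tx queue
 * creation failure is fatal, while rx queue creation falls back to however
 * many queues could actually be created.
 */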
2951 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2952 u32 rx_ring_size, u32 rx_ring2_size,
2953 u16 txdata_desc_size, u16 rxdata_desc_size)
2957 for (i = 0; i < adapter->num_tx_queues; i++) {
2958 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2959 tq->tx_ring.size = tx_ring_size;
2960 tq->data_ring.size = tx_ring_size;
2961 tq->comp_ring.size = tx_ring_size;
2962 tq->txdata_desc_size = txdata_desc_size;
2963 tq->shared = &adapter->tqd_start[i].ctrl;
2965 tq->adapter = adapter;
2967 err = vmxnet3_tq_create(tq, adapter);
2969 * Too late to change num_tx_queues. We cannot fall back to
2970 * fewer queues than we asked for.
2976 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2977 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2978 vmxnet3_adjust_rx_ring_size(adapter);
2980 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2981 for (i = 0; i < adapter->num_rx_queues; i++) {
2982 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2983 /* qid and qid2 for rx queues will be assigned later when num
2984 * of rx queues is finalized after allocating intrs */
2985 rq->shared = &adapter->rqd_start[i].ctrl;
2986 rq->adapter = adapter;
2987 rq->data_ring.desc_size = rxdata_desc_size;
2988 err = vmxnet3_rq_create(rq, adapter);
2991 netdev_err(adapter->netdev,
2992 "Could not allocate any rx queues. "
2996 netdev_info(adapter->netdev,
2997 "Number of rx queues changed "
2999 adapter->num_rx_queues = i;
3006 if (!adapter->rxdataring_enabled)
3007 vmxnet3_rq_destroy_all_rxdataring(adapter);
3011 vmxnet3_tq_destroy_all(adapter);
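/*
 * ndo_open: query the tx data-ring descriptor size from the device (v3+),
 * create the queues and activate the device.
 */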
3016 vmxnet3_open(struct net_device *netdev)
3018 struct vmxnet3_adapter *adapter;
3021 adapter = netdev_priv(netdev);
3023 for (i = 0; i < adapter->num_tx_queues; i++)
3024 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3026 if (VMXNET3_VERSION_GE_3(adapter)) {
3027 unsigned long flags;
3028 u16 txdata_desc_size;
3030 spin_lock_irqsave(&adapter->cmd_lock, flags);
3031 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3032 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3033 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3035 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3037 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3038 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3039 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3040 adapter->txdata_desc_size =
3041 sizeof(struct Vmxnet3_TxDataDesc);
3043 adapter->txdata_desc_size = txdata_desc_size;
3046 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3049 err = vmxnet3_create_queues(adapter,
3050 adapter->tx_ring_size,
3051 adapter->rx_ring_size,
3052 adapter->rx_ring2_size,
3053 adapter->txdata_desc_size,
3054 adapter->rxdata_desc_size);
3058 err = vmxnet3_activate_dev(adapter);
3065 vmxnet3_rq_destroy_all(adapter);
3066 vmxnet3_tq_destroy_all(adapter);
3073 vmxnet3_close(struct net_device *netdev)
3075 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3078 * Reset_work may be in the middle of resetting the device, wait for its
3081 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3082 usleep_range(1000, 2000);
3084 vmxnet3_quiesce_dev(adapter);
3086 vmxnet3_rq_destroy_all(adapter);
3087 vmxnet3_tq_destroy_all(adapter);
3089 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3097 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3102 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3103 * vmxnet3_close() will deadlock.
3105 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3107 /* we need to enable NAPI, otherwise dev_close will deadlock */
3108 for (i = 0; i < adapter->num_rx_queues; i++)
3109 napi_enable(&adapter->rx_queue[i].napi);
3111 * Need to clear the quiesce bit to ensure that vmxnet3_close
3112 * can quiesce the device properly
3114 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3115 dev_close(adapter->netdev);
3120 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3122 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3125 netdev->mtu = new_mtu;
3128 * Reset_work may be in the middle of resetting the device, wait for its
3131 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3132 usleep_range(1000, 2000);
3134 if (netif_running(netdev)) {
3135 vmxnet3_quiesce_dev(adapter);
3136 vmxnet3_reset_dev(adapter);
3138 /* we need to re-create the rx queue based on the new mtu */
3139 vmxnet3_rq_destroy_all(adapter);
3140 vmxnet3_adjust_rx_ring_size(adapter);
3141 err = vmxnet3_rq_create_all(adapter);
3144 "failed to re-create rx queues, "
3145 " error %d. Closing it.\n", err);
3149 err = vmxnet3_activate_dev(adapter);
3152 "failed to re-activate, error %d. "
3153 "Closing it\n", err);
3159 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3161 vmxnet3_force_close(adapter);
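/*
 * Advertise the offloads the device supports: checksum, TSO, VLAN tag
 * insertion/stripping and LRO; UDP tunnel offloads are added for devices
 * of version 4 or newer.
 */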
3168 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3170 struct net_device *netdev = adapter->netdev;
3172 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3173 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3174 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3175 NETIF_F_LRO | NETIF_F_HIGHDMA;
3177 if (VMXNET3_VERSION_GE_4(adapter)) {
3178 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3179 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3181 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3182 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3183 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3184 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3185 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3188 netdev->vlan_features = netdev->hw_features &
3189 ~(NETIF_F_HW_VLAN_CTAG_TX |
3190 NETIF_F_HW_VLAN_CTAG_RX);
3191 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3196 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3200 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3203 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3204 mac[4] = tmp & 0xff;
3205 mac[5] = (tmp >> 8) & 0xff;
3208 #ifdef CONFIG_PCI_MSI
3211 * Enable MSI-X vectors.
3213 * Returns VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of
3215 * required vectors could be enabled, otherwise the number of vectors that
3216 * were enabled (this number is greater than VMXNET3_LINUX_MIN_MSIX_VECT).
3220 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3222 int ret = pci_enable_msix_range(adapter->pdev,
3223 adapter->intr.msix_entries, nvec, nvec);
3225 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3226 dev_err(&adapter->netdev->dev,
3227 "Failed to enable %d MSI-X, trying %d\n",
3228 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3230 ret = pci_enable_msix_range(adapter->pdev,
3231 adapter->intr.msix_entries,
3232 VMXNET3_LINUX_MIN_MSIX_VECT,
3233 VMXNET3_LINUX_MIN_MSIX_VECT);
3237 dev_err(&adapter->netdev->dev,
3238 "Failed to enable MSI-X, error: %d\n", ret);
3245 #endif /* CONFIG_PCI_MSI */
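/*
 * Query the interrupt configuration from the device, then try MSI-X, fall
 * back to MSI and finally to INTx.  Whenever one vector per queue cannot
 * be allocated, the number of rx queues is reduced to 1.
 */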
3248 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3251 unsigned long flags;
3254 spin_lock_irqsave(&adapter->cmd_lock, flags);
3255 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3256 VMXNET3_CMD_GET_CONF_INTR);
3257 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3258 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3259 adapter->intr.type = cfg & 0x3;
3260 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3262 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3263 adapter->intr.type = VMXNET3_IT_MSIX;
3266 #ifdef CONFIG_PCI_MSI
3267 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3268 int i, nvec, nvec_allocated;
3270 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3271 1 : adapter->num_tx_queues;
3272 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3273 0 : adapter->num_rx_queues;
3274 nvec += 1; /* for link event */
3275 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3276 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3278 for (i = 0; i < nvec; i++)
3279 adapter->intr.msix_entries[i].entry = i;
3281 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3282 if (nvec_allocated < 0)
3285 /* If we cannot allocate one MSIx vector per queue
3286 * then limit the number of rx queues to 1
3288 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3289 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3290 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3291 || adapter->num_rx_queues != 1) {
3292 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3293 netdev_err(adapter->netdev,
3294 "Number of rx queues : 1\n");
3295 adapter->num_rx_queues = 1;
3299 adapter->intr.num_intrs = nvec_allocated;
3303 /* If we cannot allocate MSIx vectors use only one rx queue */
3304 dev_info(&adapter->pdev->dev,
3305 "Failed to enable MSI-X, error %d. "
3306 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3308 adapter->intr.type = VMXNET3_IT_MSI;
3311 if (adapter->intr.type == VMXNET3_IT_MSI) {
3312 if (!pci_enable_msi(adapter->pdev)) {
3313 adapter->num_rx_queues = 1;
3314 adapter->intr.num_intrs = 1;
3318 #endif /* CONFIG_PCI_MSI */
3320 adapter->num_rx_queues = 1;
3321 dev_info(&adapter->netdev->dev,
3322 "Using INTx interrupt, #Rx queues: 1.\n");
3323 adapter->intr.type = VMXNET3_IT_INTX;
3325 /* INT-X related setting */
3326 adapter->intr.num_intrs = 1;
3331 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3333 if (adapter->intr.type == VMXNET3_IT_MSIX)
3334 pci_disable_msix(adapter->pdev);
3335 else if (adapter->intr.type == VMXNET3_IT_MSI)
3336 pci_disable_msi(adapter->pdev);
3338 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3343 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3345 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3346 adapter->tx_timeout_count++;
3348 netdev_err(adapter->netdev, "tx hang\n");
3349 schedule_work(&adapter->work);
3354 vmxnet3_reset_work(struct work_struct *data)
3356 struct vmxnet3_adapter *adapter;
3358 adapter = container_of(data, struct vmxnet3_adapter, work);
3360 /* if another thread is resetting the device, no need to proceed */
3361 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3364 /* if the device is closed, we must leave it alone */
3366 if (netif_running(adapter->netdev)) {
3367 netdev_notice(adapter->netdev, "resetting\n");
3368 vmxnet3_quiesce_dev(adapter);
3369 vmxnet3_reset_dev(adapter);
3370 vmxnet3_activate_dev(adapter);
3372 netdev_info(adapter->netdev, "already closed\n");
3376 netif_wake_queue(adapter->netdev);
3377 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
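/*
 * PCI probe: allocate the net_device, map the BARs, negotiate the device
 * and UPT revisions, size the queues, allocate the shared DMA areas and
 * register the netdev.
 */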
3382 vmxnet3_probe_device(struct pci_dev *pdev,
3383 const struct pci_device_id *id)
3385 static const struct net_device_ops vmxnet3_netdev_ops = {
3386 .ndo_open = vmxnet3_open,
3387 .ndo_stop = vmxnet3_close,
3388 .ndo_start_xmit = vmxnet3_xmit_frame,
3389 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3390 .ndo_change_mtu = vmxnet3_change_mtu,
3391 .ndo_fix_features = vmxnet3_fix_features,
3392 .ndo_set_features = vmxnet3_set_features,
3393 .ndo_features_check = vmxnet3_features_check,
3394 .ndo_get_stats64 = vmxnet3_get_stats64,
3395 .ndo_tx_timeout = vmxnet3_tx_timeout,
3396 .ndo_set_rx_mode = vmxnet3_set_mc,
3397 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3398 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3399 #ifdef CONFIG_NET_POLL_CONTROLLER
3400 .ndo_poll_controller = vmxnet3_netpoll,
3405 struct net_device *netdev;
3406 struct vmxnet3_adapter *adapter;
3412 unsigned long flags;
3414 if (!pci_msi_enabled())
3419 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3420 (int)num_online_cpus());
3426 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3427 (int)num_online_cpus());
3431 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3432 max(num_tx_queues, num_rx_queues));
3436 pci_set_drvdata(pdev, netdev);
3437 adapter = netdev_priv(netdev);
3438 adapter->netdev = netdev;
3439 adapter->pdev = pdev;
3441 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3442 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3443 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3445 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3447 dev_err(&pdev->dev, "dma_set_mask failed\n");
3451 spin_lock_init(&adapter->cmd_lock);
3452 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3453 sizeof(struct vmxnet3_adapter),
3455 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3456 dev_err(&pdev->dev, "Failed to map dma\n");
3460 adapter->shared = dma_alloc_coherent(
3461 &adapter->pdev->dev,
3462 sizeof(struct Vmxnet3_DriverShared),
3463 &adapter->shared_pa, GFP_KERNEL);
3464 if (!adapter->shared) {
3465 dev_err(&pdev->dev, "Failed to allocate memory\n");
3467 goto err_alloc_shared;
3470 err = vmxnet3_alloc_pci_resources(adapter);
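/* Negotiate the highest hardware revision supported by both sides, from
 * REV_6 down to REV_1.
 */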
3474 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3475 if (ver & (1 << VMXNET3_REV_6)) {
3476 VMXNET3_WRITE_BAR1_REG(adapter,
3478 1 << VMXNET3_REV_6);
3479 adapter->version = VMXNET3_REV_6 + 1;
3480 } else if (ver & (1 << VMXNET3_REV_5)) {
3481 VMXNET3_WRITE_BAR1_REG(adapter,
3483 1 << VMXNET3_REV_5);
3484 adapter->version = VMXNET3_REV_5 + 1;
3485 } else if (ver & (1 << VMXNET3_REV_4)) {
3486 VMXNET3_WRITE_BAR1_REG(adapter,
3488 1 << VMXNET3_REV_4);
3489 adapter->version = VMXNET3_REV_4 + 1;
3490 } else if (ver & (1 << VMXNET3_REV_3)) {
3491 VMXNET3_WRITE_BAR1_REG(adapter,
3493 1 << VMXNET3_REV_3);
3494 adapter->version = VMXNET3_REV_3 + 1;
3495 } else if (ver & (1 << VMXNET3_REV_2)) {
3496 VMXNET3_WRITE_BAR1_REG(adapter,
3498 1 << VMXNET3_REV_2);
3499 adapter->version = VMXNET3_REV_2 + 1;
3500 } else if (ver & (1 << VMXNET3_REV_1)) {
3501 VMXNET3_WRITE_BAR1_REG(adapter,
3503 1 << VMXNET3_REV_1);
3504 adapter->version = VMXNET3_REV_1 + 1;
3507 "Incompatible h/w version (0x%x) for adapter\n", ver);
3511 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3513 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3515 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3518 "Incompatible upt version (0x%x) for adapter\n", ver);
3523 if (VMXNET3_VERSION_GE_6(adapter)) {
3524 spin_lock_irqsave(&adapter->cmd_lock, flags);
3525 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3526 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3527 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3528 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3530 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3531 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3533 adapter->num_rx_queues = min(num_rx_queues,
3534 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3535 adapter->num_tx_queues = min(num_tx_queues,
3536 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3538 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3539 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3540 adapter->queuesExtEnabled = true;
3542 adapter->queuesExtEnabled = false;
3545 adapter->queuesExtEnabled = false;
3546 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3547 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3548 adapter->num_rx_queues = min(num_rx_queues,
3549 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3550 adapter->num_tx_queues = min(num_tx_queues,
3551 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3553 dev_info(&pdev->dev,
3554 "# of Tx queues : %d, # of Rx queues : %d\n",
3555 adapter->num_tx_queues, adapter->num_rx_queues);
3557 adapter->rx_buf_per_pkt = 1;
3559 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3560 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3561 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3562 &adapter->queue_desc_pa,
3565 if (!adapter->tqd_start) {
3566 dev_err(&pdev->dev, "Failed to allocate memory\n");
3570 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3571 adapter->num_tx_queues);
3573 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3574 sizeof(struct Vmxnet3_PMConf),
3575 &adapter->pm_conf_pa,
3577 if (adapter->pm_conf == NULL) {
3584 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3585 sizeof(struct UPT1_RSSConf),
3586 &adapter->rss_conf_pa,
3588 if (adapter->rss_conf == NULL) {
3592 #endif /* VMXNET3_RSS */
3594 if (VMXNET3_VERSION_GE_3(adapter)) {
3595 adapter->coal_conf =
3596 dma_alloc_coherent(&adapter->pdev->dev,
3597 sizeof(struct Vmxnet3_CoalesceScheme)
3599 &adapter->coal_conf_pa,
3601 if (!adapter->coal_conf) {
3605 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3606 adapter->default_coal_mode = true;
3609 if (VMXNET3_VERSION_GE_4(adapter)) {
3610 adapter->default_rss_fields = true;
3611 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3614 SET_NETDEV_DEV(netdev, &pdev->dev);
3615 vmxnet3_declare_features(adapter);
3617 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3618 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3620 if (adapter->num_tx_queues == adapter->num_rx_queues)
3621 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3623 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3625 vmxnet3_alloc_intr_resources(adapter);
3628 if (adapter->num_rx_queues > 1 &&
3629 adapter->intr.type == VMXNET3_IT_MSIX) {
3630 adapter->rss = true;
3631 netdev->hw_features |= NETIF_F_RXHASH;
3632 netdev->features |= NETIF_F_RXHASH;
3633 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3635 adapter->rss = false;
3639 vmxnet3_read_mac_addr(adapter, mac);
3640 dev_addr_set(netdev, mac);
3642 netdev->netdev_ops = &vmxnet3_netdev_ops;
3643 vmxnet3_set_ethtool_ops(netdev);
3644 netdev->watchdog_timeo = 5 * HZ;
3646 /* MTU range: 60 - 9190 */
3647 netdev->min_mtu = VMXNET3_MIN_MTU;
3648 if (VMXNET3_VERSION_GE_6(adapter))
3649 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
3651 netdev->max_mtu = VMXNET3_MAX_MTU;
3653 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3654 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3656 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3658 for (i = 0; i < adapter->num_rx_queues; i++) {
3659 netif_napi_add(adapter->netdev,
3660 &adapter->rx_queue[i].napi,
3661 vmxnet3_poll_rx_only, 64);
3664 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3668 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3669 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3671 netif_carrier_off(netdev);
3672 err = register_netdev(netdev);
3675 dev_err(&pdev->dev, "Failed to register adapter\n");
3679 vmxnet3_check_link(adapter, false);
3683 if (VMXNET3_VERSION_GE_3(adapter)) {
3684 dma_free_coherent(&adapter->pdev->dev,
3685 sizeof(struct Vmxnet3_CoalesceScheme),
3686 adapter->coal_conf, adapter->coal_conf_pa);
3688 vmxnet3_free_intr_resources(adapter);
3691 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3692 adapter->rss_conf, adapter->rss_conf_pa);
3695 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3696 adapter->pm_conf, adapter->pm_conf_pa);
3698 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3699 adapter->queue_desc_pa);
3701 vmxnet3_free_pci_resources(adapter);
3703 dma_free_coherent(&adapter->pdev->dev,
3704 sizeof(struct Vmxnet3_DriverShared),
3705 adapter->shared, adapter->shared_pa);
3707 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3708 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
3710 free_netdev(netdev);
3716 vmxnet3_remove_device(struct pci_dev *pdev)
3718 struct net_device *netdev = pci_get_drvdata(pdev);
3719 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3721 int num_rx_queues, rx_queues;
3722 unsigned long flags;
3726 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3727 (int)num_online_cpus());
3731 if (!VMXNET3_VERSION_GE_6(adapter)) {
3732 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3734 if (VMXNET3_VERSION_GE_6(adapter)) {
3735 spin_lock_irqsave(&adapter->cmd_lock, flags);
3736 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3737 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3738 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3739 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3741 rx_queues = (rx_queues >> 8) & 0xff;
3743 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3744 num_rx_queues = min(num_rx_queues, rx_queues);
3746 num_rx_queues = min(num_rx_queues,
3747 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3750 cancel_work_sync(&adapter->work);
3752 unregister_netdev(netdev);
3754 vmxnet3_free_intr_resources(adapter);
3755 vmxnet3_free_pci_resources(adapter);
3756 if (VMXNET3_VERSION_GE_3(adapter)) {
3757 dma_free_coherent(&adapter->pdev->dev,
3758 sizeof(struct Vmxnet3_CoalesceScheme),
3759 adapter->coal_conf, adapter->coal_conf_pa);
3762 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3763 adapter->rss_conf, adapter->rss_conf_pa);
3765 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3766 adapter->pm_conf, adapter->pm_conf_pa);
3768 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3769 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3770 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3771 adapter->queue_desc_pa);
3772 dma_free_coherent(&adapter->pdev->dev,
3773 sizeof(struct Vmxnet3_DriverShared),
3774 adapter->shared, adapter->shared_pa);
3775 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3776 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
3777 free_netdev(netdev);
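/* pci .shutdown callback: quiesce the device and mask its interrupts;
 * nothing is freed here.
 */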
3780 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3782 struct net_device *netdev = pci_get_drvdata(pdev);
3783 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3784 unsigned long flags;
3786 /* Reset_work may be in the middle of resetting the device, wait for its
3789 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3790 usleep_range(1000, 2000);
3792 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3794 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3797 spin_lock_irqsave(&adapter->cmd_lock, flags);
3798 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3799 VMXNET3_CMD_QUIESCE_DEV);
3800 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3801 vmxnet3_disable_all_intrs(adapter);
3803 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
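/*
 * PM suspend: release interrupts, build the wake-up filters selected by
 * adapter->wol (unicast MAC, ARP requests for our address, magic packet)
 * and arm wake-on-LAN before powering the device down.
 */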
3810 vmxnet3_suspend(struct device *device)
3812 struct pci_dev *pdev = to_pci_dev(device);
3813 struct net_device *netdev = pci_get_drvdata(pdev);
3814 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3815 struct Vmxnet3_PMConf *pmConf;
3816 struct ethhdr *ehdr;
3817 struct arphdr *ahdr;
3819 struct in_device *in_dev;
3820 struct in_ifaddr *ifa;
3821 unsigned long flags;
3824 if (!netif_running(netdev))
3827 for (i = 0; i < adapter->num_rx_queues; i++)
3828 napi_disable(&adapter->rx_queue[i].napi);
3830 vmxnet3_disable_all_intrs(adapter);
3831 vmxnet3_free_irqs(adapter);
3832 vmxnet3_free_intr_resources(adapter);
3834 netif_device_detach(netdev);
3836 /* Create wake-up filters. */
3837 pmConf = adapter->pm_conf;
3838 memset(pmConf, 0, sizeof(*pmConf));
3840 if (adapter->wol & WAKE_UCAST) {
3841 pmConf->filters[i].patternSize = ETH_ALEN;
3842 pmConf->filters[i].maskSize = 1;
3843 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3844 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3846 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3850 if (adapter->wol & WAKE_ARP) {
3853 in_dev = __in_dev_get_rcu(netdev);
3859 ifa = rcu_dereference(in_dev->ifa_list);
3865 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3866 sizeof(struct arphdr) + /* ARP header */
3867 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3868 2 * sizeof(u32); /*2 IPv4 addresses */
3869 pmConf->filters[i].maskSize =
3870 (pmConf->filters[i].patternSize - 1) / 8 + 1;
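/* Each bit in mask[] selects one byte of pattern[] for comparison, so
 * maskSize is the pattern size rounded up to whole mask bytes.
 */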
3872 /* ETH_P_ARP in Ethernet header. */
3873 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3874 ehdr->h_proto = htons(ETH_P_ARP);
3876 /* ARPOP_REQUEST in ARP header. */
3877 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3878 ahdr->ar_op = htons(ARPOP_REQUEST);
3879 arpreq = (u8 *)(ahdr + 1);
3881 /* The Unicast IPv4 address in 'tip' field. */
3882 arpreq += 2 * ETH_ALEN + sizeof(u32);
3883 *(__be32 *)arpreq = ifa->ifa_address;
3887 /* The mask for the relevant bits. */
3888 pmConf->filters[i].mask[0] = 0x00;
3889 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3890 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3891 pmConf->filters[i].mask[3] = 0x00;
3892 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3893 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3895 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3900 if (adapter->wol & WAKE_MAGIC)
3901 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3903 pmConf->numFilters = i;
3905 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3906 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3908 adapter->shared->devRead.pmConfDesc.confPA =
3909 cpu_to_le64(adapter->pm_conf_pa);
3911 spin_lock_irqsave(&adapter->cmd_lock, flags);
3912 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3913 VMXNET3_CMD_UPDATE_PMCFG);
3914 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3916 pci_save_state(pdev);
3917 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3919 pci_disable_device(pdev);
3920 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
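/*
 * PM resume: re-enable the PCI device, reallocate interrupt vectors, then
 * quiesce and fully re-activate the device so the rings are rebuilt from
 * scratch.
 */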
3927 vmxnet3_resume(struct device *device)
3930 unsigned long flags;
3931 struct pci_dev *pdev = to_pci_dev(device);
3932 struct net_device *netdev = pci_get_drvdata(pdev);
3933 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3935 if (!netif_running(netdev))
3938 pci_set_power_state(pdev, PCI_D0);
3939 pci_restore_state(pdev);
3940 err = pci_enable_device_mem(pdev);
3944 pci_enable_wake(pdev, PCI_D0, 0);
3946 vmxnet3_alloc_intr_resources(adapter);
3948 /* During hibernate and suspend, the device has to be reinitialized
3949 * because the device state is not necessarily preserved. */
3952 /* Need not check adapter state as other reset tasks cannot run during resume. */
3955 spin_lock_irqsave(&adapter->cmd_lock, flags);
3956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3957 VMXNET3_CMD_QUIESCE_DEV);
3958 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3959 vmxnet3_tq_cleanup_all(adapter);
3960 vmxnet3_rq_cleanup_all(adapter);
3962 vmxnet3_reset_dev(adapter);
3963 err = vmxnet3_activate_dev(adapter);
3966 "failed to re-activate on resume, error: %d", err);
3967 vmxnet3_force_close(adapter);
3970 netif_device_attach(netdev);
3975 static const struct dev_pm_ops vmxnet3_pm_ops = {
3976 .suspend = vmxnet3_suspend,
3977 .resume = vmxnet3_resume,
3978 .freeze = vmxnet3_suspend,
3979 .restore = vmxnet3_resume,
3983 static struct pci_driver vmxnet3_driver = {
3984 .name = vmxnet3_driver_name,
3985 .id_table = vmxnet3_pciid_table,
3986 .probe = vmxnet3_probe_device,
3987 .remove = vmxnet3_remove_device,
3988 .shutdown = vmxnet3_shutdown_device,
3990 .driver.pm = &vmxnet3_pm_ops,
3996 vmxnet3_init_module(void)
3998 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3999 VMXNET3_DRIVER_VERSION_REPORT);
4000 return pci_register_driver(&vmxnet3_driver);
4003 module_init(vmxnet3_init_module);
4007 vmxnet3_exit_module(void)
4009 pci_unregister_driver(&vmxnet3_driver);
4012 module_exit(vmxnet3_exit_module);
4014 MODULE_AUTHOR("VMware, Inc.");
4015 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4016 MODULE_LICENSE("GPL v2");
4017 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);