1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
16 /* Messages are passed between the VNIC driver and the VNIC server using */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20 /* are used by the driver to notify the server that a packet is */
21 /* ready for transmission or that a buffer has been added to receive a */
22 /* packet. Subsequently, sCRQs are used by the server to notify the */
23 /* driver that a packet transmission has been completed or that a packet */
24 /* has been received and placed in a waiting buffer. */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit */
28 /* or receive has been completed, the VNIC driver is required to use */
29 /* "long term mapping". This entails that large, continuous DMA mapped */
30 /* buffers are allocated on driver initialization and these buffers are */
31 /* then continuously reused to pass skbs to and from the VNIC server. */
33 /**************************************************************************/
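/* Illustrative sketch (not upstream text) of the long term mapping
 * flow described above, mirroring replenish_rx_pool() and
 * ibmvnic_xmit() below: data is copied into a slot of the pre-mapped
 * buffer and only that slot's DMA offset is handed to the server:
 *
 *	offset = index * pool->buff_size;
 *	dst = pool->long_term_buff.buff + offset;
 *	dma_addr = pool->long_term_buff.addr + offset;
 *	sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 *
 * No per-packet dma_map_single()/dma_unmap_single() is required.
 */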
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
63 #include <asm/iommu.h>
64 #include <linux/uaccess.h>
65 #include <asm/firmware.h>
66 #include <linux/workqueue.h>
67 #include <linux/if_vlan.h>
68 #include <linux/utsname.h>
72 static const char ibmvnic_driver_name[] = "ibmvnic";
73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
75 MODULE_AUTHOR("Santiago Leon");
76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
81 static int ibmvnic_remove(struct vio_dev *);
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
88 union sub_crq *sub_crq);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int budget);
100 static void send_query_map(struct ibmvnic_adapter *adapter);
101 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
102 static int send_request_unmap(struct ibmvnic_adapter *, u8);
103 static int send_login(struct ibmvnic_adapter *adapter);
104 static void send_query_cap(struct ibmvnic_adapter *adapter);
105 static int init_sub_crqs(struct ibmvnic_adapter *);
106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
107 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
108 static void release_crq_queue(struct ibmvnic_adapter *);
109 static int __ibmvnic_set_mac(struct net_device *, u8 *);
110 static int init_crq_queue(struct ibmvnic_adapter *adapter);
111 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
113 struct ibmvnic_stat {
114 char name[ETH_GSTRING_LEN];
115 int offset;
116 };
118 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 offsetof(struct ibmvnic_statistics, stat))
120 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
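/* Illustrative use of the two macros above: the expression
 *
 *	IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 *
 * reads adapter->stats.rx_packets through a byte offset, letting the
 * ethtool callbacks (not shown here) walk ibmvnic_stats[] generically.
 */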
122 static const struct ibmvnic_stat ibmvnic_stats[] = {
123 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
147 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
148 unsigned long length, unsigned long *number,
149 unsigned long *irq)
151 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
152 long rc;
154 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
155 *number = retbuf[0];
156 *irq = retbuf[1];
158 return rc;
161 /**
162 * ibmvnic_wait_for_completion - Check device state and wait for completion
163 * @adapter: private device data
164 * @comp_done: completion structure to wait for
165 * @timeout: time to wait in milliseconds
167 * Wait for a completion signal or until the timeout limit is reached
168 * while checking that the device is still active.
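* Return: 0 if the completion arrived in time, -ENODEV if the device
* went down while waiting, or -ETIMEDOUT if the timeout expired.
*/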
170 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
171 struct completion *comp_done,
172 unsigned long timeout)
174 struct net_device *netdev;
175 unsigned long div_timeout;
176 u8 retry;
178 netdev = adapter->netdev;
179 retry = 5;
180 div_timeout = msecs_to_jiffies(timeout / retry);
182 if (!adapter->crq.active) {
183 netdev_err(netdev, "Device down!\n");
188 if (wait_for_completion_timeout(comp_done, div_timeout))
191 netdev_err(netdev, "Operation timed out.\n");
195 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
196 struct ibmvnic_long_term_buff *ltb, int size)
198 struct device *dev = &adapter->vdev->dev;
202 ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr,
206 dev_err(dev, "Couldn't alloc long term buffer\n");
209 ltb->map_id = adapter->map_id;
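/* The sequence below is the pattern used for firmware commands that
 * complete via fw_done: take fw_lock, clear fw_done_rc, reinit the
 * completion, send the CRQ, then ibmvnic_wait_for_completion().
 * reset_long_term_buff() and ibmvnic_get_vpd() follow the same steps.
 */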
212 mutex_lock(&adapter->fw_lock);
213 adapter->fw_done_rc = 0;
214 reinit_completion(&adapter->fw_done);
216 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
218 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
222 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
225 "Long term map request aborted or timed out,rc = %d\n",
230 if (adapter->fw_done_rc) {
231 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
232 adapter->fw_done_rc);
239 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
242 mutex_unlock(&adapter->fw_lock);
246 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
247 struct ibmvnic_long_term_buff *ltb)
249 struct device *dev = &adapter->vdev->dev;
254 /* VIOS automatically unmaps the long term buffer at remote
255 * end for the following resets:
256 * FAILOVER, MOBILITY, TIMEOUT.
257 */
258 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
259 adapter->reset_reason != VNIC_RESET_MOBILITY &&
260 adapter->reset_reason != VNIC_RESET_TIMEOUT)
261 send_request_unmap(adapter, ltb->map_id);
262 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
267 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
268 struct ibmvnic_long_term_buff *ltb)
270 struct device *dev = &adapter->vdev->dev;
273 memset(ltb->buff, 0, ltb->size);
275 mutex_lock(&adapter->fw_lock);
276 adapter->fw_done_rc = 0;
278 reinit_completion(&adapter->fw_done);
279 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
281 mutex_unlock(&adapter->fw_lock);
285 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
288 "Reset failed, long term map request timed out or aborted\n");
289 mutex_unlock(&adapter->fw_lock);
293 if (adapter->fw_done_rc) {
295 "Reset failed, attempting to free and reallocate buffer\n");
296 free_long_term_buff(adapter, ltb);
297 mutex_unlock(&adapter->fw_lock);
298 return alloc_long_term_buff(adapter, ltb, ltb->size);
300 mutex_unlock(&adapter->fw_lock);
304 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
308 for (i = 0; i < adapter->num_active_rx_pools; i++)
309 adapter->rx_pool[i].active = 0;
312 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
313 struct ibmvnic_rx_pool *pool)
315 int count = pool->size - atomic_read(&pool->available);
316 u64 handle = adapter->rx_scrq[pool->index]->handle;
317 struct device *dev = &adapter->vdev->dev;
318 int buffers_added = 0;
319 unsigned long lpar_rc;
320 union sub_crq sub_crq;
332 for (i = 0; i < count; ++i) {
333 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
335 dev_err(dev, "Couldn't replenish rx buff\n");
336 adapter->replenish_no_mem++;
340 index = pool->free_map[pool->next_free];
342 if (pool->rx_buff[index].skb)
343 dev_err(dev, "Inconsistent free_map!\n");
345 /* Copy the skb to the long term mapped DMA buffer */
346 offset = index * pool->buff_size;
347 dst = pool->long_term_buff.buff + offset;
348 memset(dst, 0, pool->buff_size);
349 dma_addr = pool->long_term_buff.addr + offset;
350 pool->rx_buff[index].data = dst;
352 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
353 pool->rx_buff[index].dma = dma_addr;
354 pool->rx_buff[index].skb = skb;
355 pool->rx_buff[index].pool_index = pool->index;
356 pool->rx_buff[index].size = pool->buff_size;
358 memset(&sub_crq, 0, sizeof(sub_crq));
359 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
360 sub_crq.rx_add.correlator =
361 cpu_to_be64((u64)&pool->rx_buff[index]);
362 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
363 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
365 /* The length field of the sCRQ is defined to be 24 bits so the
366 * buffer size needs to be left shifted by a byte before it is
367 * converted to big endian to prevent the last byte from being
368 * discarded.
369 */
370 #ifdef __LITTLE_ENDIAN__
371 shift = 8;
372 #endif
373 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
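/* Worked example of the shift above (illustrative): a 9014-byte
 * buffer is 0x2336; shifted it becomes 0x233600, so all significant
 * bits survive the conversion into the 24-bit length field.
 */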
375 lpar_rc = send_subcrq(adapter, handle, &sub_crq);
376 if (lpar_rc != H_SUCCESS)
380 adapter->replenish_add_buff_success++;
381 pool->next_free = (pool->next_free + 1) % pool->size;
383 atomic_add(buffers_added, &pool->available);
387 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
388 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
389 pool->free_map[pool->next_free] = index;
390 pool->rx_buff[index].skb = NULL;
392 dev_kfree_skb_any(skb);
393 adapter->replenish_add_buff_failure++;
394 atomic_add(buffers_added, &pool->available);
396 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
397 /* Disable buffer pool replenishment and report carrier off if
398 * queue is closed or pending failover.
399 * Firmware guarantees that a signal will be sent to the
400 * driver, triggering a reset.
401 */
402 deactivate_rx_pools(adapter);
403 netif_carrier_off(adapter->netdev);
407 static void replenish_pools(struct ibmvnic_adapter *adapter)
411 adapter->replenish_task_cycles++;
412 for (i = 0; i < adapter->num_active_rx_pools; i++) {
413 if (adapter->rx_pool[i].active)
414 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
417 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
420 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
422 kfree(adapter->tx_stats_buffers);
423 kfree(adapter->rx_stats_buffers);
424 adapter->tx_stats_buffers = NULL;
425 adapter->rx_stats_buffers = NULL;
428 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
430 adapter->tx_stats_buffers =
431 kcalloc(IBMVNIC_MAX_QUEUES,
432 sizeof(struct ibmvnic_tx_queue_stats),
434 if (!adapter->tx_stats_buffers)
437 adapter->rx_stats_buffers =
438 kcalloc(IBMVNIC_MAX_QUEUES,
439 sizeof(struct ibmvnic_rx_queue_stats),
441 if (!adapter->rx_stats_buffers)
447 static void release_stats_token(struct ibmvnic_adapter *adapter)
449 struct device *dev = &adapter->vdev->dev;
451 if (!adapter->stats_token)
454 dma_unmap_single(dev, adapter->stats_token,
455 sizeof(struct ibmvnic_statistics),
457 adapter->stats_token = 0;
460 static int init_stats_token(struct ibmvnic_adapter *adapter)
462 struct device *dev = &adapter->vdev->dev;
465 stok = dma_map_single(dev, &adapter->stats,
466 sizeof(struct ibmvnic_statistics),
468 if (dma_mapping_error(dev, stok)) {
469 dev_err(dev, "Couldn't map stats buffer\n");
473 adapter->stats_token = stok;
474 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
478 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
480 struct ibmvnic_rx_pool *rx_pool;
485 if (!adapter->rx_pool)
488 buff_size = adapter->cur_rx_buf_sz;
489 rx_scrqs = adapter->num_active_rx_pools;
490 for (i = 0; i < rx_scrqs; i++) {
491 rx_pool = &adapter->rx_pool[i];
493 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
495 if (rx_pool->buff_size != buff_size) {
496 free_long_term_buff(adapter, &rx_pool->long_term_buff);
497 rx_pool->buff_size = buff_size;
498 rc = alloc_long_term_buff(adapter,
499 &rx_pool->long_term_buff,
503 rc = reset_long_term_buff(adapter,
504 &rx_pool->long_term_buff);
510 for (j = 0; j < rx_pool->size; j++)
511 rx_pool->free_map[j] = j;
513 memset(rx_pool->rx_buff, 0,
514 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
516 atomic_set(&rx_pool->available, 0);
517 rx_pool->next_alloc = 0;
518 rx_pool->next_free = 0;
525 static void release_rx_pools(struct ibmvnic_adapter *adapter)
527 struct ibmvnic_rx_pool *rx_pool;
530 if (!adapter->rx_pool)
533 for (i = 0; i < adapter->num_active_rx_pools; i++) {
534 rx_pool = &adapter->rx_pool[i];
536 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
538 kfree(rx_pool->free_map);
539 free_long_term_buff(adapter, &rx_pool->long_term_buff);
541 if (!rx_pool->rx_buff)
544 for (j = 0; j < rx_pool->size; j++) {
545 if (rx_pool->rx_buff[j].skb) {
546 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
547 rx_pool->rx_buff[j].skb = NULL;
551 kfree(rx_pool->rx_buff);
554 kfree(adapter->rx_pool);
555 adapter->rx_pool = NULL;
556 adapter->num_active_rx_pools = 0;
559 static int init_rx_pools(struct net_device *netdev)
561 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
562 struct device *dev = &adapter->vdev->dev;
563 struct ibmvnic_rx_pool *rx_pool;
568 rxadd_subcrqs = adapter->num_active_rx_scrqs;
569 buff_size = adapter->cur_rx_buf_sz;
571 adapter->rx_pool = kcalloc(rxadd_subcrqs,
572 sizeof(struct ibmvnic_rx_pool),
574 if (!adapter->rx_pool) {
575 dev_err(dev, "Failed to allocate rx pools\n");
579 adapter->num_active_rx_pools = rxadd_subcrqs;
581 for (i = 0; i < rxadd_subcrqs; i++) {
582 rx_pool = &adapter->rx_pool[i];
584 netdev_dbg(adapter->netdev,
585 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
586 i, adapter->req_rx_add_entries_per_subcrq,
589 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
591 rx_pool->buff_size = buff_size;
594 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
596 if (!rx_pool->free_map) {
597 release_rx_pools(adapter);
601 rx_pool->rx_buff = kcalloc(rx_pool->size,
602 sizeof(struct ibmvnic_rx_buff),
604 if (!rx_pool->rx_buff) {
605 dev_err(dev, "Couldn't alloc rx buffers\n");
606 release_rx_pools(adapter);
610 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
611 rx_pool->size * rx_pool->buff_size)) {
612 release_rx_pools(adapter);
616 for (j = 0; j < rx_pool->size; ++j)
617 rx_pool->free_map[j] = j;
619 atomic_set(&rx_pool->available, 0);
620 rx_pool->next_alloc = 0;
621 rx_pool->next_free = 0;
627 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
628 struct ibmvnic_tx_pool *tx_pool)
632 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
636 memset(tx_pool->tx_buff, 0,
637 tx_pool->num_buffers *
638 sizeof(struct ibmvnic_tx_buff));
640 for (i = 0; i < tx_pool->num_buffers; i++)
641 tx_pool->free_map[i] = i;
643 tx_pool->consumer_index = 0;
644 tx_pool->producer_index = 0;
649 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
654 if (!adapter->tx_pool)
657 tx_scrqs = adapter->num_active_tx_pools;
658 for (i = 0; i < tx_scrqs; i++) {
659 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
662 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
670 static void release_vpd_data(struct ibmvnic_adapter *adapter)
675 kfree(adapter->vpd->buff);
681 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
682 struct ibmvnic_tx_pool *tx_pool)
684 kfree(tx_pool->tx_buff);
685 kfree(tx_pool->free_map);
686 free_long_term_buff(adapter, &tx_pool->long_term_buff);
689 static void release_tx_pools(struct ibmvnic_adapter *adapter)
693 if (!adapter->tx_pool)
696 for (i = 0; i < adapter->num_active_tx_pools; i++) {
697 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
698 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
701 kfree(adapter->tx_pool);
702 adapter->tx_pool = NULL;
703 kfree(adapter->tso_pool);
704 adapter->tso_pool = NULL;
705 adapter->num_active_tx_pools = 0;
708 static int init_one_tx_pool(struct net_device *netdev,
709 struct ibmvnic_tx_pool *tx_pool,
710 int num_entries, int buf_size)
712 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
715 tx_pool->tx_buff = kcalloc(num_entries,
716 sizeof(struct ibmvnic_tx_buff),
718 if (!tx_pool->tx_buff)
721 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
722 num_entries * buf_size))
725 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
726 if (!tx_pool->free_map)
729 for (i = 0; i < num_entries; i++)
730 tx_pool->free_map[i] = i;
732 tx_pool->consumer_index = 0;
733 tx_pool->producer_index = 0;
734 tx_pool->num_buffers = num_entries;
735 tx_pool->buf_size = buf_size;
740 static int init_tx_pools(struct net_device *netdev)
742 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
746 tx_subcrqs = adapter->num_active_tx_scrqs;
747 adapter->tx_pool = kcalloc(tx_subcrqs,
748 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
749 if (!adapter->tx_pool)
752 adapter->tso_pool = kcalloc(tx_subcrqs,
753 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
754 if (!adapter->tso_pool) {
755 kfree(adapter->tx_pool);
756 adapter->tx_pool = NULL;
760 adapter->num_active_tx_pools = tx_subcrqs;
762 for (i = 0; i < tx_subcrqs; i++) {
763 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
764 adapter->req_tx_entries_per_subcrq,
765 adapter->req_mtu + VLAN_HLEN);
767 release_tx_pools(adapter);
771 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
775 release_tx_pools(adapter);
783 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
787 if (adapter->napi_enabled)
790 for (i = 0; i < adapter->req_rx_queues; i++)
791 napi_enable(&adapter->napi[i]);
793 adapter->napi_enabled = true;
796 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
800 if (!adapter->napi_enabled)
803 for (i = 0; i < adapter->req_rx_queues; i++) {
804 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
805 napi_disable(&adapter->napi[i]);
808 adapter->napi_enabled = false;
811 static int init_napi(struct ibmvnic_adapter *adapter)
815 adapter->napi = kcalloc(adapter->req_rx_queues,
816 sizeof(struct napi_struct), GFP_KERNEL);
820 for (i = 0; i < adapter->req_rx_queues; i++) {
821 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
822 netif_napi_add(adapter->netdev, &adapter->napi[i],
823 ibmvnic_poll, NAPI_POLL_WEIGHT);
826 adapter->num_active_rx_napi = adapter->req_rx_queues;
830 static void release_napi(struct ibmvnic_adapter *adapter)
837 for (i = 0; i < adapter->num_active_rx_napi; i++) {
838 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
839 netif_napi_del(&adapter->napi[i]);
842 kfree(adapter->napi);
843 adapter->napi = NULL;
844 adapter->num_active_rx_napi = 0;
845 adapter->napi_enabled = false;
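/* Log in to the VNIC server. A login that times out or comes back
 * ABORTED is retried up to "retries" times; PARTIALSUCCESS means the
 * server could not honor the requested capabilities, so the sub-CRQs
 * are released and renegotiated before the next attempt.
 */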
848 static int ibmvnic_login(struct net_device *netdev)
850 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
851 unsigned long timeout = msecs_to_jiffies(20000);
859 if (retry_count > retries) {
860 netdev_warn(netdev, "Login attempts exceeded\n");
864 adapter->init_done_rc = 0;
865 reinit_completion(&adapter->init_done);
866 rc = send_login(adapter);
870 if (!wait_for_completion_timeout(&adapter->init_done,
872 netdev_warn(netdev, "Login timed out, retrying...\n");
874 adapter->init_done_rc = 0;
879 if (adapter->init_done_rc == ABORTED) {
880 netdev_warn(netdev, "Login aborted, retrying...\n");
882 adapter->init_done_rc = 0;
884 /* FW or device may be busy, so
885 * wait a bit before retrying login
886 */
888 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
890 release_sub_crqs(adapter, 1);
894 "Received partial success, retrying...\n");
895 adapter->init_done_rc = 0;
896 reinit_completion(&adapter->init_done);
897 send_query_cap(adapter);
898 if (!wait_for_completion_timeout(&adapter->init_done,
901 "Capabilities query timed out\n");
905 rc = init_sub_crqs(adapter);
908 "SCRQ initialization failed\n");
912 rc = init_sub_crq_irqs(adapter);
915 "SCRQ irq initialization failed\n");
918 } else if (adapter->init_done_rc) {
919 netdev_warn(netdev, "Adapter login failed\n");
924 __ibmvnic_set_mac(netdev, adapter->mac_addr);
926 netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
930 static void release_login_buffer(struct ibmvnic_adapter *adapter)
932 if (!adapter->login_buf)
935 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
936 adapter->login_buf_sz, DMA_TO_DEVICE);
937 kfree(adapter->login_buf);
938 adapter->login_buf = NULL;
941 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
943 if (!adapter->login_rsp_buf)
946 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
947 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
948 kfree(adapter->login_rsp_buf);
949 adapter->login_rsp_buf = NULL;
952 static void release_resources(struct ibmvnic_adapter *adapter)
954 release_vpd_data(adapter);
956 release_tx_pools(adapter);
957 release_rx_pools(adapter);
959 release_napi(adapter);
960 release_login_buffer(adapter);
961 release_login_rsp_buffer(adapter);
964 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
966 struct net_device *netdev = adapter->netdev;
967 unsigned long timeout = msecs_to_jiffies(20000);
968 union ibmvnic_crq crq;
972 netdev_dbg(netdev, "setting link state %d\n", link_state);
974 memset(&crq, 0, sizeof(crq));
975 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
976 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
977 crq.logical_link_state.link_state = link_state;
982 reinit_completion(&adapter->init_done);
983 rc = ibmvnic_send_crq(adapter, &crq);
985 netdev_err(netdev, "Failed to set link state\n");
989 if (!wait_for_completion_timeout(&adapter->init_done,
991 netdev_err(netdev, "timeout setting link state\n");
995 if (adapter->init_done_rc == PARTIALSUCCESS) {
996 /* Partial success, delay and re-send */
999 } else if (adapter->init_done_rc) {
1000 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1001 adapter->init_done_rc);
1002 return adapter->init_done_rc;
1009 static int set_real_num_queues(struct net_device *netdev)
1011 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1014 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1015 adapter->req_tx_queues, adapter->req_rx_queues);
1017 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1019 netdev_err(netdev, "failed to set the number of tx queues\n");
1023 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1025 netdev_err(netdev, "failed to set the number of rx queues\n");
1030 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1032 struct device *dev = &adapter->vdev->dev;
1033 union ibmvnic_crq crq;
1037 if (adapter->vpd->buff)
1038 len = adapter->vpd->len;
1040 mutex_lock(&adapter->fw_lock);
1041 adapter->fw_done_rc = 0;
1042 reinit_completion(&adapter->fw_done);
1044 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1045 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1046 rc = ibmvnic_send_crq(adapter, &crq);
1048 mutex_unlock(&adapter->fw_lock);
1052 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1054 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1055 mutex_unlock(&adapter->fw_lock);
1058 mutex_unlock(&adapter->fw_lock);
1060 if (!adapter->vpd->len)
1063 if (!adapter->vpd->buff)
1064 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1065 else if (adapter->vpd->len != len)
1066 adapter->vpd->buff =
1067 krealloc(adapter->vpd->buff,
1068 adapter->vpd->len, GFP_KERNEL);
1070 if (!adapter->vpd->buff) {
1071 dev_err(dev, "Could allocate VPD buffer\n");
1075 adapter->vpd->dma_addr =
1076 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1078 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1079 dev_err(dev, "Could not map VPD buffer\n");
1080 kfree(adapter->vpd->buff);
1081 adapter->vpd->buff = NULL;
1085 mutex_lock(&adapter->fw_lock);
1086 adapter->fw_done_rc = 0;
1087 reinit_completion(&adapter->fw_done);
1089 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1090 crq.get_vpd.cmd = GET_VPD;
1091 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1092 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1093 rc = ibmvnic_send_crq(adapter, &crq);
1095 kfree(adapter->vpd->buff);
1096 adapter->vpd->buff = NULL;
1097 mutex_unlock(&adapter->fw_lock);
1101 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1103 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1104 kfree(adapter->vpd->buff);
1105 adapter->vpd->buff = NULL;
1106 mutex_unlock(&adapter->fw_lock);
1110 mutex_unlock(&adapter->fw_lock);
1114 static int init_resources(struct ibmvnic_adapter *adapter)
1116 struct net_device *netdev = adapter->netdev;
1119 rc = set_real_num_queues(netdev);
1123 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1127 /* Vital Product Data (VPD) */
1128 rc = ibmvnic_get_vpd(adapter);
1130 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1134 adapter->map_id = 1;
1136 rc = init_napi(adapter);
1140 send_query_map(adapter);
1142 rc = init_rx_pools(netdev);
1146 rc = init_tx_pools(netdev);
1150 static int __ibmvnic_open(struct net_device *netdev)
1152 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1153 enum vnic_state prev_state = adapter->state;
1156 adapter->state = VNIC_OPENING;
1157 replenish_pools(adapter);
1158 ibmvnic_napi_enable(adapter);
1160 /* We're ready to receive frames, enable the sub-crq interrupts and
1161 * set the logical link state to up
1162 */
1163 for (i = 0; i < adapter->req_rx_queues; i++) {
1164 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1165 if (prev_state == VNIC_CLOSED)
1166 enable_irq(adapter->rx_scrq[i]->irq);
1167 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1170 for (i = 0; i < adapter->req_tx_queues; i++) {
1171 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1172 if (prev_state == VNIC_CLOSED)
1173 enable_irq(adapter->tx_scrq[i]->irq);
1174 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1177 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1179 ibmvnic_napi_disable(adapter);
1180 release_resources(adapter);
1184 netif_tx_start_all_queues(netdev);
1186 if (prev_state == VNIC_CLOSED) {
1187 for (i = 0; i < adapter->req_rx_queues; i++)
1188 napi_schedule(&adapter->napi[i]);
1191 adapter->state = VNIC_OPEN;
1195 static int ibmvnic_open(struct net_device *netdev)
1197 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1200 /* If device failover is pending, just set device state and return.
1201 * Device operation will be handled by reset routine.
1202 */
1203 if (adapter->failover_pending) {
1204 adapter->state = VNIC_OPEN;
1208 if (adapter->state != VNIC_CLOSED) {
1209 rc = ibmvnic_login(netdev);
1213 rc = init_resources(adapter);
1215 netdev_err(netdev, "failed to initialize resources\n");
1216 release_resources(adapter);
1221 rc = __ibmvnic_open(netdev);
1224 /*
1225 * If open fails due to a pending failover, set device state and
1226 * return. Device operation will be handled by reset routine.
1227 */
1228 if (rc && adapter->failover_pending) {
1229 adapter->state = VNIC_OPEN;
1235 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1237 struct ibmvnic_rx_pool *rx_pool;
1238 struct ibmvnic_rx_buff *rx_buff;
1243 if (!adapter->rx_pool)
1246 rx_scrqs = adapter->num_active_rx_pools;
1247 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1249 /* Free any remaining skbs in the rx buffer pools */
1250 for (i = 0; i < rx_scrqs; i++) {
1251 rx_pool = &adapter->rx_pool[i];
1252 if (!rx_pool || !rx_pool->rx_buff)
1255 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1256 for (j = 0; j < rx_entries; j++) {
1257 rx_buff = &rx_pool->rx_buff[j];
1258 if (rx_buff && rx_buff->skb) {
1259 dev_kfree_skb_any(rx_buff->skb);
1260 rx_buff->skb = NULL;
1266 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1267 struct ibmvnic_tx_pool *tx_pool)
1269 struct ibmvnic_tx_buff *tx_buff;
1273 if (!tx_pool || !tx_pool->tx_buff)
1276 tx_entries = tx_pool->num_buffers;
1278 for (i = 0; i < tx_entries; i++) {
1279 tx_buff = &tx_pool->tx_buff[i];
1280 if (tx_buff && tx_buff->skb) {
1281 dev_kfree_skb_any(tx_buff->skb);
1282 tx_buff->skb = NULL;
1287 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1292 if (!adapter->tx_pool || !adapter->tso_pool)
1295 tx_scrqs = adapter->num_active_tx_pools;
1297 /* Free any remaining skbs in the tx buffer pools */
1298 for (i = 0; i < tx_scrqs; i++) {
1299 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1300 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1301 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1305 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1307 struct net_device *netdev = adapter->netdev;
1310 if (adapter->tx_scrq) {
1311 for (i = 0; i < adapter->req_tx_queues; i++)
1312 if (adapter->tx_scrq[i]->irq) {
1314 "Disabling tx_scrq[%d] irq\n", i);
1315 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1316 disable_irq(adapter->tx_scrq[i]->irq);
1320 if (adapter->rx_scrq) {
1321 for (i = 0; i < adapter->req_rx_queues; i++) {
1322 if (adapter->rx_scrq[i]->irq) {
1324 "Disabling rx_scrq[%d] irq\n", i);
1325 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1326 disable_irq(adapter->rx_scrq[i]->irq);
1332 static void ibmvnic_cleanup(struct net_device *netdev)
1334 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1336 /* ensure that transmissions are stopped if called by do_reset */
1337 if (test_bit(0, &adapter->resetting))
1338 netif_tx_disable(netdev);
1340 netif_tx_stop_all_queues(netdev);
1342 ibmvnic_napi_disable(adapter);
1343 ibmvnic_disable_irqs(adapter);
1345 clean_rx_pools(adapter);
1346 clean_tx_pools(adapter);
1349 static int __ibmvnic_close(struct net_device *netdev)
1351 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1354 adapter->state = VNIC_CLOSING;
1355 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1356 adapter->state = VNIC_CLOSED;
1360 static int ibmvnic_close(struct net_device *netdev)
1362 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1365 netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
1366 adapter->state, adapter->failover_pending,
1367 adapter->force_reset_recovery);
1369 /* If device failover is pending, just set device state and return.
1370 * Device operation will be handled by reset routine.
1371 */
1372 if (adapter->failover_pending) {
1373 adapter->state = VNIC_CLOSED;
1377 rc = __ibmvnic_close(netdev);
1378 ibmvnic_cleanup(netdev);
1383 /**
1384 * build_hdr_data - creates L2/L3/L4 header data buffer
1385 * @hdr_field: bitfield determining needed headers
1386 * @skb: socket buffer
1387 * @hdr_len: array of header lengths to be filled
1388 * @hdr_data: buffer to write the headers to, returns its total length
1390 * Reads hdr_field to determine which headers are needed by firmware.
1391 * Builds a buffer containing these headers. Saves individual header
1392 * lengths and total buffer length to be used to build descriptors.
1393 */
1394 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1395 int *hdr_len, u8 *hdr_data)
1400 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1401 hdr_len[0] = sizeof(struct vlan_ethhdr);
1403 hdr_len[0] = sizeof(struct ethhdr);
1405 if (skb->protocol == htons(ETH_P_IP)) {
1406 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1407 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1408 hdr_len[2] = tcp_hdrlen(skb);
1409 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1410 hdr_len[2] = sizeof(struct udphdr);
1411 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1412 hdr_len[1] = sizeof(struct ipv6hdr);
1413 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1414 hdr_len[2] = tcp_hdrlen(skb);
1415 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1416 hdr_len[2] = sizeof(struct udphdr);
1417 } else if (skb->protocol == htons(ETH_P_ARP)) {
1418 hdr_len[1] = arp_hdr_len(skb->dev);
1422 memset(hdr_data, 0, 120);
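/* Copy out each requested header: bit 6 of hdr_field selects the L2
 * header, bit 5 the L3 header and bit 4 the L4 header below, so e.g.
 * a hdr_field of 0x70 requests all three (illustrative note).
 */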
1423 if ((hdr_field >> 6) & 1) {
1424 hdr = skb_mac_header(skb);
1425 memcpy(hdr_data, hdr, hdr_len[0]);
1429 if ((hdr_field >> 5) & 1) {
1430 hdr = skb_network_header(skb);
1431 memcpy(hdr_data + len, hdr, hdr_len[1]);
1435 if ((hdr_field >> 4) & 1) {
1436 hdr = skb_transport_header(skb);
1437 memcpy(hdr_data + len, hdr, hdr_len[2]);
1443 /**
1444 * create_hdr_descs - create header and header extension descriptors
1445 * @hdr_field: bitfield determining needed headers
1446 * @hdr_data: buffer containing header data
1447 * @len: length of data buffer
1448 * @hdr_len: array of individual header lengths
1449 * @scrq_arr: descriptor array
1451 * Creates header and, if needed, header extension descriptors and
1452 * places them in a descriptor array, scrq_arr. Returns the number of
1453 * descriptors created.
1454 */
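/* Worked example (illustrative): 14 bytes of L2 + 20 of L3 + 20 of
 * L4 is 54 bytes of header data, which the loop below splits into one
 * 24-byte header descriptor plus extension descriptors of 29 and 1
 * bytes, i.e. three descriptors in total.
 */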
1455 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1456 union sub_crq *scrq_arr)
1458 union sub_crq hdr_desc;
1464 while (tmp_len > 0) {
1465 cur = hdr_data + len - tmp_len;
1467 memset(&hdr_desc, 0, sizeof(hdr_desc));
1468 if (cur != hdr_data) {
1469 data = hdr_desc.hdr_ext.data;
1470 tmp = tmp_len > 29 ? 29 : tmp_len;
1471 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1472 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1473 hdr_desc.hdr_ext.len = tmp;
1475 data = hdr_desc.hdr.data;
1476 tmp = tmp_len > 24 ? 24 : tmp_len;
1477 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1478 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1479 hdr_desc.hdr.len = tmp;
1480 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1481 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1482 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1483 hdr_desc.hdr.flag = hdr_field << 1;
1485 memcpy(data, cur, tmp);
1487 *scrq_arr = hdr_desc;
1495 /**
1496 * build_hdr_descs_arr - build a header descriptor array
1497 * @txbuff: tx buffer containing the skb and the descriptor array
1498 * @num_entries: number of descriptors to be sent, updated in place
1500 * @hdr_field: bit field determining which headers will be sent
1502 * This function will build a TX descriptor array with applicable
1503 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1504 */
1506 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1507 int *num_entries, u8 hdr_field)
1509 int hdr_len[3] = {0, 0, 0};
1511 u8 *hdr_data = txbuff->hdr_data;
1513 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1515 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1516 txbuff->indir_arr + 1);
1519 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1520 struct net_device *netdev)
1522 /* For some backing devices, mishandling of small packets
1523 * can result in a loss of connection or TX stall. Device
1524 * architects recommend that no packet should be smaller
1525 * than the minimum MTU value provided to the driver, so
1526 * pad any packets to that length
1527 */
1528 if (skb->len < netdev->min_mtu)
1529 return skb_put_padto(skb, netdev->min_mtu);
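/* Note: skb_put_padto() frees the skb on failure, so a non-zero
 * return here only requires the caller to account the drop (see the
 * error path in ibmvnic_xmit() below).
 */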
1534 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1536 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1537 int queue_num = skb_get_queue_mapping(skb);
1538 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1539 struct device *dev = &adapter->vdev->dev;
1540 struct ibmvnic_tx_buff *tx_buff = NULL;
1541 struct ibmvnic_sub_crq_queue *tx_scrq;
1542 struct ibmvnic_tx_pool *tx_pool;
1543 unsigned int tx_send_failed = 0;
1544 unsigned int tx_map_failed = 0;
1545 unsigned int tx_dropped = 0;
1546 unsigned int tx_packets = 0;
1547 unsigned int tx_bytes = 0;
1548 dma_addr_t data_dma_addr;
1549 struct netdev_queue *txq;
1550 unsigned long lpar_rc;
1551 union sub_crq tx_crq;
1552 unsigned int offset;
1553 int num_entries = 1;
1558 netdev_tx_t ret = NETDEV_TX_OK;
1560 if (test_bit(0, &adapter->resetting)) {
1561 dev_kfree_skb_any(skb);
1569 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1575 if (skb_is_gso(skb))
1576 tx_pool = &adapter->tso_pool[queue_num];
1578 tx_pool = &adapter->tx_pool[queue_num];
1580 tx_scrq = adapter->tx_scrq[queue_num];
1581 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1582 handle = tx_scrq->handle;
1584 index = tx_pool->free_map[tx_pool->consumer_index];
1586 if (index == IBMVNIC_INVALID_MAP) {
1587 dev_kfree_skb_any(skb);
1594 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1596 offset = index * tx_pool->buf_size;
1597 dst = tx_pool->long_term_buff.buff + offset;
1598 memset(dst, 0, tx_pool->buf_size);
1599 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1601 if (skb_shinfo(skb)->nr_frags) {
1605 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1606 cur = skb_headlen(skb);
1608 /* Copy the frags */
1609 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1610 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1613 page_address(skb_frag_page(frag)) +
1614 skb_frag_off(frag), skb_frag_size(frag));
1615 cur += skb_frag_size(frag);
1618 skb_copy_from_linear_data(skb, dst, skb->len);
1621 /* post changes to long_term_buff *dst before VIOS accessing it */
1622 dma_wmb();
1624 tx_pool->consumer_index =
1625 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1627 tx_buff = &tx_pool->tx_buff[index];
1629 tx_buff->data_dma[0] = data_dma_addr;
1630 tx_buff->data_len[0] = skb->len;
1631 tx_buff->index = index;
1632 tx_buff->pool_index = queue_num;
1633 tx_buff->last_frag = true;
1635 memset(&tx_crq, 0, sizeof(tx_crq));
1636 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1637 tx_crq.v1.type = IBMVNIC_TX_DESC;
1638 tx_crq.v1.n_crq_elem = 1;
1639 tx_crq.v1.n_sge = 1;
1640 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1642 if (skb_is_gso(skb))
1643 tx_crq.v1.correlator =
1644 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1646 tx_crq.v1.correlator = cpu_to_be32(index);
1647 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1648 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1649 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
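/* dma_reg names the long term mapping registered for this tx pool and
 * ioba addresses this packet's slot within it, per the long term
 * mapping scheme described in the file header.
 */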
1651 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1652 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1653 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1656 if (skb->protocol == htons(ETH_P_IP)) {
1657 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1658 proto = ip_hdr(skb)->protocol;
1659 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1660 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1661 proto = ipv6_hdr(skb)->nexthdr;
1664 if (proto == IPPROTO_TCP)
1665 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1666 else if (proto == IPPROTO_UDP)
1667 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1669 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1670 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1673 if (skb_is_gso(skb)) {
1674 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1675 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1678 /* determine if l2/3/4 headers are sent to firmware */
1679 if ((*hdrs >> 7) & 1) {
1680 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1681 tx_crq.v1.n_crq_elem = num_entries;
1682 tx_buff->num_entries = num_entries;
1683 tx_buff->indir_arr[0] = tx_crq;
1684 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1685 sizeof(tx_buff->indir_arr),
1687 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1688 dev_kfree_skb_any(skb);
1689 tx_buff->skb = NULL;
1690 if (!firmware_has_feature(FW_FEATURE_CMO))
1691 dev_err(dev, "tx: unable to map descriptor array\n");
1697 lpar_rc = send_subcrq_indirect(adapter, handle,
1698 (u64)tx_buff->indir_dma,
1700 dma_unmap_single(dev, tx_buff->indir_dma,
1701 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1703 tx_buff->num_entries = num_entries;
1704 lpar_rc = send_subcrq(adapter, handle,
1707 if (lpar_rc != H_SUCCESS) {
1708 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1709 dev_err_ratelimited(dev, "tx: send failed\n");
1710 dev_kfree_skb_any(skb);
1711 tx_buff->skb = NULL;
1713 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1714 /* Disable TX and report carrier off if queue is closed
1715 * or pending failover.
1716 * Firmware guarantees that a signal will be sent to the
1717 * driver, triggering a reset or some other action.
1718 */
1719 netif_tx_stop_all_queues(netdev);
1720 netif_carrier_off(netdev);
1729 if (atomic_add_return(num_entries, &tx_scrq->used)
1730 >= adapter->req_tx_entries_per_subcrq) {
1731 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1732 netif_stop_subqueue(netdev, queue_num);
1736 tx_bytes += skb->len;
1737 txq->trans_start = jiffies;
1742 /* roll back consumer index and map array */
1743 if (tx_pool->consumer_index == 0)
1744 tx_pool->consumer_index =
1745 tx_pool->num_buffers - 1;
1747 tx_pool->consumer_index--;
1748 tx_pool->free_map[tx_pool->consumer_index] = index;
1750 netdev->stats.tx_dropped += tx_dropped;
1751 netdev->stats.tx_bytes += tx_bytes;
1752 netdev->stats.tx_packets += tx_packets;
1753 adapter->tx_send_failed += tx_send_failed;
1754 adapter->tx_map_failed += tx_map_failed;
1755 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1756 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1757 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1762 static void ibmvnic_set_multi(struct net_device *netdev)
1764 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1765 struct netdev_hw_addr *ha;
1766 union ibmvnic_crq crq;
1768 memset(&crq, 0, sizeof(crq));
1769 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1770 crq.request_capability.cmd = REQUEST_CAPABILITY;
1772 if (netdev->flags & IFF_PROMISC) {
1773 if (!adapter->promisc_supported)
1776 if (netdev->flags & IFF_ALLMULTI) {
1777 /* Accept all multicast */
1778 memset(&crq, 0, sizeof(crq));
1779 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1780 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1781 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1782 ibmvnic_send_crq(adapter, &crq);
1783 } else if (netdev_mc_empty(netdev)) {
1784 /* Reject all multicast */
1785 memset(&crq, 0, sizeof(crq));
1786 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1787 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1788 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1789 ibmvnic_send_crq(adapter, &crq);
1791 /* Accept one or more multicast(s) */
1792 netdev_for_each_mc_addr(ha, netdev) {
1793 memset(&crq, 0, sizeof(crq));
1794 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1795 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1796 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1797 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1799 ibmvnic_send_crq(adapter, &crq);
1805 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1807 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1808 union ibmvnic_crq crq;
1811 if (!is_valid_ether_addr(dev_addr)) {
1812 rc = -EADDRNOTAVAIL;
1816 memset(&crq, 0, sizeof(crq));
1817 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1818 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1819 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1821 mutex_lock(&adapter->fw_lock);
1822 adapter->fw_done_rc = 0;
1823 reinit_completion(&adapter->fw_done);
1825 rc = ibmvnic_send_crq(adapter, &crq);
1828 mutex_unlock(&adapter->fw_lock);
1832 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1833 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1834 if (rc || adapter->fw_done_rc) {
1836 mutex_unlock(&adapter->fw_lock);
1839 mutex_unlock(&adapter->fw_lock);
1842 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1846 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1848 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1849 struct sockaddr *addr = p;
1853 if (!is_valid_ether_addr(addr->sa_data))
1854 return -EADDRNOTAVAIL;
1856 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1857 if (adapter->state != VNIC_PROBED)
1858 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1863 /*
1864 * do_change_param_reset returns zero if we are able to keep processing reset
1865 * events, or non-zero if we hit a fatal error and must halt.
1866 */
1867 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1868 struct ibmvnic_rwi *rwi,
1871 struct net_device *netdev = adapter->netdev;
1874 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1877 netif_carrier_off(netdev);
1878 adapter->reset_reason = rwi->reset_reason;
1880 ibmvnic_cleanup(netdev);
1882 if (reset_state == VNIC_OPEN) {
1883 rc = __ibmvnic_close(netdev);
1888 release_resources(adapter);
1889 release_sub_crqs(adapter, 1);
1890 release_crq_queue(adapter);
1892 adapter->state = VNIC_PROBED;
1894 rc = init_crq_queue(adapter);
1897 netdev_err(adapter->netdev,
1898 "Couldn't initialize crq. rc=%d\n", rc);
1902 rc = ibmvnic_reset_init(adapter, true);
1904 rc = IBMVNIC_INIT_FAILED;
1908 /* If the adapter was in PROBE state prior to the reset,
1909 * exit here.
1910 */
1911 if (reset_state == VNIC_PROBED)
1912 goto out;
1914 rc = ibmvnic_login(netdev);
1919 rc = init_resources(adapter);
1923 ibmvnic_disable_irqs(adapter);
1925 adapter->state = VNIC_CLOSED;
1927 if (reset_state == VNIC_CLOSED)
1930 rc = __ibmvnic_open(netdev);
1932 rc = IBMVNIC_OPEN_FAILED;
1936 /* refresh device's multicast list */
1937 ibmvnic_set_multi(netdev);
1940 for (i = 0; i < adapter->req_rx_queues; i++)
1941 napi_schedule(&adapter->napi[i]);
1945 adapter->state = reset_state;
1949 /*
1950 * do_reset returns zero if we are able to keep processing reset events, or
1951 * non-zero if we hit a fatal error and must halt.
1952 */
1953 static int do_reset(struct ibmvnic_adapter *adapter,
1954 struct ibmvnic_rwi *rwi, u32 reset_state)
1956 u64 old_num_rx_queues, old_num_tx_queues;
1957 u64 old_num_rx_slots, old_num_tx_slots;
1958 struct net_device *netdev = adapter->netdev;
1961 netdev_dbg(adapter->netdev,
1962 "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
1963 adapter->state, adapter->failover_pending,
1964 rwi->reset_reason, reset_state);
1966 rtnl_lock();
1967 /*
1968 * Now that we have the rtnl lock, clear any pending failover.
1969 * This will ensure ibmvnic_open() has either completed or will
1970 * block until failover is complete.
1971 */
1972 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
1973 adapter->failover_pending = false;
1975 netif_carrier_off(netdev);
1976 adapter->reset_reason = rwi->reset_reason;
1978 old_num_rx_queues = adapter->req_rx_queues;
1979 old_num_tx_queues = adapter->req_tx_queues;
1980 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1981 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1983 ibmvnic_cleanup(netdev);
1985 if (reset_state == VNIC_OPEN &&
1986 adapter->reset_reason != VNIC_RESET_MOBILITY &&
1987 adapter->reset_reason != VNIC_RESET_FAILOVER) {
1988 adapter->state = VNIC_CLOSING;
1990 /* Release the RTNL lock before link state change and
1991 * re-acquire after the link state change to allow
1992 * linkwatch_event to grab the RTNL lock and run during
1993 * a reset.
1994 */
1995 rtnl_unlock();
1996 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1997 rtnl_lock();
1999 if (rc)
2000 goto out;
2001 if (adapter->state != VNIC_CLOSING) {
2006 adapter->state = VNIC_CLOSED;
2009 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2010 /* remove the closed state so when we call open it appears
2011 * we are coming from the probed state.
2012 */
2013 adapter->state = VNIC_PROBED;
2015 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2016 rc = ibmvnic_reenable_crq_queue(adapter);
2017 release_sub_crqs(adapter, 1);
2019 rc = ibmvnic_reset_crq(adapter);
2020 if (rc == H_CLOSED || rc == H_SUCCESS) {
2021 rc = vio_enable_interrupts(adapter->vdev);
2023 netdev_err(adapter->netdev,
2024 "Reset failed to enable interrupts. rc=%d\n",
2030 netdev_err(adapter->netdev,
2031 "Reset couldn't initialize crq. rc=%d\n", rc);
2035 rc = ibmvnic_reset_init(adapter, true);
2037 rc = IBMVNIC_INIT_FAILED;
2041 /* If the adapter was in PROBE state prior to the reset,
2042 * exit here.
2043 */
2044 if (reset_state == VNIC_PROBED) {
2045 rc = 0;
2046 goto out;
2047 }
2049 rc = ibmvnic_login(netdev);
2054 if (adapter->req_rx_queues != old_num_rx_queues ||
2055 adapter->req_tx_queues != old_num_tx_queues ||
2056 adapter->req_rx_add_entries_per_subcrq !=
2058 adapter->req_tx_entries_per_subcrq !=
2060 !adapter->rx_pool ||
2061 !adapter->tso_pool ||
2062 !adapter->tx_pool) {
2063 release_rx_pools(adapter);
2064 release_tx_pools(adapter);
2065 release_napi(adapter);
2066 release_vpd_data(adapter);
2068 rc = init_resources(adapter);
2073 rc = reset_tx_pools(adapter);
2075 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2080 rc = reset_rx_pools(adapter);
2082 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2087 ibmvnic_disable_irqs(adapter);
2089 adapter->state = VNIC_CLOSED;
2091 if (reset_state == VNIC_CLOSED) {
2096 rc = __ibmvnic_open(netdev);
2098 rc = IBMVNIC_OPEN_FAILED;
2102 /* refresh device's multicast list */
2103 ibmvnic_set_multi(netdev);
2105 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2106 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2107 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
2108 call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
2113 out:
2114 /* restore the adapter state if reset failed */
2115 if (rc)
2116 adapter->state = reset_state;
2117 rtnl_unlock();
2119 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
2120 adapter->state, adapter->failover_pending, rc);
2124 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2125 struct ibmvnic_rwi *rwi, u32 reset_state)
2127 struct net_device *netdev = adapter->netdev;
2130 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2133 netif_carrier_off(netdev);
2134 adapter->reset_reason = rwi->reset_reason;
2136 ibmvnic_cleanup(netdev);
2137 release_resources(adapter);
2138 release_sub_crqs(adapter, 0);
2139 release_crq_queue(adapter);
2141 /* remove the closed state so when we call open it appears
2142 * we are coming from the probed state.
2143 */
2144 adapter->state = VNIC_PROBED;
2146 reinit_completion(&adapter->init_done);
2147 rc = init_crq_queue(adapter);
2149 netdev_err(adapter->netdev,
2150 "Couldn't initialize crq. rc=%d\n", rc);
2154 rc = ibmvnic_reset_init(adapter, false);
2158 /* If the adapter was in PROBE state prior to the reset,
2159 * exit here.
2160 */
2161 if (reset_state == VNIC_PROBED)
2162 goto out;
2164 rc = ibmvnic_login(netdev);
2168 rc = init_resources(adapter);
2172 ibmvnic_disable_irqs(adapter);
2173 adapter->state = VNIC_CLOSED;
2175 if (reset_state == VNIC_CLOSED)
2178 rc = __ibmvnic_open(netdev);
2180 rc = IBMVNIC_OPEN_FAILED;
2184 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
2185 call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
2186 out:
2187 /* restore adapter state if reset failed */
2188 if (rc)
2189 adapter->state = reset_state;
2190 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
2191 adapter->state, adapter->failover_pending, rc);
2195 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2197 struct ibmvnic_rwi *rwi;
2198 unsigned long flags;
2200 spin_lock_irqsave(&adapter->rwi_lock, flags);
2202 if (!list_empty(&adapter->rwi_list)) {
2203 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2205 list_del(&rwi->list);
2210 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2214 static void __ibmvnic_reset(struct work_struct *work)
2216 struct ibmvnic_rwi *rwi;
2217 struct ibmvnic_adapter *adapter;
2218 bool saved_state = false;
2219 unsigned long flags;
2223 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2225 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2226 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2227 IBMVNIC_RESET_DELAY);
2231 rwi = get_next_rwi(adapter);
2233 spin_lock_irqsave(&adapter->state_lock, flags);
2235 if (adapter->state == VNIC_REMOVING ||
2236 adapter->state == VNIC_REMOVED) {
2237 spin_unlock_irqrestore(&adapter->state_lock, flags);
2244 reset_state = adapter->state;
2247 spin_unlock_irqrestore(&adapter->state_lock, flags);
2249 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2250 /* CHANGE_PARAM requestor holds rtnl_lock */
2251 rc = do_change_param_reset(adapter, rwi, reset_state);
2252 } else if (adapter->force_reset_recovery) {
2253 /*
2254 * Since we are doing a hard reset now, clear the
2255 * failover_pending flag so we don't ignore any
2256 * future MOBILITY or other resets.
2257 */
2258 adapter->failover_pending = false;
2260 /* Transport event occurred during previous reset */
2261 if (adapter->wait_for_reset) {
2262 /* Previous was CHANGE_PARAM; caller locked */
2263 adapter->force_reset_recovery = false;
2264 rc = do_hard_reset(adapter, rwi, reset_state);
2267 adapter->force_reset_recovery = false;
2268 rc = do_hard_reset(adapter, rwi, reset_state);
2272 /* give backing device time to settle down */
2273 netdev_dbg(adapter->netdev,
2274 "[S:%d] Hard reset failed, waiting 60 secs\n",
2276 set_current_state(TASK_UNINTERRUPTIBLE);
2277 schedule_timeout(60 * HZ);
2280 rc = do_reset(adapter, rwi, reset_state);
2283 adapter->last_reset_time = jiffies;
2286 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2288 rwi = get_next_rwi(adapter);
2290 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2291 rwi->reset_reason == VNIC_RESET_MOBILITY))
2292 adapter->force_reset_recovery = true;
2295 if (adapter->wait_for_reset) {
2296 adapter->reset_done_rc = rc;
2297 complete(&adapter->reset_done);
2300 clear_bit_unlock(0, &adapter->resetting);
2302 netdev_dbg(adapter->netdev,
2303 "[S:%d FRR:%d WFR:%d] Done processing resets\n",
2304 adapter->state, adapter->force_reset_recovery,
2305 adapter->wait_for_reset);
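/* Re-entry point for resets that __ibmvnic_reset() above deferred
 * because another reset still held the "resetting" bit.
 */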
2308 static void __ibmvnic_delayed_reset(struct work_struct *work)
2310 struct ibmvnic_adapter *adapter;
2312 adapter = container_of(work, struct ibmvnic_adapter,
2313 ibmvnic_delayed_reset.work);
2314 __ibmvnic_reset(&adapter->ibmvnic_reset);
2317 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2318 enum ibmvnic_reset_reason reason)
2320 struct list_head *entry, *tmp_entry;
2321 struct ibmvnic_rwi *rwi, *tmp;
2322 struct net_device *netdev = adapter->netdev;
2323 unsigned long flags;
2326 spin_lock_irqsave(&adapter->rwi_lock, flags);
2328 /*
2329 * If failover is pending don't schedule any other reset.
2330 * Instead let the failover complete. If there is already a
2331 * failover reset scheduled, we will detect and drop the
2332 * duplicate reset when walking the ->rwi_list below.
2333 */
2334 if (adapter->state == VNIC_REMOVING ||
2335 adapter->state == VNIC_REMOVED ||
2336 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2338 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2342 if (adapter->state == VNIC_PROBING) {
2343 netdev_warn(netdev, "Adapter reset during probe\n");
2344 adapter->init_done_rc = EAGAIN;
2349 list_for_each(entry, &adapter->rwi_list) {
2350 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2351 if (tmp->reset_reason == reason) {
2352 netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
2359 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2364 /* if we just received a transport event,
2365 * flush reset queue and process this reset */
2367 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2368 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
2370 kfree(list_entry(entry, struct ibmvnic_rwi, list));
2373 rwi->reset_reason = reason;
2374 list_add_tail(&rwi->list, &adapter->rwi_list);
2375 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2376 schedule_work(&adapter->ibmvnic_reset);
2380 /* ibmvnic_close() below can block, so drop the lock first */
2381 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2384 ibmvnic_close(netdev);
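/* A rough sketch of the tx-timeout throttle implemented below, assuming
 * the default 5 second watchdog: if the last reset finished at jiffies T,
 * every ndo_tx_timeout callback arriving before T + dev->watchdog_timeo
 * is logged and ignored, so a burst of stalled queues collapses into a
 * single VNIC_RESET_TIMEOUT request.
 */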
2389 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2391 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2393 if (test_bit(0, &adapter->resetting)) {
2394 netdev_err(adapter->netdev,
2395 "Adapter is resetting, skip timeout reset\n");
2398 /* No queuing up reset until at least 5 seconds (default watchdog val)
* after the last reset */
2401 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2402 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2405 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2408 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2409 struct ibmvnic_rx_buff *rx_buff)
2411 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2413 rx_buff->skb = NULL;
2415 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2416 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2418 atomic_dec(&pool->available);
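/* Worked example of the free_map ring above: in a pool of size 4 with
 * next_alloc == 3, the freed buffer's index is written to slot 3 and
 * next_alloc wraps to (3 + 1) % 4 == 0. pool->available effectively
 * counts buffers still posted to the device for receive.
 */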
2421 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2423 struct net_device *netdev = napi->dev;
2424 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2425 int scrq_num = (int)(napi - adapter->napi);
2426 int frames_processed = 0;
2429 while (frames_processed < budget) {
2430 struct sk_buff *skb;
2431 struct ibmvnic_rx_buff *rx_buff;
2432 union sub_crq *next;
u32 length;
u16 offset;
u8 flags = 0;
2437 if (unlikely(test_bit(0, &adapter->resetting) &&
2438 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2439 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2440 napi_complete_done(napi, frames_processed);
2441 return frames_processed;
2444 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
break;
2446 /* The queue entry at the current index is peeked at above
2447 * to determine that there is a valid descriptor awaiting
2448 * processing. We want to be sure that the current slot
2449 * holds a valid descriptor before reading its contents. */
dma_rmb();
2452 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2454 rx_buff = (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2455 rx_comp.correlator);
2456 /* do error checking */
2457 if (next->rx_comp.rc) {
2458 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2459 be16_to_cpu(next->rx_comp.rc));
2460 /* free the entry */
2461 next->rx_comp.first = 0;
2462 dev_kfree_skb_any(rx_buff->skb);
2463 remove_buff_from_pool(adapter, rx_buff);
2465 } else if (!rx_buff->skb) {
2466 /* free the entry */
2467 next->rx_comp.first = 0;
2468 remove_buff_from_pool(adapter, rx_buff);
2472 length = be32_to_cpu(next->rx_comp.len);
2473 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2474 flags = next->rx_comp.flags;
skb = rx_buff->skb;
2476 /* load long_term_buff before copying to skb */
dma_rmb();
2478 skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
2481 /* VLAN Header has been stripped by the system firmware and
2482 * needs to be inserted by the driver */
2484 if (adapter->rx_vlan_header_insertion &&
2485 (flags & IBMVNIC_VLAN_STRIPPED))
2486 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2487 ntohs(next->rx_comp.vlan_tci));
2489 /* free the entry */
2490 next->rx_comp.first = 0;
2491 remove_buff_from_pool(adapter, rx_buff);
2493 skb_put(skb, length);
2494 skb->protocol = eth_type_trans(skb, netdev);
2495 skb_record_rx_queue(skb, scrq_num);
2497 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2498 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2499 skb->ip_summed = CHECKSUM_UNNECESSARY;
2503 napi_gro_receive(napi, skb); /* send it up */
2504 netdev->stats.rx_packets++;
2505 netdev->stats.rx_bytes += length;
2506 adapter->rx_stats_buffers[scrq_num].packets++;
2507 adapter->rx_stats_buffers[scrq_num].bytes += length;
2511 if (adapter->state != VNIC_CLOSING)
2512 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2514 if (frames_processed < budget) {
2515 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2516 napi_complete_done(napi, frames_processed);
2517 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2518 napi_reschedule(napi)) {
2519 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2523 return frames_processed;
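/* The enable/recheck pattern at the end of ibmvnic_poll() closes the
 * usual NAPI race: after the sub-CRQ interrupt is re-enabled, a
 * descriptor that arrived just before the enable is caught by the
 * pending_scrq() recheck and polling resumes with the IRQ masked again.
 */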
2526 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2530 adapter->fallback.mtu = adapter->req_mtu;
2531 adapter->fallback.rx_queues = adapter->req_rx_queues;
2532 adapter->fallback.tx_queues = adapter->req_tx_queues;
2533 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2534 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2536 reinit_completion(&adapter->reset_done);
2537 adapter->wait_for_reset = true;
2538 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2544 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2551 if (adapter->reset_done_rc) {
2553 adapter->desired.mtu = adapter->fallback.mtu;
2554 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2555 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2556 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2557 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2559 reinit_completion(&adapter->reset_done);
2560 adapter->wait_for_reset = true;
2561 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2566 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2574 adapter->wait_for_reset = false;
2579 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2581 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2583 adapter->desired.mtu = new_mtu + ETH_HLEN;
2585 return wait_for_reset(adapter);
2588 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2589 struct net_device *dev,
2590 netdev_features_t features)
2592 /* Some backing hardware adapters cannot
2593 * handle packets with an MSS less than 224
2594 * or with only one segment. */
2596 if (skb_is_gso(skb)) {
2597 if (skb_shinfo(skb)->gso_size < 224 ||
2598 skb_shinfo(skb)->gso_segs == 1)
2599 features &= ~NETIF_F_GSO_MASK;
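/* For example, a TSO skb with gso_size 200 (below the 224 minimum noted
 * above) loses all NETIF_F_GSO_MASK bits here, so the core segments it
 * in software before it reaches ibmvnic_xmit().
 */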
2605 static const struct net_device_ops ibmvnic_netdev_ops = {
2606 .ndo_open = ibmvnic_open,
2607 .ndo_stop = ibmvnic_close,
2608 .ndo_start_xmit = ibmvnic_xmit,
2609 .ndo_set_rx_mode = ibmvnic_set_multi,
2610 .ndo_set_mac_address = ibmvnic_set_mac,
2611 .ndo_validate_addr = eth_validate_addr,
2612 .ndo_tx_timeout = ibmvnic_tx_timeout,
2613 .ndo_change_mtu = ibmvnic_change_mtu,
2614 .ndo_features_check = ibmvnic_features_check,
2617 /* ethtool functions */
2619 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2620 struct ethtool_link_ksettings *cmd)
2622 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2625 rc = send_query_phys_parms(adapter);
2627 adapter->speed = SPEED_UNKNOWN;
2628 adapter->duplex = DUPLEX_UNKNOWN;
2630 cmd->base.speed = adapter->speed;
2631 cmd->base.duplex = adapter->duplex;
2632 cmd->base.port = PORT_FIBRE;
2633 cmd->base.phy_address = 0;
2634 cmd->base.autoneg = AUTONEG_ENABLE;
2639 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2640 struct ethtool_drvinfo *info)
2642 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2644 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2645 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2646 strlcpy(info->fw_version, adapter->fw_version,
2647 sizeof(info->fw_version));
2650 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2652 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2654 return adapter->msg_enable;
2657 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2659 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2661 adapter->msg_enable = data;
2664 static u32 ibmvnic_get_link(struct net_device *netdev)
2666 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2668 /* Don't need to send a query because we request a logical link up at
2669 * init and then we wait for link state indications */
2671 return adapter->logical_link_state;
2674 static void ibmvnic_get_ringparam(struct net_device *netdev,
2675 struct ethtool_ringparam *ring)
2677 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2679 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2680 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2681 ring->rx_mini_max_pending = 0;
2682 ring->rx_jumbo_max_pending = 0;
2683 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2684 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2685 ring->rx_mini_pending = 0;
2686 ring->rx_jumbo_pending = 0;
2689 static int ibmvnic_set_ringparam(struct net_device *netdev,
2690 struct ethtool_ringparam *ring)
2692 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2694 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2695 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2696 netdev_err(netdev, "Invalid request.\n");
2697 netdev_err(netdev, "Max tx buffers = %llu\n",
2698 adapter->max_rx_add_entries_per_subcrq);
2699 netdev_err(netdev, "Max rx buffers = %llu\n",
2700 adapter->max_tx_entries_per_subcrq);
2704 adapter->desired.rx_entries = ring->rx_pending;
2705 adapter->desired.tx_entries = ring->tx_pending;
2707 return wait_for_reset(adapter);
2710 static void ibmvnic_get_channels(struct net_device *netdev,
2711 struct ethtool_channels *channels)
2713 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2715 channels->max_rx = adapter->max_rx_queues;
2716 channels->max_tx = adapter->max_tx_queues;
2717 channels->max_other = 0;
2718 channels->max_combined = 0;
2719 channels->rx_count = adapter->req_rx_queues;
2720 channels->tx_count = adapter->req_tx_queues;
2721 channels->other_count = 0;
2722 channels->combined_count = 0;
2725 static int ibmvnic_set_channels(struct net_device *netdev,
2726 struct ethtool_channels *channels)
2728 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2730 adapter->desired.rx_queues = channels->rx_count;
2731 adapter->desired.tx_queues = channels->tx_count;
2733 return wait_for_reset(adapter);
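/* The ethtool set paths above (.set_ringparam, .set_channels, and
 * ndo_change_mtu) only record the desired values and then funnel through
 * wait_for_reset(), which drives a CHANGE_PARAM reset and restores the
 * recorded fallback values if the new request cannot be satisfied.
 * E.g. "ethtool -L <if> rx 4 tx 4" lands in ibmvnic_set_channels().
 */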
2736 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2738 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2741 if (stringset != ETH_SS_STATS)
2744 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2745 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2747 for (i = 0; i < adapter->req_tx_queues; i++) {
2748 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2749 data += ETH_GSTRING_LEN;
2751 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2752 data += ETH_GSTRING_LEN;
2754 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2755 data += ETH_GSTRING_LEN;
2758 for (i = 0; i < adapter->req_rx_queues; i++) {
2759 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2760 data += ETH_GSTRING_LEN;
2762 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2763 data += ETH_GSTRING_LEN;
2765 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2766 data += ETH_GSTRING_LEN;
2770 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2772 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2776 return ARRAY_SIZE(ibmvnic_stats) +
2777 adapter->req_tx_queues * NUM_TX_STATS +
2778 adapter->req_rx_queues * NUM_RX_STATS;
2784 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2785 struct ethtool_stats *stats, u64 *data)
2787 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2788 union ibmvnic_crq crq;
2792 memset(&crq, 0, sizeof(crq));
2793 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2794 crq.request_statistics.cmd = REQUEST_STATISTICS;
2795 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2796 crq.request_statistics.len =
2797 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2799 /* Wait for data to be written */
2800 reinit_completion(&adapter->stats_done);
2801 rc = ibmvnic_send_crq(adapter, &crq);
2804 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2808 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2809 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
2810 (adapter, ibmvnic_stats[i].offset));
2812 for (j = 0; j < adapter->req_tx_queues; j++) {
2813 data[i] = adapter->tx_stats_buffers[j].packets;
2815 data[i] = adapter->tx_stats_buffers[j].bytes;
2817 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2821 for (j = 0; j < adapter->req_rx_queues; j++) {
2822 data[i] = adapter->rx_stats_buffers[j].packets;
2824 data[i] = adapter->rx_stats_buffers[j].bytes;
2826 data[i] = adapter->rx_stats_buffers[j].interrupts;
2831 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2832 .get_drvinfo = ibmvnic_get_drvinfo,
2833 .get_msglevel = ibmvnic_get_msglevel,
2834 .set_msglevel = ibmvnic_set_msglevel,
2835 .get_link = ibmvnic_get_link,
2836 .get_ringparam = ibmvnic_get_ringparam,
2837 .set_ringparam = ibmvnic_set_ringparam,
2838 .get_channels = ibmvnic_get_channels,
2839 .set_channels = ibmvnic_set_channels,
2840 .get_strings = ibmvnic_get_strings,
2841 .get_sset_count = ibmvnic_get_sset_count,
2842 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2843 .get_link_ksettings = ibmvnic_get_link_ksettings,
2846 /* Routines for managing CRQs/sCRQs */
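/* Sizing note: each sub-CRQ ring is four pages of 32-byte descriptors,
 * so scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs) gives
 * 4 * 4096 / 32 = 512 entries with 4 KB pages, or 8192 with 64 KB pages.
 */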
2848 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2849 struct ibmvnic_sub_crq_queue *scrq)
2854 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2859 free_irq(scrq->irq, scrq);
2860 irq_dispose_mapping(scrq->irq);
2864 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2865 atomic_set(&scrq->used, 0);
2868 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2872 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2873 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2877 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2881 if (!adapter->tx_scrq || !adapter->rx_scrq)
2884 for (i = 0; i < adapter->req_tx_queues; i++) {
2885 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2886 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2891 for (i = 0; i < adapter->req_rx_queues; i++) {
2892 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2893 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2901 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2902 struct ibmvnic_sub_crq_queue *scrq,
2905 struct device *dev = &adapter->vdev->dev;
2908 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2911 /* Close the sub-crqs */
2913 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2914 adapter->vdev->unit_address,
2916 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2919 netdev_err(adapter->netdev,
2920 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2925 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2927 free_pages((unsigned long)scrq->msgs, 2);
2931 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2934 struct device *dev = &adapter->vdev->dev;
2935 struct ibmvnic_sub_crq_queue *scrq;
2938 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2943 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2945 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2946 goto zero_page_failed;
2949 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2951 if (dma_mapping_error(dev, scrq->msg_token)) {
2952 dev_warn(dev, "Couldn't map crq queue messages page\n");
2956 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2957 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2959 if (rc == H_RESOURCE)
2960 rc = ibmvnic_reset_crq(adapter);
2962 if (rc == H_CLOSED) {
2963 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2965 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2969 scrq->adapter = adapter;
2970 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2971 spin_lock_init(&scrq->lock);
2973 netdev_dbg(adapter->netdev,
2974 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2975 scrq->crq_num, scrq->hw_irq, scrq->irq);
2980 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2983 free_pages((unsigned long)scrq->msgs, 2);
2990 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2994 if (adapter->tx_scrq) {
2995 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2996 if (!adapter->tx_scrq[i])
2999 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3001 if (adapter->tx_scrq[i]->irq) {
3002 free_irq(adapter->tx_scrq[i]->irq,
3003 adapter->tx_scrq[i]);
3004 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3005 adapter->tx_scrq[i]->irq = 0;
3008 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3012 kfree(adapter->tx_scrq);
3013 adapter->tx_scrq = NULL;
3014 adapter->num_active_tx_scrqs = 0;
3017 if (adapter->rx_scrq) {
3018 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3019 if (!adapter->rx_scrq[i])
3022 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3024 if (adapter->rx_scrq[i]->irq) {
3025 free_irq(adapter->rx_scrq[i]->irq,
3026 adapter->rx_scrq[i]);
3027 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3028 adapter->rx_scrq[i]->irq = 0;
3031 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3035 kfree(adapter->rx_scrq);
3036 adapter->rx_scrq = NULL;
3037 adapter->num_active_rx_scrqs = 0;
3041 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3042 struct ibmvnic_sub_crq_queue *scrq)
3044 struct device *dev = &adapter->vdev->dev;
3047 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3048 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3050 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3055 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3056 struct ibmvnic_sub_crq_queue *scrq)
3058 struct device *dev = &adapter->vdev->dev;
3061 if (scrq->hw_irq > 0x100000000ULL) {
3062 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3066 if (test_bit(0, &adapter->resetting) &&
3067 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3068 u64 val = (0xff000000) | scrq->hw_irq;
3070 rc = plpar_hcall_norets(H_EOI, val);
3071 /* H_EOI would fail with rc = H_FUNCTION when running
3072 * in XIVE mode which is expected, but not an error.
3074 if (rc && rc != H_FUNCTION)
3075 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3079 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3080 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3082 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
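/* ibmvnic_complete_tx() below reaps transmit completions; once the
 * in-flight count drops to half of req_tx_entries_per_subcrq (e.g. a
 * 2048-entry queue wakes at <= 1024 outstanding), a stopped subqueue is
 * restarted.
 */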
3087 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3088 struct ibmvnic_sub_crq_queue *scrq)
3090 struct device *dev = &adapter->vdev->dev;
3091 struct ibmvnic_tx_pool *tx_pool;
3092 struct ibmvnic_tx_buff *txbuff;
3093 union sub_crq *next;
3098 while (pending_scrq(adapter, scrq)) {
3099 unsigned int pool = scrq->pool_index;
3100 int num_entries = 0;
3102 /* The queue entry at the current index is peeked at above
3103 * to determine that there is a valid descriptor awaiting
3104 * processing. We want to be sure that the current slot
3105 * holds a valid descriptor before reading its contents. */
dma_rmb();
3109 next = ibmvnic_next_scrq(adapter, scrq);
3110 for (i = 0; i < next->tx_comp.num_comps; i++) {
3111 if (next->tx_comp.rcs[i])
3112 dev_err(dev, "tx error %x\n",
3113 next->tx_comp.rcs[i]);
3114 index = be32_to_cpu(next->tx_comp.correlators[i]);
3115 if (index & IBMVNIC_TSO_POOL_MASK) {
3116 tx_pool = &adapter->tso_pool[pool];
3117 index &= ~IBMVNIC_TSO_POOL_MASK;
3119 tx_pool = &adapter->tx_pool[pool];
3122 txbuff = &tx_pool->tx_buff[index];
3124 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
3125 if (!txbuff->data_dma[j])
continue;
3128 txbuff->data_dma[j] = 0;
3131 if (txbuff->last_frag) {
3132 dev_kfree_skb_any(txbuff->skb);
3136 num_entries += txbuff->num_entries;
3138 tx_pool->free_map[tx_pool->producer_index] = index;
3139 tx_pool->producer_index =
3140 (tx_pool->producer_index + 1) %
3141 tx_pool->num_buffers;
3143 /* remove tx_comp scrq */
3144 next->tx_comp.first = 0;
3146 if (atomic_sub_return(num_entries, &scrq->used) <=
3147 (adapter->req_tx_entries_per_subcrq / 2) &&
3148 __netif_subqueue_stopped(adapter->netdev,
3149 scrq->pool_index)) {
3150 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3151 netdev_dbg(adapter->netdev, "Started queue %d\n",
3156 enable_scrq_irq(adapter, scrq);
3158 if (pending_scrq(adapter, scrq)) {
3159 disable_scrq_irq(adapter, scrq);
3166 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3168 struct ibmvnic_sub_crq_queue *scrq = instance;
3169 struct ibmvnic_adapter *adapter = scrq->adapter;
3171 disable_scrq_irq(adapter, scrq);
3172 ibmvnic_complete_tx(adapter, scrq);
3177 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3179 struct ibmvnic_sub_crq_queue *scrq = instance;
3180 struct ibmvnic_adapter *adapter = scrq->adapter;
3182 /* When booting a kdump kernel we can hit pending interrupts
3183 * prior to completing driver initialization. */
3185 if (unlikely(adapter->state != VNIC_OPEN))
3188 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3190 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3191 disable_scrq_irq(adapter, scrq);
3192 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3198 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3200 struct device *dev = &adapter->vdev->dev;
3201 struct ibmvnic_sub_crq_queue *scrq;
3205 for (i = 0; i < adapter->req_tx_queues; i++) {
3206 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3208 scrq = adapter->tx_scrq[i];
3209 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3213 dev_err(dev, "Error mapping irq\n");
3214 goto req_tx_irq_failed;
3217 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3218 adapter->vdev->unit_address, i);
3219 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3220 0, scrq->name, scrq);
3223 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3225 irq_dispose_mapping(scrq->irq);
3226 goto req_tx_irq_failed;
3230 for (i = 0; i < adapter->req_rx_queues; i++) {
3231 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3233 scrq = adapter->rx_scrq[i];
3234 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3237 dev_err(dev, "Error mapping irq\n");
3238 goto req_rx_irq_failed;
3240 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3241 adapter->vdev->unit_address, i);
3242 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3243 0, scrq->name, scrq);
3245 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3247 irq_dispose_mapping(scrq->irq);
3248 goto req_rx_irq_failed;
3254 for (j = 0; j < i; j++) {
3255 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3256 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3258 i = adapter->req_tx_queues;
3260 for (j = 0; j < i; j++) {
3261 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3262 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3264 release_sub_crqs(adapter, 1);
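/* init_sub_crqs() below allocates req_tx_queues + req_rx_queues sub-CRQs
 * in one array; on partial failure it walks the shortfall, trimming
 * req_rx_queues and req_tx_queues (never below their negotiated
 * minimums) until the registered count fits, then splits the array into
 * the tx_scrq and rx_scrq tables.
 */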
3268 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3270 struct device *dev = &adapter->vdev->dev;
3271 struct ibmvnic_sub_crq_queue **allqueues;
3272 int registered_queues = 0;
3277 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3279 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3283 for (i = 0; i < total_queues; i++) {
3284 allqueues[i] = init_sub_crq_queue(adapter);
3285 if (!allqueues[i]) {
3286 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3289 registered_queues++;
3292 /* Make sure we were able to register the minimum number of queues */
3293 if (registered_queues <
3294 adapter->min_tx_queues + adapter->min_rx_queues) {
3295 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3299 /* Distribute the allocation shortfall between the rx and tx queues */
3300 for (i = 0; i < total_queues - registered_queues + more; i++) {
3301 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3304 if (adapter->req_rx_queues > adapter->min_rx_queues)
3305 adapter->req_rx_queues--;
3310 if (adapter->req_tx_queues > adapter->min_tx_queues)
3311 adapter->req_tx_queues--;
3318 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3319 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3320 if (!adapter->tx_scrq)
3323 for (i = 0; i < adapter->req_tx_queues; i++) {
3324 adapter->tx_scrq[i] = allqueues[i];
3325 adapter->tx_scrq[i]->pool_index = i;
3326 adapter->num_active_tx_scrqs++;
3329 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3330 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3331 if (!adapter->rx_scrq)
3334 for (i = 0; i < adapter->req_rx_queues; i++) {
3335 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3336 adapter->rx_scrq[i]->scrq_num = i;
3337 adapter->num_active_rx_scrqs++;
3344 kfree(adapter->tx_scrq);
3345 adapter->tx_scrq = NULL;
3347 for (i = 0; i < registered_queues; i++)
3348 release_sub_crq_queue(adapter, allqueues[i], 1);
3353 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3355 struct device *dev = &adapter->vdev->dev;
3356 union ibmvnic_crq crq;
3360 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
3361 * the PROMISC flag). Initialize this count upfront. When the tasklet
3362 * receives a response to all of these, it will send the next protocol
3363 * message (QUERY_IP_OFFLOAD). */
3365 if (!(adapter->netdev->flags & IFF_PROMISC) ||
3366 adapter->promisc_supported)
cap_reqs = 7;
else
cap_reqs = 6;
3372 /* Sub-CRQ entries are 32 bytes long */
3373 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3375 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3377 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3378 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3379 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3383 if (adapter->desired.mtu)
3384 adapter->req_mtu = adapter->desired.mtu;
3386 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3388 if (!adapter->desired.tx_entries)
3389 adapter->desired.tx_entries =
3390 adapter->max_tx_entries_per_subcrq;
3391 if (!adapter->desired.rx_entries)
3392 adapter->desired.rx_entries =
3393 adapter->max_rx_add_entries_per_subcrq;
3395 max_entries = IBMVNIC_MAX_LTB_SIZE /
3396 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3398 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3399 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3400 adapter->desired.tx_entries = max_entries;
3403 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3404 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3405 adapter->desired.rx_entries = max_entries;
3408 if (adapter->desired.tx_entries)
3409 adapter->req_tx_entries_per_subcrq =
3410 adapter->desired.tx_entries;
3412 adapter->req_tx_entries_per_subcrq =
3413 adapter->max_tx_entries_per_subcrq;
3415 if (adapter->desired.rx_entries)
3416 adapter->req_rx_add_entries_per_subcrq =
3417 adapter->desired.rx_entries;
3419 adapter->req_rx_add_entries_per_subcrq =
3420 adapter->max_rx_add_entries_per_subcrq;
3422 if (adapter->desired.tx_queues)
3423 adapter->req_tx_queues =
3424 adapter->desired.tx_queues;
3426 adapter->req_tx_queues =
3427 adapter->opt_tx_comp_sub_queues;
3429 if (adapter->desired.rx_queues)
3430 adapter->req_rx_queues =
3431 adapter->desired.rx_queues;
3433 adapter->req_rx_queues =
3434 adapter->opt_rx_comp_queues;
3436 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3438 atomic_add(cap_reqs, &adapter->running_cap_crqs);
3440 memset(&crq, 0, sizeof(crq));
3441 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3442 crq.request_capability.cmd = REQUEST_CAPABILITY;
3444 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3445 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3447 ibmvnic_send_crq(adapter, &crq);
3449 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3450 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3452 ibmvnic_send_crq(adapter, &crq);
3454 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3455 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3457 ibmvnic_send_crq(adapter, &crq);
3459 crq.request_capability.capability =
3460 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3461 crq.request_capability.number =
3462 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3464 ibmvnic_send_crq(adapter, &crq);
3466 crq.request_capability.capability =
3467 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3468 crq.request_capability.number =
3469 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3471 ibmvnic_send_crq(adapter, &crq);
3473 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3474 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3476 ibmvnic_send_crq(adapter, &crq);
3478 if (adapter->netdev->flags & IFF_PROMISC) {
3479 if (adapter->promisc_supported) {
3480 crq.request_capability.capability =
3481 cpu_to_be16(PROMISC_REQUESTED);
3482 crq.request_capability.number = cpu_to_be64(1);
3484 ibmvnic_send_crq(adapter, &crq);
3487 crq.request_capability.capability =
3488 cpu_to_be16(PROMISC_REQUESTED);
3489 crq.request_capability.number = cpu_to_be64(0);
3491 ibmvnic_send_crq(adapter, &crq);
3494 /* Keep at end to catch any discrepancy between expected and actual
* CRQs sent. */
3497 WARN_ON(cap_reqs != 0);
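/* Ownership convention for the helpers below: a sub-CRQ slot is valid
 * while the 'first' byte of its descriptor has IBMVNIC_CRQ_CMD_RSP set;
 * consumers hand the slot back by clearing that byte (see
 * "next->rx_comp.first = 0" and "next->tx_comp.first = 0" above).
 */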
3500 static int pending_scrq(struct ibmvnic_adapter *adapter,
3501 struct ibmvnic_sub_crq_queue *scrq)
3503 union sub_crq *entry = &scrq->msgs[scrq->cur];
3505 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3511 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3512 struct ibmvnic_sub_crq_queue *scrq)
3514 union sub_crq *entry;
3515 unsigned long flags;
3517 spin_lock_irqsave(&scrq->lock, flags);
3518 entry = &scrq->msgs[scrq->cur];
3519 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3520 if (++scrq->cur == scrq->size)
3525 spin_unlock_irqrestore(&scrq->lock, flags);
3527 /* Ensure that the entire buffer descriptor has been
3528 * loaded before reading its contents */
dma_rmb();
return entry;
3535 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3537 struct ibmvnic_crq_queue *queue = &adapter->crq;
3538 union ibmvnic_crq *crq;
3540 crq = &queue->msgs[queue->cur];
3541 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3542 if (++queue->cur == queue->size)
3551 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3555 dev_warn_ratelimited(dev,
3556 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3560 dev_warn_ratelimited(dev,
3561 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3565 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
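/* The senders below share one pattern: build the descriptor in memory,
 * issue a barrier so the hypervisor observes the complete request, then
 * trap via H_SEND_SUB_CRQ, H_SEND_SUB_CRQ_INDIRECT or H_SEND_CRQ with
 * the descriptor passed as big-endian u64 register arguments.
 */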
3570 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3571 union sub_crq *sub_crq)
3573 unsigned int ua = adapter->vdev->unit_address;
3574 struct device *dev = &adapter->vdev->dev;
3575 u64 *u64_crq = (u64 *)sub_crq;
3578 netdev_dbg(adapter->netdev,
3579 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3580 (unsigned long int)cpu_to_be64(remote_handle),
3581 (unsigned long int)cpu_to_be64(u64_crq[0]),
3582 (unsigned long int)cpu_to_be64(u64_crq[1]),
3583 (unsigned long int)cpu_to_be64(u64_crq[2]),
3584 (unsigned long int)cpu_to_be64(u64_crq[3]));
3586 /* Make sure the hypervisor sees the complete request */
3589 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3590 cpu_to_be64(remote_handle),
3591 cpu_to_be64(u64_crq[0]),
3592 cpu_to_be64(u64_crq[1]),
3593 cpu_to_be64(u64_crq[2]),
3594 cpu_to_be64(u64_crq[3]));
3597 print_subcrq_error(dev, rc, __func__);
3602 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3603 u64 remote_handle, u64 ioba, u64 num_entries)
3605 unsigned int ua = adapter->vdev->unit_address;
3606 struct device *dev = &adapter->vdev->dev;
3609 /* Make sure the hypervisor sees the complete request */
3611 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3612 cpu_to_be64(remote_handle),
3616 print_subcrq_error(dev, rc, __func__);
3621 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3622 union ibmvnic_crq *crq)
3624 unsigned int ua = adapter->vdev->unit_address;
3625 struct device *dev = &adapter->vdev->dev;
3626 u64 *u64_crq = (u64 *)crq;
3629 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3630 (unsigned long int)cpu_to_be64(u64_crq[0]),
3631 (unsigned long int)cpu_to_be64(u64_crq[1]));
3633 if (!adapter->crq.active &&
3634 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3635 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3639 /* Make sure the hypervisor sees the complete request */
3642 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3643 cpu_to_be64(u64_crq[0]),
3644 cpu_to_be64(u64_crq[1]));
3647 if (rc == H_CLOSED) {
3648 dev_warn(dev, "CRQ Queue closed\n");
3649 /* do not reset, report the fail, wait for passive init from server */
3652 dev_warn(dev, "Send error (rc=%d)\n", rc);
3658 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3660 struct device *dev = &adapter->vdev->dev;
3661 union ibmvnic_crq crq;
3665 memset(&crq, 0, sizeof(crq));
3666 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3667 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3668 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3671 rc = ibmvnic_send_crq(adapter, &crq);
3677 } while (retries > 0);
3680 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3687 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3689 union ibmvnic_crq crq;
3691 memset(&crq, 0, sizeof(crq));
3692 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3693 crq.version_exchange.cmd = VERSION_EXCHANGE;
3694 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3696 return ibmvnic_send_crq(adapter, &crq);
3699 struct vnic_login_client_data {
3705 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3709 /* Calculate the amount of buffer space needed for the
3710 * vnic client data in the login buffer. There are four entries,
3711 * OS name, LPAR name, device name, and a null last entry. */
3713 len = 4 * sizeof(struct vnic_login_client_data);
3714 len += 6; /* "Linux" plus NULL */
3715 len += strlen(utsname()->nodename) + 1;
3716 len += strlen(adapter->netdev->name) + 1;
3721 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3722 struct vnic_login_client_data *vlcd)
3724 const char *os_name = "Linux";
3727 /* Type 1 - LPAR OS */
vlcd->type = 1;
3729 len = strlen(os_name) + 1;
3730 vlcd->len = cpu_to_be16(len);
3731 strncpy(vlcd->name, os_name, len);
3732 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3734 /* Type 2 - LPAR name */
vlcd->type = 2;
3736 len = strlen(utsname()->nodename) + 1;
3737 vlcd->len = cpu_to_be16(len);
3738 strncpy(vlcd->name, utsname()->nodename, len);
3739 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3741 /* Type 3 - device name */
vlcd->type = 3;
3743 len = strlen(adapter->netdev->name) + 1;
3744 vlcd->len = cpu_to_be16(len);
3745 strncpy(vlcd->name, adapter->netdev->name, len);
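/* Layout of the login buffer assembled by send_login(), in order: the
 * fixed header, req_tx_queues tx sub-CRQ numbers, req_rx_queues rx
 * sub-CRQ numbers, then the client data records built above. E.g. with
 * 4 tx and 4 rx queues the client data starts at
 * sizeof(struct ibmvnic_login_buffer) + 8 * sizeof(u64).
 */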
3748 static int send_login(struct ibmvnic_adapter *adapter)
3750 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3751 struct ibmvnic_login_buffer *login_buffer;
3752 struct device *dev = &adapter->vdev->dev;
3753 struct vnic_login_client_data *vlcd;
3754 dma_addr_t rsp_buffer_token;
3755 dma_addr_t buffer_token;
3756 size_t rsp_buffer_size;
3757 union ibmvnic_crq crq;
3758 int client_data_len;
3765 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3766 netdev_err(adapter->netdev,
3767 "RX or TX queues are not allocated, device login failed\n");
3771 release_login_buffer(adapter);
3772 release_login_rsp_buffer(adapter);
3774 client_data_len = vnic_client_data_len(adapter);
3777 buffer_size = sizeof(struct ibmvnic_login_buffer) +
3778 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
client_data_len;
3781 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
if (!login_buffer)
3783 goto buf_alloc_failed;
3785 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
DMA_TO_DEVICE);
3787 if (dma_mapping_error(dev, buffer_token)) {
3788 dev_err(dev, "Couldn't map login buffer\n");
3789 goto buf_map_failed;
3792 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3793 sizeof(u64) * adapter->req_tx_queues +
3794 sizeof(u64) * adapter->req_rx_queues +
3795 sizeof(u64) * adapter->req_rx_queues +
3796 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3798 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3799 if (!login_rsp_buffer)
3800 goto buf_rsp_alloc_failed;
3802 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3803 rsp_buffer_size, DMA_FROM_DEVICE);
3804 if (dma_mapping_error(dev, rsp_buffer_token)) {
3805 dev_err(dev, "Couldn't map login rsp buffer\n");
3806 goto buf_rsp_map_failed;
3809 adapter->login_buf = login_buffer;
3810 adapter->login_buf_token = buffer_token;
3811 adapter->login_buf_sz = buffer_size;
3812 adapter->login_rsp_buf = login_rsp_buffer;
3813 adapter->login_rsp_buf_token = rsp_buffer_token;
3814 adapter->login_rsp_buf_sz = rsp_buffer_size;
3816 login_buffer->len = cpu_to_be32(buffer_size);
3817 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3818 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3819 login_buffer->off_txcomp_subcrqs =
3820 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3821 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3822 login_buffer->off_rxcomp_subcrqs =
3823 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3824 sizeof(u64) * adapter->req_tx_queues);
3825 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3826 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3828 tx_list_p = (__be64 *)((char *)login_buffer +
3829 sizeof(struct ibmvnic_login_buffer));
3830 rx_list_p = (__be64 *)((char *)login_buffer +
3831 sizeof(struct ibmvnic_login_buffer) +
3832 sizeof(u64) * adapter->req_tx_queues);
3834 for (i = 0; i < adapter->req_tx_queues; i++) {
3835 if (adapter->tx_scrq[i]) {
3836 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
crq_num);
3841 for (i = 0; i < adapter->req_rx_queues; i++) {
3842 if (adapter->rx_scrq[i]) {
3843 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
crq_num);
3848 /* Insert vNIC login client data */
3849 vlcd = (struct vnic_login_client_data *)
3850 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3851 login_buffer->client_data_offset =
3852 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3853 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3855 vnic_add_client_data(adapter, vlcd);
3857 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3858 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3859 netdev_dbg(adapter->netdev, "%016lx\n",
3860 ((unsigned long int *)(adapter->login_buf))[i]);
3863 memset(&crq, 0, sizeof(crq));
3864 crq.login.first = IBMVNIC_CRQ_CMD;
3865 crq.login.cmd = LOGIN;
3866 crq.login.ioba = cpu_to_be32(buffer_token);
3867 crq.login.len = cpu_to_be32(buffer_size);
3869 adapter->login_pending = true;
3870 rc = ibmvnic_send_crq(adapter, &crq);
3872 adapter->login_pending = false;
3873 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3874 goto buf_send_failed;
3880 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
DMA_FROM_DEVICE);
3883 kfree(login_rsp_buffer);
3884 adapter->login_rsp_buf = NULL;
3885 buf_rsp_alloc_failed:
3886 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3888 kfree(login_buffer);
3889 adapter->login_buf = NULL;
3894 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
u32 len, u8 map_id)
3897 union ibmvnic_crq crq;
3899 memset(&crq, 0, sizeof(crq));
3900 crq.request_map.first = IBMVNIC_CRQ_CMD;
3901 crq.request_map.cmd = REQUEST_MAP;
3902 crq.request_map.map_id = map_id;
3903 crq.request_map.ioba = cpu_to_be32(addr);
3904 crq.request_map.len = cpu_to_be32(len);
3905 return ibmvnic_send_crq(adapter, &crq);
3908 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3910 union ibmvnic_crq crq;
3912 memset(&crq, 0, sizeof(crq));
3913 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3914 crq.request_unmap.cmd = REQUEST_UNMAP;
3915 crq.request_unmap.map_id = map_id;
3916 return ibmvnic_send_crq(adapter, &crq);
3919 static void send_query_map(struct ibmvnic_adapter *adapter)
3921 union ibmvnic_crq crq;
3923 memset(&crq, 0, sizeof(crq));
3924 crq.query_map.first = IBMVNIC_CRQ_CMD;
3925 crq.query_map.cmd = QUERY_MAP;
3926 ibmvnic_send_crq(adapter, &crq);
3929 /* Send a series of CRQs requesting various capabilities of the VNIC server */
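/* Together with the response handlers, this implements a simple
 * scatter/gather handshake: running_cap_crqs is primed with the number
 * of queries sent, each QUERY_CAPABILITY response decrements it, and
 * the response that drives it to zero moves the protocol to the next
 * stage.
 */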
3930 static void send_query_cap(struct ibmvnic_adapter *adapter)
3932 union ibmvnic_crq crq;
3935 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
3936 * upfront. When the tasklet receives a response to all of these, it
3937 * can send out the next protocol message (REQUEST_CAPABILITY). */
3941 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3943 memset(&crq, 0, sizeof(crq));
3944 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3945 crq.query_capability.cmd = QUERY_CAPABILITY;
3947 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3948 ibmvnic_send_crq(adapter, &crq);
3951 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3952 ibmvnic_send_crq(adapter, &crq);
3955 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3956 ibmvnic_send_crq(adapter, &crq);
3959 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3960 ibmvnic_send_crq(adapter, &crq);
3963 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3964 ibmvnic_send_crq(adapter, &crq);
3967 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3968 ibmvnic_send_crq(adapter, &crq);
3971 crq.query_capability.capability =
3972 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3973 ibmvnic_send_crq(adapter, &crq);
3976 crq.query_capability.capability =
3977 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3978 ibmvnic_send_crq(adapter, &crq);
3981 crq.query_capability.capability =
3982 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3983 ibmvnic_send_crq(adapter, &crq);
3986 crq.query_capability.capability =
3987 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3988 ibmvnic_send_crq(adapter, &crq);
3991 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3992 ibmvnic_send_crq(adapter, &crq);
3995 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3996 ibmvnic_send_crq(adapter, &crq);
3999 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4000 ibmvnic_send_crq(adapter, &crq);
4003 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4004 ibmvnic_send_crq(adapter, &crq);
4007 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4008 ibmvnic_send_crq(adapter, &crq);
4011 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4012 ibmvnic_send_crq(adapter, &crq);
4015 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4016 ibmvnic_send_crq(adapter, &crq);
4019 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4020 ibmvnic_send_crq(adapter, &crq);
4023 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4024 ibmvnic_send_crq(adapter, &crq);
4027 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4028 ibmvnic_send_crq(adapter, &crq);
4031 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4032 ibmvnic_send_crq(adapter, &crq);
4035 crq.query_capability.capability =
4036 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4037 ibmvnic_send_crq(adapter, &crq);
4040 crq.query_capability.capability =
4041 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4042 ibmvnic_send_crq(adapter, &crq);
4045 crq.query_capability.capability =
4046 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4047 ibmvnic_send_crq(adapter, &crq);
4050 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4052 ibmvnic_send_crq(adapter, &crq);
4055 /* Keep at end to catch any discrepancy between expected and actual
* CRQs sent. */
4058 WARN_ON(cap_reqs != 0);
4061 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4063 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4064 struct device *dev = &adapter->vdev->dev;
4065 union ibmvnic_crq crq;
4067 adapter->ip_offload_tok = dma_map_single(dev,
4069 &adapter->ip_offload_buf,
buf_sz, DMA_FROM_DEVICE);
4073 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4074 if (!firmware_has_feature(FW_FEATURE_CMO))
4075 dev_err(dev, "Couldn't map offload buffer\n");
4079 memset(&crq, 0, sizeof(crq));
4080 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4081 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4082 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4083 crq.query_ip_offload.ioba =
4084 cpu_to_be32(adapter->ip_offload_tok);
4086 ibmvnic_send_crq(adapter, &crq);
4089 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4091 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4092 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4093 struct device *dev = &adapter->vdev->dev;
4094 netdev_features_t old_hw_features = 0;
4095 union ibmvnic_crq crq;
4097 adapter->ip_offload_ctrl_tok = dma_map_single(dev, ctrl_buf,
4100 sizeof(adapter->ip_offload_ctrl),
DMA_TO_DEVICE);
4103 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4104 dev_err(dev, "Couldn't map ip offload control buffer\n");
4108 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4109 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4110 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4111 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4112 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4113 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4114 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4115 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4116 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4117 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4119 /* large_rx disabled for now, additional features needed */
4120 ctrl_buf->large_rx_ipv4 = 0;
4121 ctrl_buf->large_rx_ipv6 = 0;
4123 if (adapter->state != VNIC_PROBING) {
4124 old_hw_features = adapter->netdev->hw_features;
4125 adapter->netdev->hw_features = 0;
4128 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4130 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4131 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4133 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4134 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4136 if ((adapter->netdev->features &
4137 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4138 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4140 if (buf->large_tx_ipv4)
4141 adapter->netdev->hw_features |= NETIF_F_TSO;
4142 if (buf->large_tx_ipv6)
4143 adapter->netdev->hw_features |= NETIF_F_TSO6;
4145 if (adapter->state == VNIC_PROBING) {
4146 adapter->netdev->features |= adapter->netdev->hw_features;
4147 } else if (old_hw_features != adapter->netdev->hw_features) {
4148 netdev_features_t tmp = 0;
4150 /* disable features no longer supported */
4151 adapter->netdev->features &= adapter->netdev->hw_features;
4152 /* turn on features now supported if previously enabled */
4153 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4154 adapter->netdev->hw_features;
4155 adapter->netdev->features |=
4156 tmp & adapter->netdev->wanted_features;
4159 memset(&crq, 0, sizeof(crq));
4160 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4161 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4162 crq.control_ip_offload.len =
4163 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4164 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4165 ibmvnic_send_crq(adapter, &crq);
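/* The hw_features handling above follows netdev conventions: during
 * probe everything the server offers is enabled outright; on a
 * post-probe renegotiation only still-supported features survive, and
 * features that became available again are re-enabled only if
 * wanted_features asks for them.
 */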
4168 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4169 struct ibmvnic_adapter *adapter)
4171 struct device *dev = &adapter->vdev->dev;
4173 if (crq->get_vpd_size_rsp.rc.code) {
4174 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4175 crq->get_vpd_size_rsp.rc.code);
4176 complete(&adapter->fw_done);
4180 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4181 complete(&adapter->fw_done);
4184 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4185 struct ibmvnic_adapter *adapter)
4187 struct device *dev = &adapter->vdev->dev;
4188 unsigned char *substr = NULL;
4189 u8 fw_level_len = 0;
4191 memset(adapter->fw_version, 0, 32);
4193 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4196 if (crq->get_vpd_rsp.rc.code) {
4197 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4198 crq->get_vpd_rsp.rc.code);
4202 /* get the position of the firmware version info
4203 * located after the ASCII 'RM' substring in the buffer */
4205 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4207 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4211 /* get length of firmware level ASCII substring */
4212 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4213 fw_level_len = *(substr + 2);
4215 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4219 /* copy firmware version string from vpd into adapter */
4220 if ((substr + 3 + fw_level_len) <
4221 (adapter->vpd->buff + adapter->vpd->len)) {
4222 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4224 dev_info(dev, "FW substr extrapolated VPD buff\n");
4228 if (adapter->fw_version[0] == '\0')
4229 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4230 complete(&adapter->fw_done);
4233 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4235 struct device *dev = &adapter->vdev->dev;
4236 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4239 dma_unmap_single(dev, adapter->ip_offload_tok,
4240 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4242 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4243 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4244 netdev_dbg(adapter->netdev, "%016lx\n",
4245 ((unsigned long int *)(buf))[i]);
4247 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4248 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4249 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4250 buf->tcp_ipv4_chksum);
4251 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4252 buf->tcp_ipv6_chksum);
4253 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4254 buf->udp_ipv4_chksum);
4255 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4256 buf->udp_ipv6_chksum);
4257 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4258 buf->large_tx_ipv4);
4259 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4260 buf->large_tx_ipv6);
4261 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4262 buf->large_rx_ipv4);
4263 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4264 buf->large_rx_ipv6);
4265 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4266 buf->max_ipv4_header_size);
4267 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4268 buf->max_ipv6_header_size);
4269 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4270 buf->max_tcp_header_size);
4271 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4272 buf->max_udp_header_size);
4273 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4274 buf->max_large_tx_size);
4275 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4276 buf->max_large_rx_size);
4277 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4278 buf->ipv6_extension_header);
4279 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4280 buf->tcp_pseudosum_req);
4281 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4282 buf->num_ipv6_ext_headers);
4283 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4284 buf->off_ipv6_ext_headers);
4286 send_control_ip_offload(adapter);
4289 static const char *ibmvnic_fw_err_cause(u16 cause)
4292 case ADAPTER_PROBLEM:
4293 return "adapter problem";
4295 return "bus problem";
4297 return "firmware problem";
4299 return "device driver problem";
4301 return "EEH recovery";
4303 return "firmware updated";
4305 return "low Memory";
4311 static void handle_error_indication(union ibmvnic_crq *crq,
4312 struct ibmvnic_adapter *adapter)
4314 struct device *dev = &adapter->vdev->dev;
4317 cause = be16_to_cpu(crq->error_indication.error_cause);
4319 dev_warn_ratelimited(dev,
4320 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4321 crq->error_indication.flags
4322 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4323 ibmvnic_fw_err_cause(cause));
4325 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4326 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4328 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4331 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4332 struct ibmvnic_adapter *adapter)
4334 struct net_device *netdev = adapter->netdev;
4335 struct device *dev = &adapter->vdev->dev;
4338 rc = crq->change_mac_addr_rsp.rc.code;
4340 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4343 /* crq->change_mac_addr.mac_addr is the requested one
4344 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. */
4346 ether_addr_copy(netdev->dev_addr,
4347 &crq->change_mac_addr_rsp.mac_addr[0]);
4348 ether_addr_copy(adapter->mac_addr,
4349 &crq->change_mac_addr_rsp.mac_addr[0]);
4351 complete(&adapter->fw_done);
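/* handle_request_cap_rsp() below pairs each REQUEST_CAPABILITY CRQ from
 * send_request_cap() with its response. On PARTIALSUCCESS the requested
 * value is rewritten with the server's counter-offer and the whole
 * batch is resent, so negotiation converges on mutually acceptable
 * capabilities.
 */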
4355 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4356 struct ibmvnic_adapter *adapter)
4358 struct device *dev = &adapter->vdev->dev;
4362 atomic_dec(&adapter->running_cap_crqs);
4363 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
4364 atomic_read(&adapter->running_cap_crqs));
4365 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4367 req_value = &adapter->req_tx_queues;
4371 req_value = &adapter->req_rx_queues;
4374 case REQ_RX_ADD_QUEUES:
4375 req_value = &adapter->req_rx_add_queues;
4378 case REQ_TX_ENTRIES_PER_SUBCRQ:
4379 req_value = &adapter->req_tx_entries_per_subcrq;
4380 name = "tx_entries_per_subcrq";
4382 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4383 req_value = &adapter->req_rx_add_entries_per_subcrq;
4384 name = "rx_add_entries_per_subcrq";
4387 req_value = &adapter->req_mtu;
4390 case PROMISC_REQUESTED:
4391 req_value = &adapter->promisc;
4395 dev_err(dev, "Got invalid cap request rsp %d\n",
4396 crq->request_capability.capability);
4400 switch (crq->request_capability_rsp.rc.code) {
4403 case PARTIALSUCCESS:
4404 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4406 (long int)be64_to_cpu(crq->request_capability_rsp.
4409 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4411 pr_err("mtu of %llu is not supported. Reverting.\n",
4413 *req_value = adapter->fallback.mtu;
4416 be64_to_cpu(crq->request_capability_rsp.number);
4419 send_request_cap(adapter, 1);
4422 dev_err(dev, "Error %d in request cap rsp\n",
4423 crq->request_capability_rsp.rc.code);
4427 /* Done receiving requested capabilities, query IP offload support */
4428 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4429 adapter->wait_capability = false;
4430 send_query_ip_offload(adapter);
4434 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4435 struct ibmvnic_adapter *adapter)
4437 struct device *dev = &adapter->vdev->dev;
4438 struct net_device *netdev = adapter->netdev;
4439 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4440 struct ibmvnic_login_buffer *login = adapter->login_buf;
4441 u64 *tx_handle_array;
4442 u64 *rx_handle_array;
4449 /* CHECK: Test/set of login_pending does not need to be atomic
4450 * because only ibmvnic_tasklet tests/clears this. */
4452 if (!adapter->login_pending) {
4453 netdev_warn(netdev, "Ignoring unexpected login response\n");
4456 adapter->login_pending = false;
4458 /* If the number of queues requested can't be allocated by the
4459 * server, the login response will return with code 1. We will need
4460 * to resend the login buffer with fewer queues requested. */
4462 if (login_rsp_crq->generic.rc.code) {
4463 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4464 complete(&adapter->init_done);
4468 if (adapter->failover_pending) {
4469 adapter->init_done_rc = -EAGAIN;
4470 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
4471 complete(&adapter->init_done);
4472 /* login response buffer will be released on reset */
4476 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4478 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4479 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4480 netdev_dbg(adapter->netdev, "%016lx\n",
4481 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4485 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4486 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4487 adapter->req_rx_add_queues !=
4488 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4489 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4490 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4494 rsp_len = be32_to_cpu(login_rsp->len);
4495 if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
4496 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
4497 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
4498 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
4499 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
4500 /* This can happen if a login request times out and there are
4501 * 2 outstanding login requests sent, the LOGIN_RSP crq
4502 * could have been for the older login request. So we are
4503 * parsing the newer response buffer which may be incomplete
4505 dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
4506 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4510 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4511 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4512 /* variable buffer sizes are not supported, so just read the
4515 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4517 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4518 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4520 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4521 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4522 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4523 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4525 for (i = 0; i < num_tx_pools; i++)
4526 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4528 for (i = 0; i < num_rx_pools; i++)
4529 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4531 adapter->num_active_tx_scrqs = num_tx_pools;
4532 adapter->num_active_rx_scrqs = num_rx_pools;
4533 release_login_rsp_buffer(adapter);
4534 release_login_buffer(adapter);
4535 complete(&adapter->init_done);
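
/* Responses for the long term buffer map/unmap commands carry only a
 * return code; the unmap response below is simply checked for errors.
 */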
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
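
/* Record one QUERY_CAPABILITY response.  Each response decrements
 * running_cap_crqs; when the count hits zero all capabilities have
 * been reported and send_request_cap() negotiates values within the
 * advertised min/max bounds.
 */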
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entries_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}
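
/* Query the physical port parameters synchronously: send the CRQ under
 * fw_lock and wait up to 10 seconds for the tasklet to complete
 * fw_done, with the result left in fw_done_rc.
 */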
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
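
/* Translate the IBMVNIC_*BPS speed codes and duplex flags from the
 * response into the ethtool SPEED_* and DUPLEX_* values cached on the
 * adapter for ethtool reporting.
 */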
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
	int rc;

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
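
/* Top-level CRQ dispatcher, called from the tasklet for each message
 * pulled off the queue.  The "first" byte selects the message class
 * (init handshake, transport event, or command response); the "cmd"
 * byte then selects the specific response handler.
 */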
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				adapter->init_done_rc = -EIO;
				complete(&adapter->init_done);
			}
			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}

		/* if we got here during crq-init, retry crq-init */
		if (!completion_done(&adapter->init_done)) {
			adapter->init_done_rc = -EAGAIN;
			complete(&adapter->init_done);
		}

		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
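
/* The hard interrupt handler only schedules the tasklet; all CRQ
 * processing happens in ibmvnic_tasklet() under the queue lock.
 */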
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
		crq->generic.first = 0;
	}

	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
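
/* CRQ lifecycle helpers: the queue is enabled, closed, registered and
 * re-registered through the H_ENABLE_CRQ, H_FREE_CRQ and H_REG_CRQ
 * hypervisor calls, each retried while the hypervisor reports busy.
 */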
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
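
/* Allocate a single zeroed page for CRQ messages, DMA-map it for the
 * long term, register it with the hypervisor, and wire up the CRQ
 * interrupt.  Failures unwind in reverse order via the labels below.
 */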
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
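
/* Drive the CRQ init handshake with the VNIC server and then bring up
 * the sub-CRQs.  On resets that leave the queue counts unchanged the
 * existing sub-CRQs are reset and reused rather than reallocated.
 */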
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			/* no need to reinitialize completely, but we do
			 * need to clean up transmits that were in flight
			 * when we processed the reset. Failure to do so
			 * will confound the upper layer, usually TCP, by
			 * creating the illusion of transmits that are
			 * awaiting completion.
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
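
/* Forward declaration only: ibmvnic_probe() creates the sysfs file
 * before DEVICE_ATTR_WO(failover) defines the attribute below.
 */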
static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != -EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == -EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
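
/* Writing "1" to the "failover" sysfs attribute initiates a client
 * failover: a session token is fetched with H_GET_SESSION_TOKEN and
 * handed back via H_SESSION_ERR_DETECTED.  Example (device path is
 * illustrative, not fixed):
 *
 *   echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 */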
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc)
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);

	return count;
}

static DEVICE_ATTR_WO(failover);
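
/* Rough estimate of the IO entitlement (DMA-mappable memory) this
 * device wants: one page for the CRQ, the statistics buffer, four
 * pages per sub-CRQ, plus every buffer of every RX pool, all rounded
 * to the IOMMU page size.
 */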
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);