/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
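
/* Illustrative sketch (not part of the original file): the long term
 * mapping described above amounts to allocating and registering one
 * large buffer per pool up front,
 *
 *	ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr, GFP_KERNEL);
 *	send_request_map(adapter, ltb->addr, size, ltb->map_id);
 *
 * and then copying each skb into (or out of) a fixed slot of that
 * buffer, e.g. memcpy(ltb->buff + index * buff_size, skb->data,
 * skb->len), instead of DMA mapping every packet individually.
 */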
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                                offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
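
/* Example (illustrative): the two macros combine to read one firmware
 * counter, e.g.
 *
 *	u64 packets = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * IBMVNIC_STAT_OFF() yields the byte offset of a struct
 * ibmvnic_statistics field within struct ibmvnic_adapter, and
 * IBMVNIC_GET_STAT() reads a u64 at that offset.
 */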
static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_rx_pool *rx_pool, int num, int index,
                         int buff_size, int active)
{
        netdev_dbg(adapter->netdev,
                   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
                   index, num, buff_size);
        rx_pool->size = num;
        rx_pool->index = index;
        rx_pool->buff_size = buff_size;
        rx_pool->active = active;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;

        ltb->size = size;
        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                       GFP_KERNEL);
        if (!ltb->buff) {
                dev_err(dev, "Couldn't alloc long term buffer\n");
                return -ENOMEM;
        }
        ltb->map_id = adapter->map_id;
        adapter->map_id++;
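
        /* Registration is synchronous: the REQUEST_MAP response handler
         * (not shown in this excerpt) completes adapter->fw_done, so the
         * caller blocks below until firmware has acknowledged the mapping.
         */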
        init_completion(&adapter->fw_done);
        send_request_map(adapter, ltb->addr,
                         ltb->size, ltb->map_id);
        wait_for_completion(&adapter->fw_done);
        return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
        if (!adapter->failover)
                send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_rx_pool *pool)
{
        struct device *dev = &adapter->vdev->dev;
        int i;

        pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
        if (!pool->free_map)
                return -ENOMEM;

        pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
                                GFP_KERNEL);

        if (!pool->rx_buff) {
                dev_err(dev, "Couldn't alloc rx buffers\n");
                kfree(pool->free_map);
                return -ENOMEM;
        }

        if (alloc_long_term_buff(adapter, &pool->long_term_buff,
                                 pool->size * pool->buff_size)) {
                kfree(pool->free_map);
                kfree(pool->rx_buff);
                return -ENOMEM;
        }

        for (i = 0; i < pool->size; ++i)
                pool->free_map[i] = i;

        atomic_set(&pool->available, 0);
        pool->next_alloc = 0;
        pool->next_free = 0;

        return 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        struct device *dev = &adapter->vdev->dev;
        int buffers_added = 0;
        unsigned long lpar_rc;
        union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        u64 *handle_array;
        int shift = 0;
        int index;
        int i;

        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                               be32_to_cpu(adapter->login_rsp_buf->
                                           off_rxadd_subcrqs));

        for (i = 0; i < count; ++i) {
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                index = pool->free_map[pool->next_free];

                if (pool->rx_buff[index].skb)
                        dev_err(dev, "Inconsistent free_map!\n");

                /* Copy the skb to the long term mapped DMA buffer */
                offset = index * pool->buff_size;
                dst = pool->long_term_buff.buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = pool->long_term_buff.addr + offset;
                pool->rx_buff[index].data = dst;

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->rx_buff[index].dma = dma_addr;
                pool->rx_buff[index].skb = skb;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;

                memset(&sub_crq, 0, sizeof(sub_crq));
                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq.rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * masked.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

                lpar_rc = send_subcrq(adapter, handle_array[pool->index],
                                      &sub_crq);
                if (lpar_rc != H_SUCCESS)
                        goto failure;

                buffers_added++;
                adapter->replenish_add_buff_success++;
                pool->next_free = (pool->next_free + 1) % pool->size;
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        dev_info(dev, "replenish pools failure\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;
        if (!dma_mapping_error(dev, dma_addr))
                dma_unmap_single(dev, dma_addr, pool->buff_size,
                                 DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;
        atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->migrated)
                return;

        adapter->replenish_task_cycles++;
        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
             i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_rx_pool *pool)
{
        int i;

        kfree(pool->free_map);
        pool->free_map = NULL;

        if (!pool->rx_buff)
                return;

        for (i = 0; i < pool->size; i++) {
                if (pool->rx_buff[i].skb) {
                        dev_kfree_skb_any(pool->rx_buff[i].skb);
                        pool->rx_buff[i].skb = NULL;
                }
        }
        kfree(pool->rx_buff);
        pool->rx_buff = NULL;
}
static int ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_pool *tx_pool;
        union ibmvnic_crq crq;
        int rxadd_subcrqs;
        u64 *size_array;
        int tx_subcrqs;
        int i, j;

        rxadd_subcrqs =
            be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        tx_subcrqs =
            be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                             be32_to_cpu(adapter->login_rsp_buf->
                                         off_rxadd_buff_size));
        adapter->map_id = 1;
        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                goto alloc_napi_failed;
        for (i = 0; i < adapter->req_rx_queues; i++) {
                netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&adapter->napi[i]);
        }
        adapter->rx_pool =
            kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

        if (!adapter->rx_pool)
                goto rx_pool_arr_alloc_failed;
        send_map_query(adapter);
        for (i = 0; i < rxadd_subcrqs; i++) {
                init_rx_pool(adapter, &adapter->rx_pool[i],
                             adapter->req_rx_add_entries_per_subcrq, i,
                             be64_to_cpu(size_array[i]), 1);
                if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
                        dev_err(dev, "Couldn't alloc rx pool\n");
                        goto rx_pool_alloc_failed;
                }
        }
        adapter->tx_pool =
            kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

        if (!adapter->tx_pool)
                goto tx_pool_arr_alloc_failed;
        for (i = 0; i < tx_subcrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
                tx_pool->tx_buff =
                    kcalloc(adapter->req_tx_entries_per_subcrq,
                            sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
                if (!tx_pool->tx_buff)
                        goto tx_pool_alloc_failed;

                if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                         adapter->req_tx_entries_per_subcrq *
                                         adapter->req_mtu))
                        goto tx_ltb_alloc_failed;

                tx_pool->free_map =
                    kcalloc(adapter->req_tx_entries_per_subcrq,
                            sizeof(int), GFP_KERNEL);
                if (!tx_pool->free_map)
                        goto tx_fm_alloc_failed;

                for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
                        tx_pool->free_map[j] = j;

                tx_pool->consumer_index = 0;
                tx_pool->producer_index = 0;
        }
        adapter->bounce_buffer_size =
            (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
        adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
                                         GFP_KERNEL);
        if (!adapter->bounce_buffer)
                goto bounce_alloc_failed;

        adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
                                                    adapter->bounce_buffer_size,
                                                    DMA_TO_DEVICE);
        if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                dev_err(dev, "Couldn't map tx bounce buffer\n");
                goto bounce_map_failed;
        }
        replenish_pools(adapter);

        /* We're ready to receive frames, enable the sub-crq interrupts and
         * set the logical link state to up
         */
        for (i = 0; i < adapter->req_rx_queues; i++)
                enable_scrq_irq(adapter, adapter->rx_scrq[i]);

        for (i = 0; i < adapter->req_tx_queues; i++)
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
        ibmvnic_send_crq(adapter, &crq);

        netif_tx_start_all_queues(netdev);

        return 0;

bounce_map_failed:
        kfree(adapter->bounce_buffer);
bounce_alloc_failed:
        i = tx_subcrqs - 1;
        kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
        free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
        kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
        for (j = 0; j < i; j++) {
                kfree(adapter->tx_pool[j].tx_buff);
                free_long_term_buff(adapter,
                                    &adapter->tx_pool[j].long_term_buff);
                kfree(adapter->tx_pool[j].free_map);
        }
        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
        i = rxadd_subcrqs;
rx_pool_alloc_failed:
        for (j = 0; j < i; j++) {
                free_rx_pool(adapter, &adapter->rx_pool[j]);
                free_long_term_buff(adapter,
                                    &adapter->rx_pool[j].long_term_buff);
        }
        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
alloc_napi_failed:
        return -ENOMEM;
}
static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i])
                                disable_irq(adapter->tx_scrq[i]->irq);
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        if (adapter->rx_scrq[i])
                                disable_irq(adapter->rx_scrq[i]->irq);
        }
}
static int ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int i;

        adapter->closing = true;
        disable_sub_crqs(adapter);

        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);

        if (!adapter->failover)
                netif_tx_stop_all_queues(netdev);

        if (adapter->bounce_buffer) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                         adapter->bounce_buffer_dma,
                                         adapter->bounce_buffer_size,
                                         DMA_TO_DEVICE);
                        adapter->bounce_buffer_dma = DMA_ERROR_CODE;
                }
                kfree(adapter->bounce_buffer);
                adapter->bounce_buffer = NULL;
        }

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
        ibmvnic_send_crq(adapter, &crq);

        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
             i++) {
                kfree(adapter->tx_pool[i].tx_buff);
                free_long_term_buff(adapter,
                                    &adapter->tx_pool[i].long_term_buff);
                kfree(adapter->tx_pool[i].free_map);
        }
        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;

        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
             i++) {
                free_rx_pool(adapter, &adapter->rx_pool[i]);
                free_long_term_buff(adapter,
                                    &adapter->rx_pool[i].long_term_buff);
        }
        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;

        adapter->closing = false;

        return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
        int len = 0;
        u8 *hdr;

        hdr_len[0] = sizeof(struct ethhdr);

        if (skb->protocol == htons(ETH_P_IP)) {
                hdr_len[1] = ip_hdr(skb)->ihl * 4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                hdr_len[1] = sizeof(struct ipv6hdr);
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        }

        memset(hdr_data, 0, 120);
        if ((hdr_field >> 6) & 1) {
                hdr = skb_mac_header(skb);
                memcpy(hdr_data, hdr, hdr_len[0]);
                len += hdr_len[0];
        }

        if ((hdr_field >> 5) & 1) {
                hdr = skb_network_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[1]);
                len += hdr_len[1];
        }

        if ((hdr_field >> 4) & 1) {
                hdr = skb_transport_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[2]);
                len += hdr_len[2];
        }
        return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                             union sub_crq *scrq_arr)
{
        union sub_crq hdr_desc;
        int tmp_len = len;
        u8 *data, *cur;
        int tmp;

        while (tmp_len > 0) {
                cur = hdr_data + len - tmp_len;

                memset(&hdr_desc, 0, sizeof(hdr_desc));
                if (cur != hdr_data) {
                        data = hdr_desc.hdr_ext.data;
                        tmp = tmp_len > 29 ? 29 : tmp_len;
                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
                        hdr_desc.hdr_ext.len = tmp;
                } else {
                        data = hdr_desc.hdr.data;
                        tmp = tmp_len > 24 ? 24 : tmp_len;
                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
                        hdr_desc.hdr.len = tmp;
                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
                        hdr_desc.hdr.flag = hdr_field << 1;
                }
                memcpy(data, cur, tmp);
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
        }
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
{
        int hdr_len[3] = {0, 0, 0};
        u8 *hdr_data = txbuff->hdr_data;
        int tot_len, len;

        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                                 txbuff->hdr_data);
        len = tot_len;
        len -= 24;
        if (len > 0)
                *num_entries += len % 29 ? len / 29 + 1 : len / 29;
        create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                         txbuff->indir_arr + 1);
}
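
/* Illustrative note on the arithmetic above: the expression
 * len % 29 ? len / 29 + 1 : len / 29 is just ceil(len / 29), the number
 * of 29-byte extension descriptors needed once the first 24 header
 * bytes are consumed. E.g. an IPv4/TCP frame with no options carries
 * 14 + 20 + 20 = 54 bytes of headers, so len = 54 - 24 = 30 and two
 * extension descriptors are added.
 */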
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
        unsigned int tx_dropped = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_bytes = 0;
        dma_addr_t data_dma_addr;
        struct netdev_queue *txq;
        bool used_bounce = false;
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
        int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
        int ret = 0;

        tx_pool = &adapter->tx_pool[queue_num];
        tx_scrq = adapter->tx_scrq[queue_num];
        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                               be32_to_cpu(adapter->login_rsp_buf->
                                           off_txsubm_subcrqs));
        if (adapter->migrated) {
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        index = tx_pool->free_map[tx_pool->consumer_index];
        offset = index * adapter->req_mtu;
        dst = tx_pool->long_term_buff.buff + offset;
        memset(dst, 0, adapter->req_mtu);
        skb_copy_from_linear_data(skb, dst, skb->len);
        data_dma_addr = tx_pool->long_term_buff.addr + offset;

        tx_pool->consumer_index =
            (tx_pool->consumer_index + 1) %
                adapter->req_tx_entries_per_subcrq;

        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
        tx_buff->data_dma[0] = data_dma_addr;
        tx_buff->data_len[0] = skb->len;
        tx_buff->index = index;
        tx_buff->pool_index = queue_num;
        tx_buff->last_frag = true;
        tx_buff->used_bounce = used_bounce;

        memset(&tx_crq, 0, sizeof(tx_crq));
        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
        tx_crq.v1.type = IBMVNIC_TX_DESC;
        tx_crq.v1.n_crq_elem = 1;
        tx_crq.v1.n_sge = 1;
        tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
        tx_crq.v1.correlator = cpu_to_be32(index);
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

        if (adapter->vlan_header_insertion) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                if (ip_hdr(skb)->version == 4)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
                else if (ip_hdr(skb)->version == 6)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
        }
        /* determine if l2/3/4 headers are sent to firmware */
        if ((*hdrs >> 7) & 1 &&
            (skb->protocol == htons(ETH_P_IP) ||
             skb->protocol == htons(ETH_P_IPV6))) {
                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
                tx_crq.v1.n_crq_elem = num_entries;
                tx_buff->indir_arr[0] = tx_crq;
                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
                                                    sizeof(tx_buff->indir_arr),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
                        if (!firmware_has_feature(FW_FEATURE_CMO))
                                dev_err(dev, "tx: unable to map descriptor array\n");
                        tx_map_failed++;
                        tx_dropped++;
                        ret = NETDEV_TX_BUSY;
                        goto out;
                }
                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                               (u64)tx_buff->indir_dma,
                                               (u64)num_entries);
        } else {
                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
                dev_err(dev, "tx failed with code %ld\n", lpar_rc);

                if (tx_pool->consumer_index == 0)
                        tx_pool->consumer_index =
                                adapter->req_tx_entries_per_subcrq - 1;
                else
                        tx_pool->consumer_index--;

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        atomic_inc(&tx_scrq->used);

        if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
                netdev_info(netdev, "Stopping queue %d\n", queue_num);
                netif_stop_subqueue(netdev, queue_num);
        }

        tx_packets++;
        tx_bytes += skb->len;
        txq->trans_start = jiffies;
        ret = NETDEV_TX_OK;

out:
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed += tx_send_failed;
        adapter->tx_map_failed += tx_map_failed;

        return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_capability.first = IBMVNIC_CRQ_CMD;
        crq.request_capability.cmd = REQUEST_CAPABILITY;

        if (netdev->flags & IFF_PROMISC) {
                if (!adapter->promisc_supported)
                        return;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else if (netdev_mc_empty(netdev)) {
                        /* Reject all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else {
                        /* Accept one or more multicast(s) */
                        netdev_for_each_mc_addr(ha, netdev) {
                                memset(&crq, 0, sizeof(crq));
                                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
                                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
                                                ha->addr);
                                ibmvnic_send_crq(adapter, &crq);
                        }
                }
        }
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        union ibmvnic_crq crq;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memset(&crq, 0, sizeof(crq));
        crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
        crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
        ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
        ibmvnic_send_crq(adapter, &crq);
        /* netdev->dev_addr is changed in handle_change_mac_rsp function */
        return 0;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
                return -EINVAL;

        netdev->mtu = new_mtu;
        return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int rc;

        /* Adapter timed out, resetting it */
        release_sub_crqs(adapter);
        rc = ibmvnic_reset_crq(adapter);
        if (rc)
                dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
        else
                ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_rx_buff *rx_buff)
{
        struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

        rx_buff->skb = NULL;

        pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
        pool->next_alloc = (pool->next_alloc + 1) % pool->size;

        atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int scrq_num = (int)(napi - adapter->napi);
        int frames_processed = 0;
restart_poll:
        while (frames_processed < budget) {
                struct sk_buff *skb;
                struct ibmvnic_rx_buff *rx_buff;
                union sub_crq *next;
                u32 length;
                u16 offset;
                u8 flags = 0;

                if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
                        break;
                /* The queue entry at the current index is peeked at above
                 * to determine that there is a valid descriptor awaiting
                 * processing. We want to be sure that the current slot
                 * holds a valid descriptor before reading its contents.
                 */
                dma_rmb();
                next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
                rx_buff =
                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
                                                          rx_comp.correlator);
                /* do error checking */
                if (next->rx_comp.rc) {
                        netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
                        /* free the entry */
                        next->rx_comp.first = 0;
                        dev_kfree_skb_any(rx_buff->skb);
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                }

                length = be32_to_cpu(next->rx_comp.len);
                offset = be16_to_cpu(next->rx_comp.off_frame_data);
                flags = next->rx_comp.flags;
                skb = rx_buff->skb;
                skb_copy_to_linear_data(skb, rx_buff->data + offset,
                                        length);
                skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
                /* free the entry */
                next->rx_comp.first = 0;
                remove_buff_from_pool(adapter, rx_buff);

                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, netdev);

                if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
                    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                napi_gro_receive(napi, skb); /* send it up */
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += length;
                frames_processed++;
        }
        replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
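
        /* Descriptive note: a frame can arrive between the last
         * pending_scrq() check and enable_scrq_irq() below; re-checking
         * after napi_complete() and rescheduling closes that window.
         */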
        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                napi_complete(napi);
                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
                    napi_reschedule(napi)) {
                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        goto restart_poll;
                }
        }
        return frames_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;

        replenish_pools(netdev_priv(dev));
        for (i = 0; i < adapter->req_rx_queues; i++)
                ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
                                     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_open = ibmvnic_open,
        .ndo_stop = ibmvnic_close,
        .ndo_start_xmit = ibmvnic_xmit,
        .ndo_set_rx_mode = ibmvnic_set_multi,
        .ndo_set_mac_address = ibmvnic_set_mac,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ibmvnic_change_mtu,
        .ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
                                struct ethtool_cmd *cmd)
{
        cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                          SUPPORTED_FIBRE);
        cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                            ADVERTISED_FIBRE);
        ethtool_cmd_speed_set(cmd, SPEED_1000);
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_FIBRE;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_ENABLE;
        return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
        strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        /* Don't need to send a query because we request a logical link up at
         * init and then we wait for link state indications
         */
        return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        ring->rx_max_pending = 0;
        ring->tx_max_pending = 0;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = 0;
        ring->tx_pending = 0;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
                memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmvnic_stats);
        default:
                return -EOPNOTSUPP;
        }
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        union ibmvnic_crq crq;
        int i;

        memset(&crq, 0, sizeof(crq));
        crq.request_statistics.first = IBMVNIC_CRQ_CMD;
        crq.request_statistics.cmd = REQUEST_STATISTICS;
        crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
        crq.request_statistics.len =
            cpu_to_be32(sizeof(struct ibmvnic_statistics));

        /* Wait for data to be written */
        init_completion(&adapter->stats_done);
        ibmvnic_send_crq(adapter, &crq);
        wait_for_completion(&adapter->stats_done);

        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
                data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_settings = ibmvnic_get_settings,
        .get_drvinfo = ibmvnic_get_drvinfo,
        .get_msglevel = ibmvnic_get_msglevel,
        .set_msglevel = ibmvnic_set_msglevel,
        .get_link = ibmvnic_get_link,
        .get_ringparam = ibmvnic_get_ringparam,
        .get_strings = ibmvnic_get_strings,
        .get_sset_count = ibmvnic_get_sset_count,
        .get_ethtool_stats = ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs */
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        long rc;

        netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

        /* Close the sub-crqs */
        do {
                rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
                                        adapter->vdev->unit_address,
                                        scrq->crq_num);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_pages((unsigned long)scrq->msgs, 2);
        kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                                                        *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue *scrq;
        int rc;

        scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
        if (!scrq)
                return NULL;

        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
        if (!scrq->msgs) {
                dev_warn(dev, "Couldn't allocate crq queue messages page\n");
                goto zero_page_failed;
        }
        memset(scrq->msgs, 0, 4 * PAGE_SIZE);

        scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, scrq->msg_token)) {
                dev_warn(dev, "Couldn't map crq queue messages page\n");
                goto map_failed;
        }

        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

        if (rc == H_RESOURCE)
                rc = ibmvnic_reset_crq(adapter);

        if (rc == H_CLOSED) {
                dev_warn(dev, "Partner adapter not ready, waiting.\n");
        } else if (rc) {
                dev_warn(dev, "Error %d registering sub-crq\n", rc);
                goto reg_failed;
        }

        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
        atomic_set(&scrq->used, 0);
        scrq->rx_skb_top = NULL;
        spin_lock_init(&scrq->lock);

        netdev_dbg(adapter->netdev,
                   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
                   scrq->crq_num, scrq->hw_irq, scrq->irq);

        return scrq;

reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
map_failed:
        free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
        kfree(scrq);

        return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i]) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
                                irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
                kfree(adapter->tx_scrq);
                adapter->tx_scrq = NULL;
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        if (adapter->rx_scrq[i]) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
                                irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
                kfree(adapter->rx_scrq);
                adapter->rx_scrq = NULL;
        }

        adapter->requested_caps = 0;
}
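
/* Descriptive note: the helper below mirrors release_sub_crqs() for the
 * paths where the sub-CRQ irqs were never requested (or are already
 * freed), so only the queues themselves are released.
 */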
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i])
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                adapter->tx_scrq = NULL;
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        if (adapter->rx_scrq[i])
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                adapter->rx_scrq = NULL;
        }

        adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long rc;

        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
        if (rc)
                dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
                        scrq->hw_irq, rc);
        return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                           struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long rc;

        if (scrq->hw_irq > 0x100000000ULL) {
                dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
                return 1;
        }

        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
        if (rc)
                dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
                        scrq->hw_irq, rc);
        return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
                               struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *txbuff;
        union sub_crq *next;
        int index;
        int i, j;
        u8 first;

restart_loop:
        while (pending_scrq(adapter, scrq)) {
                unsigned int pool = scrq->pool_index;

                /* The queue entry at the current index is peeked at above
                 * to determine that there is a valid descriptor awaiting
                 * processing. We want to be sure that the current slot
                 * holds a valid descriptor before reading its contents.
                 */
                dma_rmb();

                next = ibmvnic_next_scrq(adapter, scrq);
                for (i = 0; i < next->tx_comp.num_comps; i++) {
                        if (next->tx_comp.rcs[i])
                                dev_err(dev, "tx error %x\n",
                                        next->tx_comp.rcs[i]);
                        index = be32_to_cpu(next->tx_comp.correlators[i]);
                        txbuff = &adapter->tx_pool[pool].tx_buff[index];

                        for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
                                if (!txbuff->data_dma[j])
                                        continue;

                                txbuff->data_dma[j] = 0;
                                txbuff->used_bounce = false;
                        }
                        /* if sub_crq was sent indirectly */
                        first = txbuff->indir_arr[0].generic.first;
                        if (first == IBMVNIC_CRQ_CMD) {
                                dma_unmap_single(dev, txbuff->indir_dma,
                                                 sizeof(txbuff->indir_arr),
                                                 DMA_TO_DEVICE);
                        }

                        if (txbuff->last_frag) {
                                atomic_dec(&scrq->used);

                                if (atomic_read(&scrq->used) <=
                                    (adapter->req_tx_entries_per_subcrq / 2) &&
                                    netif_subqueue_stopped(adapter->netdev,
                                                           txbuff->skb)) {
                                        netif_wake_subqueue(adapter->netdev,
                                                            scrq->pool_index);
                                        netdev_dbg(adapter->netdev,
                                                   "Started queue %d\n",
                                                   scrq->pool_index);
                                }

                                dev_kfree_skb_any(txbuff->skb);
                        }

                        adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
                                                        producer_index] = index;
                        adapter->tx_pool[pool].producer_index =
                                (adapter->tx_pool[pool].producer_index + 1) %
                                        adapter->req_tx_entries_per_subcrq;
                }
                /* remove tx_comp scrq*/
                next->tx_comp.first = 0;
        }

        enable_scrq_irq(adapter, scrq);

        if (pending_scrq(adapter, scrq)) {
                disable_scrq_irq(adapter, scrq);
                goto restart_loop;
        }

        return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
        struct ibmvnic_sub_crq_queue *scrq = instance;
        struct ibmvnic_adapter *adapter = scrq->adapter;

        disable_scrq_irq(adapter, scrq);
        ibmvnic_complete_tx(adapter, scrq);

        return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
        struct ibmvnic_sub_crq_queue *scrq = instance;
        struct ibmvnic_adapter *adapter = scrq->adapter;

        if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
                disable_scrq_irq(adapter, scrq);
                __napi_schedule(&adapter->napi[scrq->scrq_num]);
        }

        return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue *scrq;
        int i = 0, j = 0;
        int rc = 0;

        for (i = 0; i < adapter->req_tx_queues; i++) {
                scrq = adapter->tx_scrq[i];
                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

                if (!scrq->irq) {
                        rc = -EINVAL;
                        dev_err(dev, "Error mapping irq\n");
                        goto req_tx_irq_failed;
                }

                rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
                                 0, "ibmvnic_tx", scrq);

                if (rc) {
                        dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
                                scrq->irq, rc);
                        irq_dispose_mapping(scrq->irq);
                        goto req_tx_irq_failed;
                }
        }

        for (i = 0; i < adapter->req_rx_queues; i++) {
                scrq = adapter->rx_scrq[i];
                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
                if (!scrq->irq) {
                        rc = -EINVAL;
                        dev_err(dev, "Error mapping irq\n");
                        goto req_rx_irq_failed;
                }
                rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
                                 0, "ibmvnic_rx", scrq);
                if (rc) {
                        dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
                                scrq->irq, rc);
                        irq_dispose_mapping(scrq->irq);
                        goto req_rx_irq_failed;
                }
        }
        return rc;

req_rx_irq_failed:
        for (j = 0; j < i; j++) {
                free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
                irq_dispose_mapping(adapter->rx_scrq[j]->irq);
        }
        i = adapter->req_tx_queues;
req_tx_irq_failed:
        for (j = 0; j < i; j++) {
                free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
                irq_dispose_mapping(adapter->tx_scrq[j]->irq);
        }
        release_sub_crqs_no_irqs(adapter);
        return rc;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue **allqueues;
        int registered_queues = 0;
        union ibmvnic_crq crq;
        int total_queues;
        int more = 0;
        int i;

        if (!retry) {
                /* Sub-CRQ entries are 32 byte long */
                int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

                if (adapter->min_tx_entries_per_subcrq > entries_page ||
                    adapter->min_rx_add_entries_per_subcrq > entries_page) {
                        dev_err(dev, "Fatal, invalid entries per sub-crq\n");
                        goto allqueues_failed;
                }

                /* Get the minimum between the queried max and the entries
                 * that fit in our PAGE_SIZE
                 */
                adapter->req_tx_entries_per_subcrq =
                    adapter->max_tx_entries_per_subcrq > entries_page ?
                    entries_page : adapter->max_tx_entries_per_subcrq;
                adapter->req_rx_add_entries_per_subcrq =
                    adapter->max_rx_add_entries_per_subcrq > entries_page ?
                    entries_page : adapter->max_rx_add_entries_per_subcrq;

                adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
                adapter->req_rx_queues = adapter->opt_rx_comp_queues;
                adapter->req_rx_add_queues = adapter->max_rx_add_queues;

                adapter->req_mtu = adapter->max_mtu;
        }

        total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

        allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
        if (!allqueues)
                goto allqueues_failed;

        for (i = 0; i < total_queues; i++) {
                allqueues[i] = init_sub_crq_queue(adapter);
                if (!allqueues[i]) {
                        dev_warn(dev, "Couldn't allocate all sub-crqs\n");
                        break;
                }
                registered_queues++;
        }

        /* Make sure we were able to register the minimum number of queues */
        if (registered_queues <
            adapter->min_tx_queues + adapter->min_rx_queues) {
                dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
                goto tx_failed;
        }

        /* Distribute the failed allocated queues*/
        for (i = 0; i < total_queues - registered_queues + more; i++) {
                netdev_dbg(adapter->netdev, "Reducing number of queues\n");
                switch (i % 3) {
                case 0:
                        if (adapter->req_rx_queues > adapter->min_rx_queues)
                                adapter->req_rx_queues--;
                        else
                                more++;
                        break;
                case 1:
                        if (adapter->req_tx_queues > adapter->min_tx_queues)
                                adapter->req_tx_queues--;
                        else
                                more++;
                        break;
                }
        }

        adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
                                   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
        if (!adapter->tx_scrq)
                goto tx_failed;

        for (i = 0; i < adapter->req_tx_queues; i++) {
                adapter->tx_scrq[i] = allqueues[i];
                adapter->tx_scrq[i]->pool_index = i;
        }

        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
                                   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
        if (!adapter->rx_scrq)
                goto rx_failed;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
                adapter->rx_scrq[i]->scrq_num = i;
        }

        memset(&crq, 0, sizeof(crq));
        crq.request_capability.first = IBMVNIC_CRQ_CMD;
        crq.request_capability.cmd = REQUEST_CAPABILITY;

        crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
        crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
        ibmvnic_send_crq(adapter, &crq);

        crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
        crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
        ibmvnic_send_crq(adapter, &crq);

        crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
        crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
        ibmvnic_send_crq(adapter, &crq);

        crq.request_capability.capability =
            cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
        crq.request_capability.number =
            cpu_to_be64(adapter->req_tx_entries_per_subcrq);
        ibmvnic_send_crq(adapter, &crq);

        crq.request_capability.capability =
            cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
        crq.request_capability.number =
            cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
        ibmvnic_send_crq(adapter, &crq);

        crq.request_capability.capability = cpu_to_be16(REQ_MTU);
        crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
        ibmvnic_send_crq(adapter, &crq);

        if (adapter->netdev->flags & IFF_PROMISC) {
                if (adapter->promisc_supported) {
                        crq.request_capability.capability =
                            cpu_to_be16(PROMISC_REQUESTED);
                        crq.request_capability.number = cpu_to_be64(1);
                        ibmvnic_send_crq(adapter, &crq);
                }
        } else {
                crq.request_capability.capability =
                    cpu_to_be16(PROMISC_REQUESTED);
                crq.request_capability.number = cpu_to_be64(0);
                ibmvnic_send_crq(adapter, &crq);
        }

        kfree(allqueues);

        return;

rx_failed:
        kfree(adapter->tx_scrq);
        adapter->tx_scrq = NULL;
tx_failed:
        for (i = 0; i < registered_queues; i++)
                release_sub_crq_queue(adapter, allqueues[i]);
        kfree(allqueues);
allqueues_failed:
        ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
                        struct ibmvnic_sub_crq_queue *scrq)
{
        union sub_crq *entry = &scrq->msgs[scrq->cur];

        if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
                return 1;
        else
                return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
                                        struct ibmvnic_sub_crq_queue *scrq)
{
        union sub_crq *entry;
        unsigned long flags;

        spin_lock_irqsave(&scrq->lock, flags);
        entry = &scrq->msgs[scrq->cur];
        if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
                if (++scrq->cur == scrq->size)
                        scrq->cur = 0;
        } else {
                entry = NULL;
        }
        spin_unlock_irqrestore(&scrq->lock, flags);

        /* Ensure that the entire buffer descriptor has been
         * loaded before reading its contents
         */
        dma_rmb();

        return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *queue = &adapter->crq;
        union ibmvnic_crq *crq;

        crq = &queue->msgs[queue->cur];
        if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else {
                crq = NULL;
        }

        return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq)
{
        unsigned int ua = adapter->vdev->unit_address;
        struct device *dev = &adapter->vdev->dev;
        u64 *u64_crq = (u64 *)sub_crq;
        int rc;

        netdev_dbg(adapter->netdev,
                   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
                   (unsigned long int)cpu_to_be64(remote_handle),
                   (unsigned long int)cpu_to_be64(u64_crq[0]),
                   (unsigned long int)cpu_to_be64(u64_crq[1]),
                   (unsigned long int)cpu_to_be64(u64_crq[2]),
                   (unsigned long int)cpu_to_be64(u64_crq[3]));

        /* Make sure the hypervisor sees the complete request */
        mb();

        rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
                                cpu_to_be64(remote_handle),
                                cpu_to_be64(u64_crq[0]),
                                cpu_to_be64(u64_crq[1]),
                                cpu_to_be64(u64_crq[2]),
                                cpu_to_be64(u64_crq[3]));

        if (rc) {
                if (rc == H_CLOSED)
                        dev_warn(dev, "CRQ Queue closed\n");
                dev_err(dev, "Send error (rc=%d)\n", rc);
        }

        return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
                                u64 remote_handle, u64 ioba, u64 num_entries)
{
        unsigned int ua = adapter->vdev->unit_address;
        struct device *dev = &adapter->vdev->dev;
        int rc;

        /* Make sure the hypervisor sees the complete request */
        mb();
        rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
                                cpu_to_be64(remote_handle),
                                ioba, num_entries);

        if (rc) {
                if (rc == H_CLOSED)
                        dev_warn(dev, "CRQ Queue closed\n");
                dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
        }

        return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
                            union ibmvnic_crq *crq)
{
        unsigned int ua = adapter->vdev->unit_address;
        struct device *dev = &adapter->vdev->dev;
        u64 *u64_crq = (u64 *)crq;
        int rc;

        netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
                   (unsigned long int)cpu_to_be64(u64_crq[0]),
                   (unsigned long int)cpu_to_be64(u64_crq[1]));

        /* Make sure the hypervisor sees the complete request */
        mb();

        rc = plpar_hcall_norets(H_SEND_CRQ, ua,
                                cpu_to_be64(u64_crq[0]),
                                cpu_to_be64(u64_crq[1]));

        if (rc) {
                if (rc == H_CLOSED)
                        dev_warn(dev, "CRQ Queue closed\n");
                dev_warn(dev, "Send error (rc=%d)\n", rc);
        }

        return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
        crq.generic.cmd = IBMVNIC_CRQ_INIT;
        netdev_dbg(adapter->netdev, "Sending CRQ init\n");

        return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
        crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
        netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

        return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.version_exchange.first = IBMVNIC_CRQ_CMD;
        crq.version_exchange.cmd = VERSION_EXCHANGE;
        crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

        return ibmvnic_send_crq(adapter, &crq);
}
static void send_login(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
        struct ibmvnic_login_buffer *login_buffer;
        struct ibmvnic_inflight_cmd *inflight_cmd;
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t rsp_buffer_token;
        dma_addr_t buffer_token;
        size_t rsp_buffer_size;
        union ibmvnic_crq crq;
        unsigned long flags;
        size_t buffer_size;
        __be64 *tx_list_p;
        __be64 *rx_list_p;
        int i;

        buffer_size =
            sizeof(struct ibmvnic_login_buffer) +
            sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

        login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
        if (!login_buffer)
                goto buf_alloc_failed;

        buffer_token = dma_map_single(dev, login_buffer, buffer_size,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buffer_token)) {
                dev_err(dev, "Couldn't map login buffer\n");
                goto buf_map_failed;
        }

        rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
                          sizeof(u64) * adapter->req_tx_queues +
                          sizeof(u64) * adapter->req_rx_queues +
                          sizeof(u64) * adapter->req_rx_queues +
                          sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
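
        /* Descriptive note: the response buffer sized above carries, in
         * order, one u64 sub-CRQ handle per tx queue, one per rx queue,
         * one rx buffer size per rx queue, and a list of supported tx
         * descriptor versions; the off_* fields of the response give the
         * offsets actually used by the rest of the driver.
         */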
        login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
        if (!login_rsp_buffer)
                goto buf_rsp_alloc_failed;

        rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
                                          rsp_buffer_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, rsp_buffer_token)) {
                dev_err(dev, "Couldn't map login rsp buffer\n");
                goto buf_rsp_map_failed;
        }
        inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
        if (!inflight_cmd) {
                dev_err(dev, "Couldn't allocate inflight_cmd\n");
                goto inflight_alloc_failed;
        }
        adapter->login_buf = login_buffer;
        adapter->login_buf_token = buffer_token;
        adapter->login_buf_sz = buffer_size;
        adapter->login_rsp_buf = login_rsp_buffer;
        adapter->login_rsp_buf_token = rsp_buffer_token;
        adapter->login_rsp_buf_sz = rsp_buffer_size;

        login_buffer->len = cpu_to_be32(buffer_size);
        login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
        login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
        login_buffer->off_txcomp_subcrqs =
            cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
        login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
        login_buffer->off_rxcomp_subcrqs =
            cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
                        sizeof(u64) * adapter->req_tx_queues);
        login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
        login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

        tx_list_p = (__be64 *)((char *)login_buffer +
                               sizeof(struct ibmvnic_login_buffer));
        rx_list_p = (__be64 *)((char *)login_buffer +
                               sizeof(struct ibmvnic_login_buffer) +
                               sizeof(u64) * adapter->req_tx_queues);

        for (i = 0; i < adapter->req_tx_queues; i++) {
                if (adapter->tx_scrq[i]) {
                        tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
                                                   crq_num);
                }
        }

        for (i = 0; i < adapter->req_rx_queues; i++) {
                if (adapter->rx_scrq[i]) {
                        rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
                                                   crq_num);
                }
        }

        netdev_dbg(adapter->netdev, "Login Buffer:\n");
        for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
                netdev_dbg(adapter->netdev, "%016lx\n",
                           ((unsigned long int *)(adapter->login_buf))[i]);
        }

        memset(&crq, 0, sizeof(crq));
        crq.login.first = IBMVNIC_CRQ_CMD;
        crq.login.cmd = LOGIN;
        crq.login.ioba = cpu_to_be32(buffer_token);
        crq.login.len = cpu_to_be32(buffer_size);

        memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

        spin_lock_irqsave(&adapter->inflight_lock, flags);
        list_add_tail(&inflight_cmd->list, &adapter->inflight);
        spin_unlock_irqrestore(&adapter->inflight_lock, flags);

        ibmvnic_send_crq(adapter, &crq);

        return;

inflight_alloc_failed:
        dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
                         DMA_FROM_DEVICE);
buf_rsp_map_failed:
        kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
        dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
        kfree(login_buffer);
buf_alloc_failed:
        return;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
                             __be32 len, u8 map_id)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_map.first = IBMVNIC_CRQ_CMD;
        crq.request_map.cmd = REQUEST_MAP;
        crq.request_map.map_id = map_id;
        crq.request_map.ioba = cpu_to_be32(addr);
        crq.request_map.len = cpu_to_be32(len);
        ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_unmap.first = IBMVNIC_CRQ_CMD;
        crq.request_unmap.cmd = REQUEST_UNMAP;
        crq.request_unmap.map_id = map_id;
        ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.query_map.first = IBMVNIC_CRQ_CMD;
        crq.query_map.cmd = QUERY_MAP;
        ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
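/* Descriptive note: each query below increments running_cap_queries;
 * the response handler (not shown in this excerpt) decrements it, so
 * the driver can tell when all capability responses have arrived.
 */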
2028 static void send_cap_queries(struct ibmvnic_adapter *adapter)
2030 union ibmvnic_crq crq;
2032 atomic_set(&adapter->running_cap_queries, 0);
2033 memset(&crq, 0, sizeof(crq));
2034 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2035 crq.query_capability.cmd = QUERY_CAPABILITY;
2037 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2038 atomic_inc(&adapter->running_cap_queries);
2039 ibmvnic_send_crq(adapter, &crq);
2041 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2042 atomic_inc(&adapter->running_cap_queries);
2043 ibmvnic_send_crq(adapter, &crq);
2045 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2046 atomic_inc(&adapter->running_cap_queries);
2047 ibmvnic_send_crq(adapter, &crq);
2049 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2050 atomic_inc(&adapter->running_cap_queries);
2051 ibmvnic_send_crq(adapter, &crq);
2053 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2054 atomic_inc(&adapter->running_cap_queries);
2055 ibmvnic_send_crq(adapter, &crq);
2057 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2058 atomic_inc(&adapter->running_cap_queries);
2059 ibmvnic_send_crq(adapter, &crq);
2061 crq.query_capability.capability =
2062 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2063 atomic_inc(&adapter->running_cap_queries);
2064 ibmvnic_send_crq(adapter, &crq);
2066 crq.query_capability.capability =
2067 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2068 atomic_inc(&adapter->running_cap_queries);
2069 ibmvnic_send_crq(adapter, &crq);
2071 crq.query_capability.capability =
2072 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2073 atomic_inc(&adapter->running_cap_queries);
2074 ibmvnic_send_crq(adapter, &crq);
2076 crq.query_capability.capability =
2077 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2078 atomic_inc(&adapter->running_cap_queries);
2079 ibmvnic_send_crq(adapter, &crq);
2081 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2082 atomic_inc(&adapter->running_cap_queries);
2083 ibmvnic_send_crq(adapter, &crq);
2085 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2086 atomic_inc(&adapter->running_cap_queries);
2087 ibmvnic_send_crq(adapter, &crq);
2089 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2090 atomic_inc(&adapter->running_cap_queries);
2091 ibmvnic_send_crq(adapter, &crq);
2093 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2094 atomic_inc(&adapter->running_cap_queries);
2095 ibmvnic_send_crq(adapter, &crq);
2097 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2098 atomic_inc(&adapter->running_cap_queries);
2099 ibmvnic_send_crq(adapter, &crq);
2101 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2102 atomic_inc(&adapter->running_cap_queries);
2103 ibmvnic_send_crq(adapter, &crq);
2105 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2106 atomic_inc(&adapter->running_cap_queries);
2107 ibmvnic_send_crq(adapter, &crq);
2109 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2110 atomic_inc(&adapter->running_cap_queries);
2111 ibmvnic_send_crq(adapter, &crq);
2113 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2114 atomic_inc(&adapter->running_cap_queries);
2115 ibmvnic_send_crq(adapter, &crq);
2117 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2118 atomic_inc(&adapter->running_cap_queries);
2119 ibmvnic_send_crq(adapter, &crq);
2121 crq.query_capability.capability =
2122 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2123 atomic_inc(&adapter->running_cap_queries);
2124 ibmvnic_send_crq(adapter, &crq);
2126 crq.query_capability.capability =
2127 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2128 atomic_inc(&adapter->running_cap_queries);
2129 ibmvnic_send_crq(adapter, &crq);
2131 crq.query_capability.capability =
2132 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2133 atomic_inc(&adapter->running_cap_queries);
2134 ibmvnic_send_crq(adapter, &crq);
2136 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2137 atomic_inc(&adapter->running_cap_queries);
2138 ibmvnic_send_crq(adapter, &crq);
2139 }
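/* Parse the offload capabilities returned by QUERY_IP_OFFLOAD, set the
 * matching netdev feature flags, and send CONTROL_IP_OFFLOAD to tell the
 * server which offloads the driver will actually use.
 */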
2141 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2143 struct device *dev = &adapter->vdev->dev;
2144 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2145 union ibmvnic_crq crq;
2148 dma_unmap_single(dev, adapter->ip_offload_tok,
2149 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2151 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2152 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2153 netdev_dbg(adapter->netdev, "%016lx\n",
2154 ((unsigned long int *)(buf))[i]);
2156 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2157 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2158 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2159 buf->tcp_ipv4_chksum);
2160 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2161 buf->tcp_ipv6_chksum);
2162 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2163 buf->udp_ipv4_chksum);
2164 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2165 buf->udp_ipv6_chksum);
2166 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2167 buf->large_tx_ipv4);
2168 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2169 buf->large_tx_ipv6);
2170 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2171 buf->large_rx_ipv4);
2172 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2173 buf->large_rx_ipv6);
2174 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2175 buf->max_ipv4_header_size);
2176 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2177 buf->max_ipv6_header_size);
2178 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2179 buf->max_tcp_header_size);
2180 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2181 buf->max_udp_header_size);
2182 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2183 buf->max_large_tx_size);
2184 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2185 buf->max_large_rx_size);
2186 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2187 buf->ipv6_extension_header);
2188 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2189 buf->tcp_pseudosum_req);
2190 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2191 buf->num_ipv6_ext_headers);
2192 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2193 buf->off_ipv6_ext_headers);
2195 adapter->ip_offload_ctrl_tok =
2196 dma_map_single(dev, &adapter->ip_offload_ctrl,
2197 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2199 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2200 dev_err(dev, "Couldn't map ip offload control buffer\n");
2201 return;
2202 }
2204 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2205 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2206 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2207 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2208 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2210 /* large_tx/rx disabled for now, additional features needed */
2211 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2212 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2213 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2214 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2216 adapter->netdev->features = NETIF_F_GSO;
2218 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2219 adapter->netdev->features |= NETIF_F_IP_CSUM;
2221 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2222 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2224 if ((adapter->netdev->features &
2225 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2226 adapter->netdev->features |= NETIF_F_RXCSUM;
2228 memset(&crq, 0, sizeof(crq));
2229 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2230 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2231 crq.control_ip_offload.len =
2232 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2233 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2234 ibmvnic_send_crq(adapter, &crq);
2235 }
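/* A REQUEST_ERROR_INFO response carries the details for a previously
 * reported error id: find the matching buffer on adapter->errors, dump it
 * as hex for debugging, then unmap and free it.
 */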
2237 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2238 struct ibmvnic_adapter *adapter)
2240 struct device *dev = &adapter->vdev->dev;
2241 struct ibmvnic_error_buff *error_buff, *tmp;
2242 unsigned long flags;
2243 bool found = false;
2244 int i;
2246 if (crq->request_error_rsp.rc.code) {
2247 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2248 crq->request_error_rsp.rc.code);
2249 return;
2250 }
2252 spin_lock_irqsave(&adapter->error_list_lock, flags);
2253 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2254 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2255 found = true;
2256 list_del(&error_buff->list);
2257 break;
2258 }
2259 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2261 if (!found) {
2262 dev_err(dev, "Couldn't find error id %x\n",
2263 be32_to_cpu(crq->request_error_rsp.error_id));
2264 return;
2265 }
2267 dev_err(dev, "Detailed info for error id %x:",
2268 be32_to_cpu(crq->request_error_rsp.error_id));
2270 for (i = 0; i < error_buff->len; i++) {
2271 pr_cont("%02x", (int)error_buff->buff[i]);
2272 if (i % 8 == 7)
2273 pr_cont(" ");
2274 }
2275 pr_cont("\n");
2277 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2278 DMA_FROM_DEVICE);
2279 kfree(error_buff->buff);
2280 kfree(error_buff);
2281 }
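/* The server has reported the firmware dump size: allocate and map a
 * buffer that big and send REQUEST_DUMP so the server can fill it. Every
 * failure path completes fw_done so the waiter in ibmvnic_dump_show()
 * is not left hanging.
 */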
2283 static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2284 struct ibmvnic_adapter *adapter)
2286 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2287 struct ibmvnic_inflight_cmd *inflight_cmd;
2288 struct device *dev = &adapter->vdev->dev;
2289 union ibmvnic_crq newcrq;
2290 unsigned long flags;
2292 /* allocate and map buffer */
2293 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2294 if (!adapter->dump_data) {
2295 complete(&adapter->fw_done);
2296 return;
2297 }
2299 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2300 DMA_BIDIRECTIONAL);
2302 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2303 if (!firmware_has_feature(FW_FEATURE_CMO))
2304 dev_err(dev, "Couldn't map dump data\n");
2305 kfree(adapter->dump_data);
2306 complete(&adapter->fw_done);
2307 return;
2308 }
2310 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2311 if (!inflight_cmd) {
2312 dma_unmap_single(dev, adapter->dump_data_token, len,
2313 DMA_BIDIRECTIONAL);
2314 kfree(adapter->dump_data);
2315 complete(&adapter->fw_done);
2316 return;
2317 }
2319 memset(&newcrq, 0, sizeof(newcrq));
2320 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2321 newcrq.request_dump.cmd = REQUEST_DUMP;
2322 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2323 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2325 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2327 spin_lock_irqsave(&adapter->inflight_lock, flags);
2328 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2329 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2331 ibmvnic_send_crq(adapter, &newcrq);
2332 }
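/* An ERROR_INDICATION carries an error id plus the size of any detailed
 * error data. Log it, queue an error buffer on adapter->errors, and send
 * REQUEST_ERROR_INFO to fetch the details from the server.
 */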
2334 static void handle_error_indication(union ibmvnic_crq *crq,
2335 struct ibmvnic_adapter *adapter)
2337 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2338 struct ibmvnic_inflight_cmd *inflight_cmd;
2339 struct device *dev = &adapter->vdev->dev;
2340 struct ibmvnic_error_buff *error_buff;
2341 union ibmvnic_crq new_crq;
2342 unsigned long flags;
2344 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2345 crq->error_indication.
2346 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2347 be32_to_cpu(crq->error_indication.error_id),
2348 be16_to_cpu(crq->error_indication.error_cause));
2350 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2351 if (!error_buff)
2352 return;
2354 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2355 if (!error_buff->buff) {
2356 kfree(error_buff);
2357 return;
2358 }
2360 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2361 DMA_FROM_DEVICE);
2362 if (dma_mapping_error(dev, error_buff->dma)) {
2363 if (!firmware_has_feature(FW_FEATURE_CMO))
2364 dev_err(dev, "Couldn't map error buffer\n");
2365 kfree(error_buff->buff);
2366 kfree(error_buff);
2367 return;
2368 }
2370 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2371 if (!inflight_cmd) {
2372 dma_unmap_single(dev, error_buff->dma, detail_len,
2373 DMA_FROM_DEVICE);
2374 kfree(error_buff->buff);
2375 kfree(error_buff);
2376 return;
2377 }
2379 error_buff->len = detail_len;
2380 error_buff->error_id = crq->error_indication.error_id;
2382 spin_lock_irqsave(&adapter->error_list_lock, flags);
2383 list_add_tail(&error_buff->list, &adapter->errors);
2384 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2386 memset(&new_crq, 0, sizeof(new_crq));
2387 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2388 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2389 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2390 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2391 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2393 memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2395 spin_lock_irqsave(&adapter->inflight_lock, flags);
2396 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2397 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2399 ibmvnic_send_crq(adapter, &new_crq);
2400 }
2402 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2403 struct ibmvnic_adapter *adapter)
2405 struct net_device *netdev = adapter->netdev;
2406 struct device *dev = &adapter->vdev->dev;
2407 long rc;
2409 rc = crq->change_mac_addr_rsp.rc.code;
2410 if (rc) {
2411 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2412 return;
2413 }
2414 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2415 ETH_ALEN);
2416 }
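/* Each REQUEST_CAPABILITY response either confirms a requested value or,
 * on PARTIALSUCCESS, reports what the server can actually provide, in
 * which case the sub-CRQs are released and renegotiated with that value.
 * After all seven responses arrive, query IP offload support.
 */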
2418 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2419 struct ibmvnic_adapter *adapter)
2421 struct device *dev = &adapter->vdev->dev;
2422 u64 *req_value;
2423 char *name;
2425 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2426 case REQ_TX_QUEUES:
2427 req_value = &adapter->req_tx_queues;
2428 name = "tx";
2429 break;
2430 case REQ_RX_QUEUES:
2431 req_value = &adapter->req_rx_queues;
2432 name = "rx";
2433 break;
2434 case REQ_RX_ADD_QUEUES:
2435 req_value = &adapter->req_rx_add_queues;
2436 name = "rx_add";
2437 break;
2438 case REQ_TX_ENTRIES_PER_SUBCRQ:
2439 req_value = &adapter->req_tx_entries_per_subcrq;
2440 name = "tx_entries_per_subcrq";
2441 break;
2442 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2443 req_value = &adapter->req_rx_add_entries_per_subcrq;
2444 name = "rx_add_entries_per_subcrq";
2445 break;
2446 case REQ_MTU:
2447 req_value = &adapter->req_mtu;
2448 name = "mtu";
2449 break;
2450 case PROMISC_REQUESTED:
2451 req_value = &adapter->promisc;
2452 name = "promisc";
2453 break;
2454 default:
2455 dev_err(dev, "Got invalid cap request rsp %d\n",
2456 crq->request_capability.capability);
2457 return;
2458 }
2460 switch (crq->request_capability_rsp.rc.code) {
2461 case SUCCESS:
2462 break;
2463 case PARTIALSUCCESS:
2464 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2465 *req_value,
2466 (long int)be64_to_cpu(crq->request_capability_rsp.
2467 number), name);
2468 release_sub_crqs_no_irqs(adapter);
2469 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2470 init_sub_crqs(adapter, 1);
2471 return;
2472 default:
2473 dev_err(dev, "Error %d in request cap rsp\n",
2474 crq->request_capability_rsp.rc.code);
2475 return;
2476 }
2478 /* Done receiving requested capabilities, query IP offload support */
2479 if (++adapter->requested_caps == 7) {
2480 union ibmvnic_crq newcrq;
2481 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2482 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2483 &adapter->ip_offload_buf;
2485 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2486 buf_sz,
2487 DMA_FROM_DEVICE);
2489 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2490 if (!firmware_has_feature(FW_FEATURE_CMO))
2491 dev_err(dev, "Couldn't map offload buffer\n");
2492 return;
2493 }
2495 memset(&newcrq, 0, sizeof(newcrq));
2496 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2497 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2498 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2499 newcrq.query_ip_offload.ioba =
2500 cpu_to_be32(adapter->ip_offload_tok);
2502 ibmvnic_send_crq(adapter, &newcrq);
2503 }
2504 }
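/* Check the LOGIN response: a non-zero return code means the server could
 * not satisfy the requested queue counts, so flag a renegotiation;
 * otherwise sanity-check the sub-CRQ counts against the request and start
 * the RAS component exchange.
 */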
2506 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2507 struct ibmvnic_adapter *adapter)
2509 struct device *dev = &adapter->vdev->dev;
2510 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2511 struct ibmvnic_login_buffer *login = adapter->login_buf;
2512 union ibmvnic_crq crq;
2513 int i;
2515 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2516 DMA_BIDIRECTIONAL);
2517 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2518 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2520 /* If the number of queues requested can't be allocated by the
2521 * server, the login response will return with code 1. We will need
2522 * to resend the login buffer with fewer queues requested.
2523 */
2524 if (login_rsp_crq->generic.rc.code) {
2525 adapter->renegotiate = true;
2526 complete(&adapter->init_done);
2527 return 0;
2528 }
2530 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2531 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2532 netdev_dbg(adapter->netdev, "%016lx\n",
2533 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2534 }
2537 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2538 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2539 adapter->req_rx_add_queues !=
2540 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2541 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2542 ibmvnic_remove(adapter->vdev);
2543 return -EIO;
2544 }
2545 complete(&adapter->init_done);
2547 memset(&crq, 0, sizeof(crq));
2548 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2549 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2550 ibmvnic_send_crq(adapter, &crq);
2552 return 0;
2553 }
2555 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2556 struct ibmvnic_adapter *adapter)
2558 struct device *dev = &adapter->vdev->dev;
2559 u8 map_id = crq->request_map_rsp.map_id;
2560 int tx_subcrqs;
2561 int rx_subcrqs;
2562 long rc;
2563 int i;
2565 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2566 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2568 rc = crq->request_map_rsp.rc.code;
2569 if (rc) {
2570 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2571 adapter->map_id--;
2572 /* need to find and zero tx/rx_pool map_id */
2573 for (i = 0; i < tx_subcrqs; i++) {
2574 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2575 adapter->tx_pool[i].long_term_buff.map_id = 0;
2576 }
2577 for (i = 0; i < rx_subcrqs; i++) {
2578 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2579 adapter->rx_pool[i].long_term_buff.map_id = 0;
2580 }
2581 }
2582 complete(&adapter->fw_done);
2583 }
2585 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2586 struct ibmvnic_adapter *adapter)
2588 struct device *dev = &adapter->vdev->dev;
2589 long rc;
2591 rc = crq->request_unmap_rsp.rc.code;
2592 if (rc)
2593 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2594 }
2596 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2597 struct ibmvnic_adapter *adapter)
2599 struct net_device *netdev = adapter->netdev;
2600 struct device *dev = &adapter->vdev->dev;
2601 long rc;
2603 rc = crq->query_map_rsp.rc.code;
2604 if (rc)
2605 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2608 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2609 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2610 crq->query_map_rsp.free_pages);
2611 }
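/* Record one queried capability on the adapter. The outstanding-query
 * count gates initialization: when the last response arrives,
 * init_sub_crqs() sizes and requests the sub-CRQs.
 */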
2613 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2614 struct ibmvnic_adapter *adapter)
2616 struct net_device *netdev = adapter->netdev;
2617 struct device *dev = &adapter->vdev->dev;
2618 long rc;
2620 atomic_dec(&adapter->running_cap_queries);
2621 netdev_dbg(netdev, "Outstanding queries: %d\n",
2622 atomic_read(&adapter->running_cap_queries));
2623 rc = crq->query_capability.rc.code;
2624 if (rc) {
2625 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2626 goto out;
2627 }
2629 switch (be16_to_cpu(crq->query_capability.capability)) {
2630 case MIN_TX_QUEUES:
2631 adapter->min_tx_queues =
2632 be64_to_cpu(crq->query_capability.number);
2633 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2634 adapter->min_tx_queues);
2635 break;
2636 case MIN_RX_QUEUES:
2637 adapter->min_rx_queues =
2638 be64_to_cpu(crq->query_capability.number);
2639 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2640 adapter->min_rx_queues);
2642 case MIN_RX_ADD_QUEUES:
2643 adapter->min_rx_add_queues =
2644 be64_to_cpu(crq->query_capability.number);
2645 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2646 adapter->min_rx_add_queues);
2647 break;
2648 case MAX_TX_QUEUES:
2649 adapter->max_tx_queues =
2650 be64_to_cpu(crq->query_capability.number);
2651 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2652 adapter->max_tx_queues);
2653 break;
2654 case MAX_RX_QUEUES:
2655 adapter->max_rx_queues =
2656 be64_to_cpu(crq->query_capability.number);
2657 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2658 adapter->max_rx_queues);
2660 case MAX_RX_ADD_QUEUES:
2661 adapter->max_rx_add_queues =
2662 be64_to_cpu(crq->query_capability.number);
2663 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2664 adapter->max_rx_add_queues);
2666 case MIN_TX_ENTRIES_PER_SUBCRQ:
2667 adapter->min_tx_entries_per_subcrq =
2668 be64_to_cpu(crq->query_capability.number);
2669 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2670 adapter->min_tx_entries_per_subcrq);
2672 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2673 adapter->min_rx_add_entries_per_subcrq =
2674 be64_to_cpu(crq->query_capability.number);
2675 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2676 adapter->min_rx_add_entries_per_subcrq);
2678 case MAX_TX_ENTRIES_PER_SUBCRQ:
2679 adapter->max_tx_entries_per_subcrq =
2680 be64_to_cpu(crq->query_capability.number);
2681 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2682 adapter->max_tx_entries_per_subcrq);
2684 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2685 adapter->max_rx_add_entries_per_subcrq =
2686 be64_to_cpu(crq->query_capability.number);
2687 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2688 adapter->max_rx_add_entries_per_subcrq);
2690 case TCP_IP_OFFLOAD:
2691 adapter->tcp_ip_offload =
2692 be64_to_cpu(crq->query_capability.number);
2693 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2694 adapter->tcp_ip_offload);
2696 case PROMISC_SUPPORTED:
2697 adapter->promisc_supported =
2698 be64_to_cpu(crq->query_capability.number);
2699 netdev_dbg(netdev, "promisc_supported = %lld\n",
2700 adapter->promisc_supported);
2701 break;
2702 case MIN_MTU:
2703 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2704 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2705 break;
2706 case MAX_MTU:
2707 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2708 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2710 case MAX_MULTICAST_FILTERS:
2711 adapter->max_multicast_filters =
2712 be64_to_cpu(crq->query_capability.number);
2713 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2714 adapter->max_multicast_filters);
2716 case VLAN_HEADER_INSERTION:
2717 adapter->vlan_header_insertion =
2718 be64_to_cpu(crq->query_capability.number);
2719 if (adapter->vlan_header_insertion)
2720 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2721 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2722 adapter->vlan_header_insertion);
2724 case MAX_TX_SG_ENTRIES:
2725 adapter->max_tx_sg_entries =
2726 be64_to_cpu(crq->query_capability.number);
2727 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2728 adapter->max_tx_sg_entries);
2730 case RX_SG_SUPPORTED:
2731 adapter->rx_sg_supported =
2732 be64_to_cpu(crq->query_capability.number);
2733 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2734 adapter->rx_sg_supported);
2736 case OPT_TX_COMP_SUB_QUEUES:
2737 adapter->opt_tx_comp_sub_queues =
2738 be64_to_cpu(crq->query_capability.number);
2739 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2740 adapter->opt_tx_comp_sub_queues);
2742 case OPT_RX_COMP_QUEUES:
2743 adapter->opt_rx_comp_queues =
2744 be64_to_cpu(crq->query_capability.number);
2745 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2746 adapter->opt_rx_comp_queues);
2748 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2749 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2750 be64_to_cpu(crq->query_capability.number);
2751 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2752 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2754 case OPT_TX_ENTRIES_PER_SUBCRQ:
2755 adapter->opt_tx_entries_per_subcrq =
2756 be64_to_cpu(crq->query_capability.number);
2757 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2758 adapter->opt_tx_entries_per_subcrq);
2760 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2761 adapter->opt_rxba_entries_per_subcrq =
2762 be64_to_cpu(crq->query_capability.number);
2763 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2764 adapter->opt_rxba_entries_per_subcrq);
2766 case TX_RX_DESC_REQ:
2767 adapter->tx_rx_desc_req = crq->query_capability.number;
2768 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2769 adapter->tx_rx_desc_req);
2770 break;
2772 default:
2773 netdev_err(netdev, "Got invalid cap rsp %d\n",
2774 crq->query_capability.capability);
2776 out:
2777 /* We're done querying the capabilities, initialize sub-crqs */
2778 if (atomic_read(&adapter->running_cap_queries) == 0)
2779 init_sub_crqs(adapter, 0);
2781 }
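/* Apply a confirmed CONTROL_RAS operation to the cached copy of the
 * firmware component table so the debugfs files report the new state.
 */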
2783 static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2784 struct ibmvnic_adapter *adapter)
2786 u8 correlator = crq->control_ras_rsp.correlator;
2787 struct device *dev = &adapter->vdev->dev;
2788 bool found = false;
2789 int i;
2791 if (crq->control_ras_rsp.rc.code) {
2792 dev_warn(dev, "Control ras failed rc=%d\n",
2793 crq->control_ras_rsp.rc.code);
2794 return;
2795 }
2797 for (i = 0; i < adapter->ras_comp_num; i++) {
2798 if (adapter->ras_comps[i].correlator == correlator) {
2799 found = true;
2800 break;
2801 }
2802 }
2804 if (!found) {
2805 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2806 return;
2807 }
2809 switch (crq->control_ras_rsp.op) {
2810 case IBMVNIC_TRACE_LEVEL:
2811 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2813 case IBMVNIC_ERROR_LEVEL:
2814 adapter->ras_comps[i].error_check_level =
2815 crq->control_ras.level;
2817 case IBMVNIC_TRACE_PAUSE:
2818 adapter->ras_comp_int[i].paused = 1;
2820 case IBMVNIC_TRACE_RESUME:
2821 adapter->ras_comp_int[i].paused = 0;
2823 case IBMVNIC_TRACE_ON:
2824 adapter->ras_comps[i].trace_on = 1;
2826 case IBMVNIC_TRACE_OFF:
2827 adapter->ras_comps[i].trace_on = 0;
2829 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2830 /* trace_buff_sz is 3 bytes, stuff it into an int */
2831 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2832 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2833 crq->control_ras_rsp.trace_buff_sz[0];
2834 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2835 crq->control_ras_rsp.trace_buff_sz[1];
2836 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2837 crq->control_ras_rsp.trace_buff_sz[2];
2838 break;
2839 default:
2840 dev_err(dev, "invalid op %d on control_ras_rsp",
2841 crq->control_ras_rsp.op);
2842 }
2843 }
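/* debugfs read handler for the per-component "trace" file: have the
 * server copy its firmware trace into a temporary coherent buffer, then
 * hand the requested slice to user space.
 */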
2845 static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2846 loff_t *ppos)
2847 {
2848 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2849 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2850 struct device *dev = &adapter->vdev->dev;
2851 struct ibmvnic_fw_trace_entry *trace;
2852 int num = ras_comp_int->num;
2853 union ibmvnic_crq crq;
2854 dma_addr_t trace_tok;
2856 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2857 return 0;
2859 trace =
2860 dma_alloc_coherent(dev,
2861 be32_to_cpu(adapter->ras_comps[num].
2862 trace_buff_size), &trace_tok,
2863 GFP_KERNEL);
2864 if (!trace) {
2865 dev_err(dev, "Couldn't alloc trace buffer\n");
2866 return 0;
2867 }
2869 memset(&crq, 0, sizeof(crq));
2870 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2871 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2872 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2873 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2874 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2876 init_completion(&adapter->fw_done);
2877 ibmvnic_send_crq(adapter, &crq);
2878 wait_for_completion(&adapter->fw_done);
2880 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2881 len =
2882 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2883 *ppos;
2885 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2887 dma_free_coherent(dev,
2888 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2889 trace, trace_tok);
2890 *ppos += len;
2892 return len;
2893 }
2894 static const struct file_operations trace_ops = {
2895 .owner = THIS_MODULE,
2896 .open = simple_open,
2897 .read = trace_read,
2898 };
2900 static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2901 loff_t *ppos)
2902 {
2903 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2904 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2905 int num = ras_comp_int->num;
2906 char buff[5]; /* 1 or 0 plus \n and \0 */
2909 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2914 copy_to_user(user_buf, buff, size);
2919 static ssize_t paused_write(struct file *file, const char __user *user_buf,
2920 size_t len, loff_t *ppos)
2922 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2923 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2924 int num = ras_comp_int->num;
2925 union ibmvnic_crq crq;
2926 unsigned long val;
2927 char buff[9] = {}; /* decimal max int plus \n and \0 */
2929 if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
2930 return -EFAULT;
2931 if (kstrtoul(buff, 10, &val))
2932 return -EINVAL;
2933 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2934 memset(&crq, 0, sizeof(crq));
2935 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2936 crq.control_ras.cmd = CONTROL_RAS;
2937 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2938 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2939 ibmvnic_send_crq(adapter, &crq);
2941 return len;
2942 }
2944 static const struct file_operations paused_ops = {
2945 .owner = THIS_MODULE,
2946 .open = simple_open,
2947 .read = paused_read,
2948 .write = paused_write,
2949 };
2951 static ssize_t tracing_read(struct file *file, char __user *user_buf,
2952 size_t len, loff_t *ppos)
2954 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2955 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2956 int num = ras_comp_int->num;
2957 char buff[5]; /* 1 or 0 plus \n and \0 */
2960 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2965 copy_to_user(user_buf, buff, size);
2970 static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2971 size_t len, loff_t *ppos)
2973 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2974 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2975 int num = ras_comp_int->num;
2976 union ibmvnic_crq crq;
2977 unsigned long val;
2978 char buff[9] = {}; /* decimal max int plus \n and \0 */
2980 if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
2981 return -EFAULT;
2982 if (kstrtoul(buff, 10, &val))
2983 return -EINVAL;
2983 memset(&crq, 0, sizeof(crq));
2984 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2985 crq.control_ras.cmd = CONTROL_RAS;
2986 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2987 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
2988 ibmvnic_send_crq(adapter, &crq);
2990 return len;
2991 }
2992 static const struct file_operations tracing_ops = {
2993 .owner = THIS_MODULE,
2994 .open = simple_open,
2995 .read = tracing_read,
2996 .write = tracing_write,
2997 };
2999 static ssize_t error_level_read(struct file *file, char __user *user_buf,
3000 size_t len, loff_t *ppos)
3002 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3003 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3004 int num = ras_comp_int->num;
3005 char buff[5]; /* decimal max char plus \n and \0 */
3008 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
3013 copy_to_user(user_buf, buff, size);
3018 static ssize_t error_level_write(struct file *file, const char __user *user_buf,
3019 size_t len, loff_t *ppos)
3021 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3022 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3023 int num = ras_comp_int->num;
3024 union ibmvnic_crq crq;
3025 unsigned long val;
3026 char buff[9] = {}; /* decimal max int plus \n and \0 */
3028 if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
3029 return -EFAULT;
3030 if (kstrtoul(buff, 10, &val))
3031 return -EINVAL;
3034 memset(&crq, 0, sizeof(crq));
3035 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3036 crq.control_ras.cmd = CONTROL_RAS;
3037 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3038 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
3039 crq.control_ras.level = val;
3040 ibmvnic_send_crq(adapter, &crq);
3042 return len;
3043 }
3045 static const struct file_operations error_level_ops = {
3046 .owner = THIS_MODULE,
3047 .open = simple_open,
3048 .read = error_level_read,
3049 .write = error_level_write,
3050 };
3052 static ssize_t trace_level_read(struct file *file, char __user *user_buf,
3053 size_t len, loff_t *ppos)
3055 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3056 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3057 int num = ras_comp_int->num;
3058 char buff[5]; /* decimal max char plus \n and \0 */
3061 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
3065 copy_to_user(user_buf, buff, size);
3070 static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3071 size_t len, loff_t *ppos)
3073 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3074 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3075 union ibmvnic_crq crq;
3076 unsigned long val;
3077 char buff[9] = {}; /* decimal max int plus \n and \0 */
3079 if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
3080 return -EFAULT;
3081 if (kstrtoul(buff, 10, &val))
3082 return -EINVAL;
3084 memset(&crq, 0, sizeof(crq));
3085 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3086 crq.control_ras.cmd = CONTROL_RAS;
3087 crq.control_ras.correlator =
3088 adapter->ras_comps[ras_comp_int->num].correlator;
3089 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3090 crq.control_ras.level = val;
3091 ibmvnic_send_crq(adapter, &crq);
3093 return len;
3094 }
3096 static const struct file_operations trace_level_ops = {
3097 .owner = THIS_MODULE,
3098 .open = simple_open,
3099 .read = trace_level_read,
3100 .write = trace_level_write,
3101 };
3103 static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3104 size_t len, loff_t *ppos)
3106 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3107 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3108 int num = ras_comp_int->num;
3109 char buff[9]; /* decimal max int plus \n and \0 */
3112 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
3116 copy_to_user(user_buf, buff, size);
3121 static ssize_t trace_buff_size_write(struct file *file,
3122 const char __user *user_buf, size_t len,
3123 loff_t *ppos)
3124 {
3125 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3126 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3127 union ibmvnic_crq crq;
3128 unsigned long val;
3129 char buff[9] = {}; /* decimal max int plus \n and \0 */
3131 if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
3132 return -EFAULT;
3133 if (kstrtoul(buff, 10, &val))
3134 return -EINVAL;
3134 memset(&crq, 0, sizeof(crq));
3135 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3136 crq.control_ras.cmd = CONTROL_RAS;
3137 crq.control_ras.correlator =
3138 adapter->ras_comps[ras_comp_int->num].correlator;
3139 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3140 /* trace_buff_sz is 3 bytes: stuff the low 3 bytes of the big-endian long into it */
3141 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3142 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3143 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3144 ibmvnic_send_crq(adapter, &crq);
3146 return len;
3147 }
3149 static const struct file_operations trace_size_ops = {
3150 .owner = THIS_MODULE,
3151 .open = simple_open,
3152 .read = trace_buff_size_read,
3153 .write = trace_buff_size_write,
3154 };
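/* Build the per-component RAS debugfs tree once the component table has
 * arrived. Assuming debugfs is mounted in the usual place, the files land
 * under /sys/kernel/debug/ibmvnic_<unit-address>/ras_comps/<component>/,
 * so e.g. "echo 1 > .../tracing" turns firmware tracing on for a component.
 */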
3156 static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3157 struct ibmvnic_adapter *adapter)
3159 struct device *dev = &adapter->vdev->dev;
3160 struct dentry *dir_ent;
3164 debugfs_remove_recursive(adapter->ras_comps_ent);
3166 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3167 adapter->debugfs_dir);
3168 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3169 dev_info(dev, "debugfs create ras_comps dir failed\n");
3170 return;
3171 }
3173 for (i = 0; i < adapter->ras_comp_num; i++) {
3174 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3175 adapter->ras_comps_ent);
3176 if (!dir_ent || IS_ERR(dir_ent)) {
3177 dev_info(dev, "debugfs create %s dir failed\n",
3178 adapter->ras_comps[i].name);
3179 continue;
3180 }
3182 adapter->ras_comp_int[i].adapter = adapter;
3183 adapter->ras_comp_int[i].num = i;
3184 adapter->ras_comp_int[i].desc_blob.data =
3185 &adapter->ras_comps[i].description;
3186 adapter->ras_comp_int[i].desc_blob.size =
3187 sizeof(adapter->ras_comps[i].description);
3189 /* Don't need to remember the dentries because the debugfs dir
3190 * gets removed recursively
3191 */
3192 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3193 &adapter->ras_comp_int[i].desc_blob);
3194 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3195 dir_ent, &adapter->ras_comp_int[i],
3196 &trace_size_ops);
3197 ent = debugfs_create_file("trace_level",
3198 S_IRUGO |
3199 (adapter->ras_comps[i].trace_level !=
3200 0xFF ? S_IWUSR : 0),
3201 dir_ent, &adapter->ras_comp_int[i],
3202 &trace_level_ops);
3203 ent = debugfs_create_file("error_level",
3204 S_IRUGO |
3205 (adapter->
3206 ras_comps[i].error_check_level !=
3207 0xFF ? S_IWUSR : 0),
3208 dir_ent, &adapter->ras_comp_int[i],
3209 &error_level_ops);
3210 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3211 dir_ent, &adapter->ras_comp_int[i],
3212 &tracing_ops);
3213 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3214 dir_ent, &adapter->ras_comp_int[i],
3215 &paused_ops);
3216 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3217 &adapter->ras_comp_int[i],
3218 &trace_ops);
3219 }
3220 }
3222 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3223 struct ibmvnic_adapter *adapter)
3225 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3226 struct device *dev = &adapter->vdev->dev;
3227 union ibmvnic_crq newcrq;
3229 adapter->ras_comps = dma_alloc_coherent(dev, len,
3230 &adapter->ras_comps_tok,
3232 if (!adapter->ras_comps) {
3233 if (!firmware_has_feature(FW_FEATURE_CMO))
3234 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3235 return;
3236 }
3238 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3239 sizeof(struct ibmvnic_fw_comp_internal),
3241 if (!adapter->ras_comp_int) {
3242 dma_free_coherent(dev, len, adapter->ras_comps,
3243 adapter->ras_comps_tok);
3244 return;
3245 }
3245 memset(&newcrq, 0, sizeof(newcrq));
3246 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3247 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3248 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3249 newcrq.request_ras_comps.len = cpu_to_be32(len);
3250 ibmvnic_send_crq(adapter, &newcrq);
3251 }
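/* Walk the list of commands that were sent but never answered and release
 * whatever each one pinned: login buffers, queued error buffers, or a
 * waiter on fw_done. Called when a transport event kills the CRQ.
 */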
3253 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3255 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3256 struct device *dev = &adapter->vdev->dev;
3257 struct ibmvnic_error_buff *error_buff, *tmp2;
3258 unsigned long flags;
3259 unsigned long flags2;
3261 spin_lock_irqsave(&adapter->inflight_lock, flags);
3262 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3263 switch (inflight_cmd->crq.generic.cmd) {
3264 case LOGIN:
3265 dma_unmap_single(dev, adapter->login_buf_token,
3266 adapter->login_buf_sz,
3267 DMA_BIDIRECTIONAL);
3268 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3269 adapter->login_rsp_buf_sz,
3270 DMA_BIDIRECTIONAL);
3271 kfree(adapter->login_rsp_buf);
3272 kfree(adapter->login_buf);
3273 break;
3274 case REQUEST_DUMP:
3275 complete(&adapter->fw_done);
3276 break;
3277 case REQUEST_ERROR_INFO:
3278 spin_lock_irqsave(&adapter->error_list_lock, flags2);
3279 list_for_each_entry_safe(error_buff, tmp2,
3280 &adapter->errors, list) {
3281 dma_unmap_single(dev, error_buff->dma,
3282 error_buff->len,
3283 DMA_FROM_DEVICE);
3284 kfree(error_buff->buff);
3285 list_del(&error_buff->list);
3286 kfree(error_buff);
3287 }
3288 spin_unlock_irqrestore(&adapter->error_list_lock,
3289 flags2);
3290 break;
3291 }
3292 list_del(&inflight_cmd->list);
3293 kfree(inflight_cmd);
3294 }
3295 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3296 }
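/* Transport-event work item: after a partition migration the CRQ has to
 * be re-enabled with the hypervisor and the init handshake restarted,
 * since the sub-CRQs and in-flight commands were invalidated by the move.
 */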
3298 static void ibmvnic_xport_event(struct work_struct *work)
3300 struct ibmvnic_adapter *adapter = container_of(work,
3301 struct ibmvnic_adapter,
3302 ibmvnic_xport);
3303 struct device *dev = &adapter->vdev->dev;
3304 long rc;
3306 ibmvnic_free_inflight(adapter);
3307 release_sub_crqs(adapter);
3308 if (adapter->migrated) {
3309 rc = ibmvnic_reenable_crq_queue(adapter);
3310 if (rc)
3311 dev_err(dev, "Error after enable rc=%ld\n", rc);
3312 adapter->migrated = false;
3313 rc = ibmvnic_send_crq_init(adapter);
3314 if (rc)
3315 dev_err(dev, "Error sending init rc=%ld\n", rc);
3316 }
3317 }
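/* Top-level CRQ dispatcher, called from the interrupt handler. The first
 * byte of a message distinguishes transport events and init handshakes
 * from ordinary command responses, which fan out to the handlers above.
 */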
3319 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3320 struct ibmvnic_adapter *adapter)
3322 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3323 struct net_device *netdev = adapter->netdev;
3324 struct device *dev = &adapter->vdev->dev;
3325 long rc;
3327 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3328 ((unsigned long int *)crq)[0],
3329 ((unsigned long int *)crq)[1]);
3330 switch (gen_crq->first) {
3331 case IBMVNIC_CRQ_INIT_RSP:
3332 switch (gen_crq->cmd) {
3333 case IBMVNIC_CRQ_INIT:
3334 dev_info(dev, "Partner initialized\n");
3335 /* Send back a response */
3336 rc = ibmvnic_send_crq_init_complete(adapter);
3337 if (!rc)
3338 schedule_work(&adapter->vnic_crq_init);
3339 else
3340 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3341 break;
3342 case IBMVNIC_CRQ_INIT_COMPLETE:
3343 dev_info(dev, "Partner initialization complete\n");
3344 send_version_xchg(adapter);
3345 break;
3346 default:
3347 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3348 }
3349 return;
3350 case IBMVNIC_CRQ_XPORT_EVENT:
3351 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3352 dev_info(dev, "Re-enabling adapter\n");
3353 adapter->migrated = true;
3354 schedule_work(&adapter->ibmvnic_xport);
3355 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3356 dev_info(dev, "Backing device failover detected\n");
3357 netif_carrier_off(netdev);
3358 adapter->failover = true;
3359 } else {
3360 /* The adapter lost the connection */
3361 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3362 gen_crq->cmd);
3363 schedule_work(&adapter->ibmvnic_xport);
3364 }
3365 return;
3366 case IBMVNIC_CRQ_CMD_RSP:
3367 break;
3368 default:
3369 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3370 gen_crq->first);
3371 return;
3372 }
3374 switch (gen_crq->cmd) {
3375 case VERSION_EXCHANGE_RSP:
3376 rc = crq->version_exchange_rsp.rc.code;
3377 if (rc) {
3378 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3379 break;
3380 }
3381 ibmvnic_version =
3382 be16_to_cpu(crq->version_exchange_rsp.version);
3383 dev_info(dev, "Partner protocol version is %d\n",
3384 ibmvnic_version);
3385 send_cap_queries(adapter);
3387 case QUERY_CAPABILITY_RSP:
3388 handle_query_cap_rsp(crq, adapter);
3389 break;
3390 case QUERY_MAP_RSP:
3391 handle_query_map_rsp(crq, adapter);
3393 case REQUEST_MAP_RSP:
3394 handle_request_map_rsp(crq, adapter);
3396 case REQUEST_UNMAP_RSP:
3397 handle_request_unmap_rsp(crq, adapter);
3399 case REQUEST_CAPABILITY_RSP:
3400 handle_request_cap_rsp(crq, adapter);
3401 break;
3402 case LOGIN_RSP:
3403 netdev_dbg(netdev, "Got Login Response\n");
3404 handle_login_rsp(crq, adapter);
3406 case LOGICAL_LINK_STATE_RSP:
3407 netdev_dbg(netdev, "Got Logical Link State Response\n");
3408 adapter->logical_link_state =
3409 crq->logical_link_state_rsp.link_state;
3411 case LINK_STATE_INDICATION:
3412 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3413 adapter->phys_link_state =
3414 crq->link_state_indication.phys_link_state;
3415 adapter->logical_link_state =
3416 crq->link_state_indication.logical_link_state;
3418 case CHANGE_MAC_ADDR_RSP:
3419 netdev_dbg(netdev, "Got MAC address change Response\n");
3420 handle_change_mac_rsp(crq, adapter);
3422 case ERROR_INDICATION:
3423 netdev_dbg(netdev, "Got Error Indication\n");
3424 handle_error_indication(crq, adapter);
3426 case REQUEST_ERROR_RSP:
3427 netdev_dbg(netdev, "Got Error Detail Response\n");
3428 handle_error_info_rsp(crq, adapter);
3430 case REQUEST_STATISTICS_RSP:
3431 netdev_dbg(netdev, "Got Statistics Response\n");
3432 complete(&adapter->stats_done);
3434 case REQUEST_DUMP_SIZE_RSP:
3435 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3436 handle_dump_size_rsp(crq, adapter);
3438 case REQUEST_DUMP_RSP:
3439 netdev_dbg(netdev, "Got Request Dump Response\n");
3440 complete(&adapter->fw_done);
3442 case QUERY_IP_OFFLOAD_RSP:
3443 netdev_dbg(netdev, "Got Query IP offload Response\n");
3444 handle_query_ip_offload_rsp(adapter);
3446 case MULTICAST_CTRL_RSP:
3447 netdev_dbg(netdev, "Got multicast control Response\n");
3449 case CONTROL_IP_OFFLOAD_RSP:
3450 netdev_dbg(netdev, "Got Control IP offload Response\n");
3451 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3452 sizeof(adapter->ip_offload_ctrl),
3453 DMA_TO_DEVICE);
3454 /* We're done with the queries, perform the login */
3455 send_login(adapter);
3457 case REQUEST_RAS_COMP_NUM_RSP:
3458 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3459 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3460 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3461 break;
3462 }
3463 adapter->ras_comp_num =
3464 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3465 handle_request_ras_comp_num_rsp(crq, adapter);
3467 case REQUEST_RAS_COMPS_RSP:
3468 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3469 handle_request_ras_comps_rsp(crq, adapter);
3471 case CONTROL_RAS_RSP:
3472 netdev_dbg(netdev, "Got Control RAS Response\n");
3473 handle_control_ras_rsp(crq, adapter);
3475 case COLLECT_FW_TRACE_RSP:
3476 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3477 complete(&adapter->fw_done);
3478 break;
3479 default:
3480 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3481 gen_crq->cmd);
3482 }
3483 }
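/* CRQ interrupt handler: drain the queue with interrupts disabled, then
 * re-enable and poll once more to close the race with a message arriving
 * between the final dequeue and the re-enable.
 */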
3485 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3487 struct ibmvnic_adapter *adapter = instance;
3488 struct ibmvnic_crq_queue *queue = &adapter->crq;
3489 struct vio_dev *vdev = adapter->vdev;
3490 union ibmvnic_crq *crq;
3491 unsigned long flags;
3492 bool done = false;
3494 spin_lock_irqsave(&queue->lock, flags);
3495 vio_disable_interrupts(vdev);
3496 while (!done) {
3497 /* Pull all the valid messages off the CRQ */
3498 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3499 /* This barrier makes sure ibmvnic_next_crq()'s
3500 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
3501 * before ibmvnic_handle_crq()'s
3502 * switch(gen_crq->first) and switch(gen_crq->cmd).
3503 */
3504 dma_rmb();
3505 ibmvnic_handle_crq(crq, adapter);
3506 crq->generic.first = 0;
3507 }
3508 vio_enable_interrupts(vdev);
3509 crq = ibmvnic_next_crq(adapter);
3510 if (crq != NULL) {
3511 vio_disable_interrupts(vdev);
3512 ibmvnic_handle_crq(crq, adapter);
3513 crq->generic.first = 0;
3514 } else {
3515 done = true;
3516 }
3517 }
3518 spin_unlock_irqrestore(&queue->lock, flags);
3519 return IRQ_HANDLED;
3520 }
3522 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3523 {
3524 struct vio_dev *vdev = adapter->vdev;
3525 int rc;
3527 do {
3528 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3529 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3531 if (rc)
3532 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3534 return rc;
3535 }
3537 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3539 struct ibmvnic_crq_queue *crq = &adapter->crq;
3540 struct device *dev = &adapter->vdev->dev;
3541 struct vio_dev *vdev = adapter->vdev;
3542 int rc;
3544 /* Close the CRQ */
3545 do {
3546 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3547 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3549 /* Clean out the queue */
3553 memset(crq->msgs, 0, PAGE_SIZE);
3556 /* And re-open it again */
3557 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3558 crq->msg_token, PAGE_SIZE);
3560 if (rc == H_CLOSED)
3561 /* Adapter is good, but other end is not ready */
3562 dev_warn(dev, "Partner adapter not ready\n");
3563 else if (rc != 0)
3564 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3566 return rc;
3567 }
3569 static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3571 struct ibmvnic_crq_queue *crq = &adapter->crq;
3572 struct vio_dev *vdev = adapter->vdev;
3573 long rc;
3575 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3576 free_irq(vdev->irq, adapter);
3577 do {
3578 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3579 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3581 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3582 DMA_BIDIRECTIONAL);
3583 free_page((unsigned long)crq->msgs);
3584 }
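/* Allocate, map and register the one-page CRQ with the hypervisor, then
 * wire up the device interrupt. Failures unwind in reverse order through
 * the labels at the bottom.
 */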
3586 static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3588 struct ibmvnic_crq_queue *crq = &adapter->crq;
3589 struct device *dev = &adapter->vdev->dev;
3590 struct vio_dev *vdev = adapter->vdev;
3591 int rc, retrc = -ENOMEM;
3593 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3594 /* Should we allocate more than one page? */
3595 if (!crq->msgs)
3596 return -ENOMEM;
3599 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3600 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3602 if (dma_mapping_error(dev, crq->msg_token))
3603 goto map_failed;
3605 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3606 crq->msg_token, PAGE_SIZE);
3608 if (rc == H_RESOURCE)
3609 /* maybe kexecing and resource is busy. try a reset */
3610 rc = ibmvnic_reset_crq(adapter);
3613 if (rc == H_CLOSED) {
3614 dev_warn(dev, "Partner adapter not ready\n");
3615 } else if (rc) {
3616 dev_warn(dev, "Error %d opening adapter\n", rc);
3617 goto reg_crq_failed;
3618 }
3620 retrc = 0;
3622 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3623 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3624 adapter);
3625 if (rc) {
3626 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3627 vdev->irq, rc);
3628 goto req_irq_failed;
3629 }
3631 rc = vio_enable_interrupts(vdev);
3632 if (rc) {
3633 dev_err(dev, "Error %d enabling interrupts\n", rc);
3634 goto req_irq_failed;
3635 }
3638 spin_lock_init(&crq->lock);
3640 return retrc;
3642 req_irq_failed:
3643 do {
3644 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3645 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3646 reg_crq_failed:
3647 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3648 map_failed:
3649 free_page((unsigned long)crq->msgs);
3650 return retrc;
3651 }
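/* debugfs "dump" file: ask the server for the dump size, let the CRQ
 * handlers allocate the buffer and fetch the dump, then emit it through
 * seq_file.
 */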
3653 /* debugfs for dump */
3654 static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3656 struct net_device *netdev = seq->private;
3657 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3658 struct device *dev = &adapter->vdev->dev;
3659 union ibmvnic_crq crq;
3661 memset(&crq, 0, sizeof(crq));
3662 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3663 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3665 init_completion(&adapter->fw_done);
3666 ibmvnic_send_crq(adapter, &crq);
3667 wait_for_completion(&adapter->fw_done);
3669 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3671 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3674 kfree(adapter->dump_data);
3676 return 0;
3677 }
3679 static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3681 return single_open(file, ibmvnic_dump_show, inode->i_private);
3684 static const struct file_operations ibmvnic_dump_ops = {
3685 .owner = THIS_MODULE,
3686 .open = ibmvnic_dump_open,
3687 .read = seq_read,
3688 .llseek = seq_lseek,
3689 .release = single_release,
3690 };
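/* Work item run when the server (re)initializes the CRQ, e.g. on passive
 * init or failover: redo the version exchange, renegotiate capabilities
 * if asked, and bring the netdev back up.
 */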
3692 static void handle_crq_init_rsp(struct work_struct *work)
3694 struct ibmvnic_adapter *adapter = container_of(work,
3695 struct ibmvnic_adapter,
3696 vnic_crq_init);
3697 struct device *dev = &adapter->vdev->dev;
3698 struct net_device *netdev = adapter->netdev;
3699 unsigned long timeout = msecs_to_jiffies(30000);
3700 bool restart = false;
3701 int rc;
3703 if (adapter->failover) {
3704 release_sub_crqs(adapter);
3705 if (netif_running(netdev)) {
3706 netif_tx_disable(netdev);
3707 ibmvnic_close(netdev);
3708 restart = true;
3709 }
3710 }
3712 reinit_completion(&adapter->init_done);
3713 send_version_xchg(adapter);
3714 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3715 dev_err(dev, "Passive init timeout\n");
3716 goto task_failed;
3717 }
3719 do {
3720 if (adapter->renegotiate) {
3721 adapter->renegotiate = false;
3722 release_sub_crqs_no_irqs(adapter);
3724 reinit_completion(&adapter->init_done);
3725 send_cap_queries(adapter);
3726 if (!wait_for_completion_timeout(&adapter->init_done,
3727 timeout)) {
3728 dev_err(dev, "Passive init timeout\n");
3729 goto task_failed;
3730 }
3731 }
3732 } while (adapter->renegotiate);
3733 rc = init_sub_crq_irqs(adapter);
3734 if (rc)
3735 goto task_failed;
3738 netdev->real_num_tx_queues = adapter->req_tx_queues;
3739 netdev->mtu = adapter->req_mtu;
3741 if (adapter->failover) {
3742 adapter->failover = false;
3743 if (restart) {
3744 rc = ibmvnic_open(netdev);
3745 if (rc)
3746 goto restart_failed;
3747 }
3748 netif_carrier_on(netdev);
3749 goto out;
3750 }
3752 rc = register_netdev(netdev);
3753 if (rc) {
3754 dev_err(dev,
3755 "failed to register netdev rc=%d\n", rc);
3756 goto register_failed;
3757 }
3758 dev_info(dev, "ibmvnic registered\n");
3759 out:
3760 return;
3762 restart_failed:
3763 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3764 register_failed:
3765 release_sub_crqs(adapter);
3766 task_failed:
3767 dev_err(dev, "Passive initialization was not successful\n");
3768 }
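/* VIO bus probe: read the MAC address from the device tree, set up the
 * netdev and CRQ, then drive the init handshake (version exchange,
 * capability negotiation, login) before registering the interface.
 */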
3770 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3772 unsigned long timeout = msecs_to_jiffies(30000);
3773 struct ibmvnic_adapter *adapter;
3774 struct net_device *netdev;
3775 unsigned char *mac_addr_p;
3776 struct dentry *ent;
3777 char buf[17]; /* debugfs name buf */
3778 int rc;
3780 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3781 dev->unit_address);
3783 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3784 VETH_MAC_ADDR, NULL);
3785 if (!mac_addr_p) {
3786 dev_err(&dev->dev,
3787 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3788 __FILE__, __LINE__);
3789 return 0;
3790 }
3792 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3793 IBMVNIC_MAX_TX_QUEUES);
3794 if (!netdev)
3795 return -ENOMEM;
3797 adapter = netdev_priv(netdev);
3798 dev_set_drvdata(&dev->dev, netdev);
3799 adapter->vdev = dev;
3800 adapter->netdev = netdev;
3801 adapter->failover = false;
3803 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3804 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3805 netdev->irq = dev->irq;
3806 netdev->netdev_ops = &ibmvnic_netdev_ops;
3807 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3808 SET_NETDEV_DEV(netdev, &dev->dev);
3810 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3811 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3813 spin_lock_init(&adapter->stats_lock);
3815 rc = ibmvnic_init_crq_queue(adapter);
3817 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3821 INIT_LIST_HEAD(&adapter->errors);
3822 INIT_LIST_HEAD(&adapter->inflight);
3823 spin_lock_init(&adapter->error_list_lock);
3824 spin_lock_init(&adapter->inflight_lock);
3826 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3827 sizeof(struct ibmvnic_statistics),
3829 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3830 if (!firmware_has_feature(FW_FEATURE_CMO))
3831 dev_err(&dev->dev, "Couldn't map stats buffer\n");
3832 rc = -ENOMEM;
3833 goto free_crq;
3834 }
3836 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3837 ent = debugfs_create_dir(buf, NULL);
3838 if (!ent || IS_ERR(ent)) {
3839 dev_info(&dev->dev, "debugfs create directory failed\n");
3840 adapter->debugfs_dir = NULL;
3841 } else {
3842 adapter->debugfs_dir = ent;
3843 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3844 netdev, &ibmvnic_dump_ops);
3845 if (!ent || IS_ERR(ent)) {
3846 dev_info(&dev->dev,
3847 "debugfs create dump file failed\n");
3848 adapter->debugfs_dump = NULL;
3849 } else {
3850 adapter->debugfs_dump = ent;
3851 }
3852 }
3854 init_completion(&adapter->init_done);
3855 ibmvnic_send_crq_init(adapter);
3856 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3860 if (adapter->renegotiate) {
3861 adapter->renegotiate = false;
3862 release_sub_crqs_no_irqs(adapter);
3864 reinit_completion(&adapter->init_done);
3865 send_cap_queries(adapter);
3866 if (!wait_for_completion_timeout(&adapter->init_done,
3870 } while (adapter->renegotiate);
3872 rc = init_sub_crq_irqs(adapter);
3873 if (rc) {
3874 dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
3875 goto free_debugfs;
3876 }
3878 netdev->real_num_tx_queues = adapter->req_tx_queues;
3879 netdev->mtu = adapter->req_mtu;
3881 rc = register_netdev(netdev);
3882 if (rc) {
3883 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3884 goto free_sub_crqs;
3885 }
3886 dev_info(&dev->dev, "ibmvnic registered\n");
3888 return 0;
3890 free_sub_crqs:
3891 release_sub_crqs(adapter);
3892 free_debugfs:
3893 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3894 debugfs_remove_recursive(adapter->debugfs_dir);
3895 free_crq:
3896 ibmvnic_release_crq_queue(adapter);
3897 free_netdev:
3898 free_netdev(netdev);
3899 return rc;
3900 }
3902 static int ibmvnic_remove(struct vio_dev *dev)
3904 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3905 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3907 unregister_netdev(netdev);
3909 release_sub_crqs(adapter);
3911 ibmvnic_release_crq_queue(adapter);
3913 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3914 debugfs_remove_recursive(adapter->debugfs_dir);
3916 dma_unmap_single(&dev->dev, adapter->stats_token,
3917 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3919 if (adapter->ras_comps)
3920 dma_free_coherent(&dev->dev,
3921 adapter->ras_comp_num *
3922 sizeof(struct ibmvnic_fw_component),
3923 adapter->ras_comps, adapter->ras_comps_tok);
3925 kfree(adapter->ras_comp_int);
3927 free_netdev(netdev);
3928 dev_set_drvdata(&dev->dev, NULL);
3930 return 0;
3931 }
3933 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3935 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3936 struct ibmvnic_adapter *adapter;
3937 struct iommu_table *tbl;
3938 unsigned long ret = 0;
3941 tbl = get_iommu_table_base(&vdev->dev);
3943 /* netdev inits at probe time along with the structures we need below */
3944 if (!netdev)
3945 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3947 adapter = netdev_priv(netdev);
3949 ret += PAGE_SIZE; /* the crq message queue */
3950 ret += adapter->bounce_buffer_size;
3951 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3953 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3954 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3956 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3957 i++)
3958 ret += adapter->rx_pool[i].size *
3959 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3961 return ret;
3962 }
3964 static int ibmvnic_resume(struct device *dev)
3966 struct net_device *netdev = dev_get_drvdata(dev);
3967 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3970 /* kick the interrupt handlers just in case we lost an interrupt */
3971 for (i = 0; i < adapter->req_rx_queues; i++)
3972 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3973 adapter->rx_scrq[i]);
3975 return 0;
3976 }
3978 static struct vio_device_id ibmvnic_device_table[] = {
3979 {"network", "IBM,vnic"},
3980 {"", "" }
3981 };
3982 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3984 static const struct dev_pm_ops ibmvnic_pm_ops = {
3985 .resume = ibmvnic_resume
3986 };
3988 static struct vio_driver ibmvnic_driver = {
3989 .id_table = ibmvnic_device_table,
3990 .probe = ibmvnic_probe,
3991 .remove = ibmvnic_remove,
3992 .get_desired_dma = ibmvnic_get_desired_dma,
3993 .name = ibmvnic_driver_name,
3994 .pm = &ibmvnic_pm_ops,
3995 };
3997 /* module functions */
3998 static int __init ibmvnic_module_init(void)
4000 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4001 IBMVNIC_DRIVER_VERSION);
4003 return vio_register_driver(&ibmvnic_driver);
4004 }
4006 static void __exit ibmvnic_module_exit(void)
4008 vio_unregister_driver(&ibmvnic_driver);
4009 }
4011 module_init(ibmvnic_module_init);
4012 module_exit(ibmvnic_module_exit);