1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /**************************************************************************/
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
16 /**************************************************************************/
18 #define IBMVNIC_NAME "ibmvnic"
19 #define IBMVNIC_DRIVER_VERSION "1.0.1"
20 #define IBMVNIC_INVALID_MAP -1
21 #define IBMVNIC_OPEN_FAILED 3
23 /* basic structures plus 100 2k buffers */
24 #define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
26 /* Initial module_parameters */
27 #define IBMVNIC_RX_WEIGHT 16
28 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
29 #define IBMVNIC_BUFFS_PER_POOL 100
30 #define IBMVNIC_MAX_QUEUES 16
31 #define IBMVNIC_MAX_QUEUE_SZ 4096
32 #define IBMVNIC_MAX_IND_DESCS 16
33 #define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
35 #define IBMVNIC_TSO_BUF_SZ 65536
36 #define IBMVNIC_TSO_BUFS 64
37 #define IBMVNIC_TSO_POOL_MASK 0x80000000
39 /* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
40 * has a set of buffers. The size of each buffer is determined by the MTU.
42 * Each Rx/Tx pool is also associated with a DMA region that is shared
43 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
44 * region is also referred to as a Long Term Buffer or LTB.
46 * The size of the DMA region required for an Rx/Tx pool depends on the
47 * number and size (MTU) of the buffers in the pool. At the max levels
48 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
51 * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
52 * kernel (about 16MB currently). To support say 4K Jumbo frames, we
53 * use a set of LTBs (struct ltb_set) per pool.
55 * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
56 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
57 * (must be <= IBMVNIC_ONE_LTB_MAX)
58 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
60 * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
61 * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
63 * The Rx and Tx pools can have up to 4096 buffers. The max size of these
64 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
65 * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
67 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
68 * the allocation of the LTB can fail when system is low in memory. If
69 * it's too small, we would need several mappings for each of the Rx/
70 * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
73 * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
74 * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
75 * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
76 * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
78 #define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
79 #define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
80 #define IBMVNIC_LTB_SET_SIZE (38 << 20)
82 #define IBMVNIC_BUFFER_HLEN 500
83 #define IBMVNIC_RESET_DELAY 100
85 struct ibmvnic_login_buffer {
88 #define INITIAL_VERSION_LB 1
89 __be32 num_txcomp_subcrqs;
90 __be32 off_txcomp_subcrqs;
91 __be32 num_rxcomp_subcrqs;
92 __be32 off_rxcomp_subcrqs;
93 __be32 login_rsp_ioba;
95 __be32 client_data_offset;
96 __be32 client_data_len;
97 } __packed __aligned(8);
99 struct ibmvnic_login_rsp_buffer {
102 #define INITIAL_VERSION_LRB 1
103 __be32 num_txsubm_subcrqs;
104 __be32 off_txsubm_subcrqs;
105 __be32 num_rxadd_subcrqs;
106 __be32 off_rxadd_subcrqs;
107 __be32 off_rxadd_buff_size;
108 __be32 num_supp_tx_desc;
109 __be32 off_supp_tx_desc;
110 } __packed __aligned(8);
112 struct ibmvnic_query_ip_offload_buffer {
115 #define INITIAL_VERSION_IOB 1
127 __be16 max_ipv4_header_size;
128 __be16 max_ipv6_header_size;
129 __be16 max_tcp_header_size;
130 __be16 max_udp_header_size;
131 __be32 max_large_tx_size;
132 __be32 max_large_rx_size;
134 u8 ipv6_extension_header;
135 #define IPV6_EH_NOT_SUPPORTED 0x00
136 #define IPV6_EH_SUPPORTED_LIM 0x01
137 #define IPV6_EH_SUPPORTED 0xFF
138 u8 tcp_pseudosum_req;
139 #define TCP_PS_NOT_REQUIRED 0x00
140 #define TCP_PS_REQUIRED 0x01
142 __be16 num_ipv6_ext_headers;
143 __be32 off_ipv6_ext_headers;
145 } __packed __aligned(8);
147 struct ibmvnic_control_ip_offload_buffer {
150 #define INITIAL_VERSION_IOB 1
163 } __packed __aligned(8);
165 struct ibmvnic_fw_component {
167 __be32 trace_buff_size;
170 u8 parent_correlator;
171 u8 error_check_level;
175 } __packed __aligned(8);
177 struct ibmvnic_fw_trace_entry {
181 __be64 pmc_registers;
183 __be64 trace_data[5];
184 } __packed __aligned(8);
186 struct ibmvnic_statistics {
193 __be64 ucast_tx_packets;
194 __be64 ucast_rx_packets;
195 __be64 mcast_tx_packets;
196 __be64 mcast_rx_packets;
197 __be64 bcast_tx_packets;
198 __be64 bcast_rx_packets;
201 __be64 single_collision_frames;
202 __be64 multi_collision_frames;
203 __be64 sqe_test_errors;
205 __be64 late_collisions;
206 __be64 excess_collisions;
207 __be64 internal_mac_tx_errors;
208 __be64 carrier_sense;
209 __be64 too_long_frames;
210 __be64 internal_mac_rx_errors;
212 } __packed __aligned(8);
214 #define NUM_TX_STATS 3
215 struct ibmvnic_tx_queue_stats {
221 #define NUM_RX_STATS 3
222 struct ibmvnic_rx_queue_stats {
228 struct ibmvnic_acl_buffer {
231 #define INITIAL_VERSION_IOB 1
232 u8 mac_acls_restrict;
233 u8 vlan_acls_restrict;
235 __be32 num_mac_addrs;
236 __be32 offset_mac_addrs;
238 __be32 offset_vlan_ids;
240 } __packed __aligned(8);
242 /* descriptors have been changed, how should this be defined? 1? 4? */
244 #define IBMVNIC_TX_DESC_VERSIONS 3
246 /* is this still needed? */
247 struct ibmvnic_tx_comp_desc {
251 __be32 correlators[5];
252 } __packed __aligned(8);
254 /* some flags that included in v0 descriptor, which is gone
255 * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM
256 * and only in some offload_flags variable that doesn't seem
257 * to be used anywhere, can probably be removed?
260 #define IBMVNIC_TCP_CHKSUM 0x20
261 #define IBMVNIC_UDP_CHKSUM 0x08
263 struct ibmvnic_tx_desc {
267 #define IBMVNIC_TX_DESC 0x10
271 #define IBMVNIC_TX_COMP_NEEDED 0x80
272 #define IBMVNIC_TX_CHKSUM_OFFLOAD 0x40
273 #define IBMVNIC_TX_LSO 0x20
274 #define IBMVNIC_TX_PROT_TCP 0x10
275 #define IBMVNIC_TX_PROT_UDP 0x08
276 #define IBMVNIC_TX_PROT_IPV4 0x04
277 #define IBMVNIC_TX_PROT_IPV6 0x02
278 #define IBMVNIC_TX_VLAN_PRESENT 0x01
280 #define IBMVNIC_TX_VLAN_INSERT 0x80
288 } __packed __aligned(8);
290 struct ibmvnic_hdr_desc {
293 #define IBMVNIC_HDR_DESC 0x11
300 } __packed __aligned(8);
302 struct ibmvnic_hdr_ext_desc {
305 #define IBMVNIC_HDR_EXT_DESC 0x12
308 } __packed __aligned(8);
310 struct ibmvnic_sge_desc {
313 #define IBMVNIC_SGE_DESC 0x30
321 } __packed __aligned(8);
323 struct ibmvnic_rx_comp_desc {
326 #define IBMVNIC_IP_CHKSUM_GOOD 0x80
327 #define IBMVNIC_TCP_UDP_CHKSUM_GOOD 0x40
328 #define IBMVNIC_END_FRAME 0x20
329 #define IBMVNIC_EXACT_MC 0x10
330 #define IBMVNIC_VLAN_STRIPPED 0x08
331 __be16 off_frame_data;
337 } __packed __aligned(8);
339 struct ibmvnic_generic_scrq {
342 } __packed __aligned(8);
344 struct ibmvnic_rx_buff_add_desc {
352 } __packed __aligned(8);
355 u8 code; /* one of enum ibmvnic_rc_codes */
357 } __packed __aligned(4);
359 struct ibmvnic_generic_crq {
363 struct ibmvnic_rc rc;
364 } __packed __aligned(8);
366 struct ibmvnic_version_exchange {
370 #define IBMVNIC_INITIAL_VERSION 1
372 struct ibmvnic_rc rc;
373 } __packed __aligned(8);
375 struct ibmvnic_capability {
378 __be16 capability; /* one of ibmvnic_capabilities */
380 struct ibmvnic_rc rc;
381 } __packed __aligned(8);
383 struct ibmvnic_login {
389 } __packed __aligned(8);
391 struct ibmvnic_phys_parms {
395 #define IBMVNIC_EXTERNAL_LOOPBACK 0x80
396 #define IBMVNIC_INTERNAL_LOOPBACK 0x40
397 #define IBMVNIC_PROMISC 0x20
398 #define IBMVNIC_PHYS_LINK_ACTIVE 0x10
399 #define IBMVNIC_AUTONEG_DUPLEX 0x08
400 #define IBMVNIC_FULL_DUPLEX 0x04
401 #define IBMVNIC_HALF_DUPLEX 0x02
402 #define IBMVNIC_CAN_CHG_PHYS_PARMS 0x01
404 #define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
406 #define IBMVNIC_AUTONEG 0x80000000
407 #define IBMVNIC_10MBPS 0x40000000
408 #define IBMVNIC_100MBPS 0x20000000
409 #define IBMVNIC_1GBPS 0x10000000
410 #define IBMVNIC_10GBPS 0x08000000
411 #define IBMVNIC_40GBPS 0x04000000
412 #define IBMVNIC_100GBPS 0x02000000
413 #define IBMVNIC_25GBPS 0x01000000
414 #define IBMVNIC_50GBPS 0x00800000
415 #define IBMVNIC_200GBPS 0x00400000
417 struct ibmvnic_rc rc;
418 } __packed __aligned(8);
420 struct ibmvnic_logical_link_state {
424 #define IBMVNIC_LOGICAL_LNK_DN 0x00
425 #define IBMVNIC_LOGICAL_LNK_UP 0x01
426 #define IBMVNIC_LOGICAL_LNK_QUERY 0xff
428 struct ibmvnic_rc rc;
429 } __packed __aligned(8);
431 struct ibmvnic_query_ip_offload {
437 struct ibmvnic_rc rc;
438 } __packed __aligned(8);
440 struct ibmvnic_control_ip_offload {
446 struct ibmvnic_rc rc;
447 } __packed __aligned(8);
449 struct ibmvnic_request_statistics {
453 #define IBMVNIC_PHYSICAL_PORT 0x80
458 } __packed __aligned(8);
460 struct ibmvnic_error_indication {
464 #define IBMVNIC_FATAL_ERROR 0x80
467 __be32 detail_error_sz;
470 } __packed __aligned(8);
472 struct ibmvnic_link_state_indication {
477 u8 logical_link_state;
479 } __packed __aligned(8);
481 struct ibmvnic_change_mac_addr {
486 struct ibmvnic_rc rc;
487 } __packed __aligned(8);
489 struct ibmvnic_multicast_ctrl {
494 #define IBMVNIC_ENABLE_MC 0x80
495 #define IBMVNIC_DISABLE_MC 0x40
496 #define IBMVNIC_ENABLE_ALL 0x20
497 #define IBMVNIC_DISABLE_ALL 0x10
499 __be16 reserved2; /* was num_enabled_mc_addr; */
500 struct ibmvnic_rc rc;
501 } __packed __aligned(8);
503 struct ibmvnic_get_vpd_size {
507 } __packed __aligned(8);
509 struct ibmvnic_get_vpd_size_rsp {
514 struct ibmvnic_rc rc;
515 } __packed __aligned(8);
517 struct ibmvnic_get_vpd {
524 } __packed __aligned(8);
526 struct ibmvnic_get_vpd_rsp {
530 struct ibmvnic_rc rc;
531 } __packed __aligned(8);
533 struct ibmvnic_acl_change_indication {
537 #define IBMVNIC_MAC_ACL 0
538 #define IBMVNIC_VLAN_ACL 1
540 } __packed __aligned(8);
542 struct ibmvnic_acl_query {
549 } __packed __aligned(8);
551 struct ibmvnic_tune {
558 } __packed __aligned(8);
560 struct ibmvnic_request_map {
568 } __packed __aligned(8);
570 struct ibmvnic_request_map_rsp {
576 struct ibmvnic_rc rc;
577 } __packed __aligned(8);
579 struct ibmvnic_request_unmap {
585 } __packed __aligned(8);
587 struct ibmvnic_request_unmap_rsp {
593 struct ibmvnic_rc rc;
594 } __packed __aligned(8);
596 struct ibmvnic_query_map {
600 } __packed __aligned(8);
602 struct ibmvnic_query_map_rsp {
609 struct ibmvnic_rc rc;
610 } __packed __aligned(8);
613 struct ibmvnic_generic_crq generic;
614 struct ibmvnic_version_exchange version_exchange;
615 struct ibmvnic_version_exchange version_exchange_rsp;
616 struct ibmvnic_capability query_capability;
617 struct ibmvnic_capability query_capability_rsp;
618 struct ibmvnic_capability request_capability;
619 struct ibmvnic_capability request_capability_rsp;
620 struct ibmvnic_login login;
621 struct ibmvnic_generic_crq login_rsp;
622 struct ibmvnic_phys_parms query_phys_parms;
623 struct ibmvnic_phys_parms query_phys_parms_rsp;
624 struct ibmvnic_phys_parms query_phys_capabilities;
625 struct ibmvnic_phys_parms query_phys_capabilities_rsp;
626 struct ibmvnic_phys_parms set_phys_parms;
627 struct ibmvnic_phys_parms set_phys_parms_rsp;
628 struct ibmvnic_logical_link_state logical_link_state;
629 struct ibmvnic_logical_link_state logical_link_state_rsp;
630 struct ibmvnic_query_ip_offload query_ip_offload;
631 struct ibmvnic_query_ip_offload query_ip_offload_rsp;
632 struct ibmvnic_control_ip_offload control_ip_offload;
633 struct ibmvnic_control_ip_offload control_ip_offload_rsp;
634 struct ibmvnic_request_statistics request_statistics;
635 struct ibmvnic_generic_crq request_statistics_rsp;
636 struct ibmvnic_error_indication error_indication;
637 struct ibmvnic_link_state_indication link_state_indication;
638 struct ibmvnic_change_mac_addr change_mac_addr;
639 struct ibmvnic_change_mac_addr change_mac_addr_rsp;
640 struct ibmvnic_multicast_ctrl multicast_ctrl;
641 struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
642 struct ibmvnic_get_vpd_size get_vpd_size;
643 struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
644 struct ibmvnic_get_vpd get_vpd;
645 struct ibmvnic_get_vpd_rsp get_vpd_rsp;
646 struct ibmvnic_acl_change_indication acl_change_indication;
647 struct ibmvnic_acl_query acl_query;
648 struct ibmvnic_generic_crq acl_query_rsp;
649 struct ibmvnic_tune tune;
650 struct ibmvnic_generic_crq tune_rsp;
651 struct ibmvnic_request_map request_map;
652 struct ibmvnic_request_map_rsp request_map_rsp;
653 struct ibmvnic_request_unmap request_unmap;
654 struct ibmvnic_request_unmap_rsp request_unmap_rsp;
655 struct ibmvnic_query_map query_map;
656 struct ibmvnic_query_map_rsp query_map_rsp;
659 enum ibmvnic_rc_codes {
670 UNSUPPORTEDOPTION = 10,
673 enum ibmvnic_capabilities {
676 MIN_RX_ADD_QUEUES = 3,
679 MAX_RX_ADD_QUEUES = 6,
682 REQ_RX_ADD_QUEUES = 9,
683 MIN_TX_ENTRIES_PER_SUBCRQ = 10,
684 MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
685 MAX_TX_ENTRIES_PER_SUBCRQ = 12,
686 MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
687 REQ_TX_ENTRIES_PER_SUBCRQ = 14,
688 REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
690 PROMISC_REQUESTED = 17,
691 PROMISC_SUPPORTED = 18,
695 MAX_MULTICAST_FILTERS = 22,
696 VLAN_HEADER_INSERTION = 23,
697 RX_VLAN_HEADER_INSERTION = 24,
698 MAX_TX_SG_ENTRIES = 25,
699 RX_SG_SUPPORTED = 26,
700 RX_SG_REQUESTED = 27,
701 OPT_TX_COMP_SUB_QUEUES = 28,
702 OPT_RX_COMP_QUEUES = 29,
703 OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
704 OPT_TX_ENTRIES_PER_SUBCRQ = 31,
705 OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
709 enum ibmvnic_error_cause {
719 enum ibmvnic_commands {
720 VERSION_EXCHANGE = 0x01,
721 VERSION_EXCHANGE_RSP = 0x81,
722 QUERY_CAPABILITY = 0x02,
723 QUERY_CAPABILITY_RSP = 0x82,
724 REQUEST_CAPABILITY = 0x03,
725 REQUEST_CAPABILITY_RSP = 0x83,
728 QUERY_PHYS_PARMS = 0x05,
729 QUERY_PHYS_PARMS_RSP = 0x85,
730 QUERY_PHYS_CAPABILITIES = 0x06,
731 QUERY_PHYS_CAPABILITIES_RSP = 0x86,
732 SET_PHYS_PARMS = 0x07,
733 SET_PHYS_PARMS_RSP = 0x87,
734 ERROR_INDICATION = 0x08,
735 LOGICAL_LINK_STATE = 0x0C,
736 LOGICAL_LINK_STATE_RSP = 0x8C,
737 REQUEST_STATISTICS = 0x0D,
738 REQUEST_STATISTICS_RSP = 0x8D,
739 COLLECT_FW_TRACE = 0x11,
740 COLLECT_FW_TRACE_RSP = 0x91,
741 LINK_STATE_INDICATION = 0x12,
742 CHANGE_MAC_ADDR = 0x13,
743 CHANGE_MAC_ADDR_RSP = 0x93,
744 MULTICAST_CTRL = 0x14,
745 MULTICAST_CTRL_RSP = 0x94,
747 GET_VPD_SIZE_RSP = 0x95,
752 QUERY_IP_OFFLOAD = 0x18,
753 QUERY_IP_OFFLOAD_RSP = 0x98,
754 CONTROL_IP_OFFLOAD = 0x19,
755 CONTROL_IP_OFFLOAD_RSP = 0x99,
756 ACL_CHANGE_INDICATION = 0x1A,
758 ACL_QUERY_RSP = 0x9B,
760 QUERY_MAP_RSP = 0x9D,
762 REQUEST_MAP_RSP = 0x9E,
763 REQUEST_UNMAP = 0x1F,
764 REQUEST_UNMAP_RSP = 0x9F,
766 VLAN_CTRL_RSP = 0xA0,
769 enum ibmvnic_crq_type {
770 IBMVNIC_CRQ_CMD = 0x80,
771 IBMVNIC_CRQ_CMD_RSP = 0x80,
772 IBMVNIC_CRQ_INIT_CMD = 0xC0,
773 IBMVNIC_CRQ_INIT_RSP = 0xC0,
774 IBMVNIC_CRQ_XPORT_EVENT = 0xFF,
777 enum ibmvfc_crq_format {
778 IBMVNIC_CRQ_INIT = 0x01,
779 IBMVNIC_CRQ_INIT_COMPLETE = 0x02,
780 IBMVNIC_PARTITION_MIGRATED = 0x06,
781 IBMVNIC_DEVICE_FAILOVER = 0x08,
784 struct ibmvnic_crq_queue {
785 union ibmvnic_crq *msgs;
787 dma_addr_t msg_token;
788 /* Used for serialization of msgs, cur */
795 struct ibmvnic_generic_scrq generic;
796 struct ibmvnic_tx_comp_desc tx_comp;
797 struct ibmvnic_tx_desc v1;
798 struct ibmvnic_hdr_desc hdr;
799 struct ibmvnic_hdr_ext_desc hdr_ext;
800 struct ibmvnic_sge_desc sge;
801 struct ibmvnic_rx_comp_desc rx_comp;
802 struct ibmvnic_rx_buff_add_desc rx_add;
805 struct ibmvnic_ind_xmit_queue {
806 union sub_crq *indir_arr;
807 dma_addr_t indir_dma;
811 struct ibmvnic_sub_crq_queue {
814 dma_addr_t msg_token;
815 unsigned long crq_num;
816 unsigned long hw_irq;
818 unsigned int pool_index;
820 /* Used for serialization of msgs, cur */
822 struct sk_buff *rx_skb_top;
823 struct ibmvnic_adapter *adapter;
824 struct ibmvnic_ind_xmit_queue ind_buf;
828 cpumask_var_t affinity_mask;
829 } ____cacheline_aligned;
831 struct ibmvnic_long_term_buff {
838 struct ibmvnic_ltb_set {
840 struct ibmvnic_long_term_buff *ltbs;
843 struct ibmvnic_tx_buff {
850 struct ibmvnic_tx_pool {
851 struct ibmvnic_tx_buff *tx_buff;
855 struct ibmvnic_ltb_set ltb_set;
858 } ____cacheline_aligned;
860 struct ibmvnic_rx_buff {
868 struct ibmvnic_rx_pool {
869 struct ibmvnic_rx_buff *rx_buff;
870 int size; /* # of buffers in the pool */
878 struct ibmvnic_ltb_set ltb_set;
879 } ____cacheline_aligned;
887 enum vnic_state {VNIC_PROBING = 1,
897 enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
900 VNIC_RESET_NON_FATAL,
902 VNIC_RESET_CHANGE_PARAM,
903 VNIC_RESET_PASSIVE_INIT};
906 enum ibmvnic_reset_reason reset_reason;
907 struct list_head list;
910 struct ibmvnic_tunables {
918 struct ibmvnic_adapter {
919 struct vio_dev *vdev;
920 struct net_device *netdev;
921 struct ibmvnic_crq_queue crq;
922 u8 mac_addr[ETH_ALEN];
923 struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
924 dma_addr_t ip_offload_tok;
925 struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
926 dma_addr_t ip_offload_ctrl_tok;
929 /* Vital Product Data (VPD) */
930 struct ibmvnic_vpd *vpd;
934 struct ibmvnic_statistics stats;
935 dma_addr_t stats_token;
936 struct completion stats_done;
937 int replenish_no_mem;
938 int replenish_add_buff_success;
939 int replenish_add_buff_failure;
940 int replenish_task_cycles;
944 struct ibmvnic_tx_queue_stats *tx_stats_buffers;
945 struct ibmvnic_rx_queue_stats *rx_stats_buffers;
948 int logical_link_state;
954 struct ibmvnic_login_buffer *login_buf;
955 dma_addr_t login_buf_token;
958 struct ibmvnic_login_rsp_buffer *login_rsp_buf;
959 dma_addr_t login_rsp_buf_token;
960 int login_rsp_buf_sz;
962 atomic_t running_cap_crqs;
964 struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
965 struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
968 struct napi_struct *napi;
969 struct ibmvnic_rx_pool *rx_pool;
972 struct ibmvnic_tx_pool *tx_pool;
973 struct ibmvnic_tx_pool *tso_pool;
974 struct completion probe_done;
975 struct completion init_done;
978 struct completion fw_done;
979 /* Used for serialization of device commands */
980 struct mutex fw_lock;
983 struct completion reset_done;
987 /* CPU hotplug instances for online & dead */
988 struct hlist_node node;
989 struct hlist_node node_dead;
991 /* partner capabilities */
994 u64 min_rx_add_queues;
997 u64 max_rx_add_queues;
1000 u64 req_rx_add_queues;
1001 u64 min_tx_entries_per_subcrq;
1002 u64 min_rx_add_entries_per_subcrq;
1003 u64 max_tx_entries_per_subcrq;
1004 u64 max_rx_add_entries_per_subcrq;
1005 u64 req_tx_entries_per_subcrq;
1006 u64 req_rx_add_entries_per_subcrq;
1008 u64 promisc_requested;
1009 u64 promisc_supported;
1014 u64 max_multicast_filters;
1015 u64 vlan_header_insertion;
1016 u64 rx_vlan_header_insertion;
1017 u64 max_tx_sg_entries;
1018 u64 rx_sg_supported;
1019 u64 rx_sg_requested;
1020 u64 opt_tx_comp_sub_queues;
1021 u64 opt_rx_comp_queues;
1022 u64 opt_rx_bufadd_q_per_rx_comp_q;
1023 u64 opt_tx_entries_per_subcrq;
1024 u64 opt_rxba_entries_per_subcrq;
1025 __be64 tx_rx_desc_req;
1026 #define MAX_MAP_ID 255
1027 DECLARE_BITMAP(map_ids, MAX_MAP_ID);
1028 u32 num_active_rx_scrqs;
1029 u32 num_active_rx_pools;
1030 u32 num_active_rx_napi;
1031 u32 num_active_tx_scrqs;
1032 u32 num_active_tx_pools;
1034 u32 prev_rx_pool_size;
1035 u32 prev_tx_pool_size;
1039 struct tasklet_struct tasklet;
1040 enum vnic_state state;
1041 /* Used for serialization of state field. When taking both state
1042 * and rwi locks, take state lock first.
1044 spinlock_t state_lock;
1045 enum ibmvnic_reset_reason reset_reason;
1046 struct list_head rwi_list;
1047 /* Used for serialization of rwi_list. When taking both state
1048 * and rwi locks, take state lock first
1050 spinlock_t rwi_lock;
1051 struct work_struct ibmvnic_reset;
1052 struct delayed_work ibmvnic_delayed_reset;
1053 unsigned long resetting;
1054 /* last device reset time */
1055 unsigned long last_reset_time;
1058 bool from_passive_init;
1060 /* protected by rcu */
1061 bool tx_queues_active;
1062 bool failover_pending;
1063 bool force_reset_recovery;
1065 struct ibmvnic_tunables desired;
1066 struct ibmvnic_tunables fallback;