GNU Linux-libre 4.9.311-gnu1
[releases.git] / drivers / net / ethernet / intel / e1000 / e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static const struct pci_device_id e1000_pci_tbl[] = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                                     struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                                     struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                                     struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                                     struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 int e1000_open(struct net_device *netdev);
118 int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
148                                          struct e1000_rx_ring *rx_ring,
149                                          int cleaned_count)
150 {
151 }
152 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
153                                    struct e1000_rx_ring *rx_ring,
154                                    int cleaned_count);
155 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156                                          struct e1000_rx_ring *rx_ring,
157                                          int cleaned_count);
158 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
160                            int cmd);
161 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163 static void e1000_tx_timeout(struct net_device *dev);
164 static void e1000_reset_task(struct work_struct *work);
165 static void e1000_smartspeed(struct e1000_adapter *adapter);
166 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
167                                        struct sk_buff *skb);
168
169 static bool e1000_vlan_used(struct e1000_adapter *adapter);
170 static void e1000_vlan_mode(struct net_device *netdev,
171                             netdev_features_t features);
172 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
173                                      bool filter_on);
174 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
175                                  __be16 proto, u16 vid);
176 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
177                                   __be16 proto, u16 vid);
178 static void e1000_restore_vlan(struct e1000_adapter *adapter);
179
180 #ifdef CONFIG_PM
181 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
182 static int e1000_resume(struct pci_dev *pdev);
183 #endif
184 static void e1000_shutdown(struct pci_dev *pdev);
185
186 #ifdef CONFIG_NET_POLL_CONTROLLER
187 /* for netdump / net console */
188 static void e1000_netpoll (struct net_device *netdev);
189 #endif
190
191 #define COPYBREAK_DEFAULT 256
192 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
193 module_param(copybreak, uint, 0644);
194 MODULE_PARM_DESC(copybreak,
195         "Maximum size of packet that is copied to a new buffer on receive");
196
197 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
198                                                 pci_channel_state_t state);
199 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
200 static void e1000_io_resume(struct pci_dev *pdev);
201
202 static const struct pci_error_handlers e1000_err_handler = {
203         .error_detected = e1000_io_error_detected,
204         .slot_reset = e1000_io_slot_reset,
205         .resume = e1000_io_resume,
206 };
207
208 static struct pci_driver e1000_driver = {
209         .name     = e1000_driver_name,
210         .id_table = e1000_pci_tbl,
211         .probe    = e1000_probe,
212         .remove   = e1000_remove,
213 #ifdef CONFIG_PM
214         /* Power Management Hooks */
215         .suspend  = e1000_suspend,
216         .resume   = e1000_resume,
217 #endif
218         .shutdown = e1000_shutdown,
219         .err_handler = &e1000_err_handler
220 };
221
222 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
223 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_VERSION);
226
227 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
228 static int debug = -1;
229 module_param(debug, int, 0);
230 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
231
232 /**
233  * e1000_get_hw_dev - return device
234  * @hw: pointer to the hardware structure
235  * used by the hardware layer to print debugging information
236  **/
237 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
238 {
239         struct e1000_adapter *adapter = hw->back;
240         return adapter->netdev;
241 }
242
243 /**
244  * e1000_init_module - Driver Registration Routine
245  *
246  * e1000_init_module is the first routine called when the driver is
247  * loaded. All it does is register with the PCI subsystem.
248  **/
249 static int __init e1000_init_module(void)
250 {
251         int ret;
252         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
253
254         pr_info("%s\n", e1000_copyright);
255
256         ret = pci_register_driver(&e1000_driver);
257         if (copybreak != COPYBREAK_DEFAULT) {
258                 if (copybreak == 0)
259                         pr_info("copybreak disabled\n");
260                 else
261                         pr_info("copybreak enabled for "
262                                    "packets <= %u bytes\n", copybreak);
263         }
264         return ret;
265 }
266
267 module_init(e1000_init_module);
268
269 /**
270  * e1000_exit_module - Driver Exit Cleanup Routine
271  *
272  * e1000_exit_module is called just before the driver is removed
273  * from memory.
274  **/
275 static void __exit e1000_exit_module(void)
276 {
277         pci_unregister_driver(&e1000_driver);
278 }
279
280 module_exit(e1000_exit_module);
281
282 static int e1000_request_irq(struct e1000_adapter *adapter)
283 {
284         struct net_device *netdev = adapter->netdev;
285         irq_handler_t handler = e1000_intr;
286         int irq_flags = IRQF_SHARED;
287         int err;
288
289         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
290                           netdev);
291         if (err) {
292                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
293         }
294
295         return err;
296 }
297
298 static void e1000_free_irq(struct e1000_adapter *adapter)
299 {
300         struct net_device *netdev = adapter->netdev;
301
302         free_irq(adapter->pdev->irq, netdev);
303 }
304
305 /**
306  * e1000_irq_disable - Mask off interrupt generation on the NIC
307  * @adapter: board private structure
308  **/
309 static void e1000_irq_disable(struct e1000_adapter *adapter)
310 {
311         struct e1000_hw *hw = &adapter->hw;
312
313         ew32(IMC, ~0);
314         E1000_WRITE_FLUSH();
315         synchronize_irq(adapter->pdev->irq);
316 }
317
318 /**
319  * e1000_irq_enable - Enable default interrupt generation settings
320  * @adapter: board private structure
321  **/
322 static void e1000_irq_enable(struct e1000_adapter *adapter)
323 {
324         struct e1000_hw *hw = &adapter->hw;
325
326         ew32(IMS, IMS_ENABLE_MASK);
327         E1000_WRITE_FLUSH();
328 }
329
330 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331 {
332         struct e1000_hw *hw = &adapter->hw;
333         struct net_device *netdev = adapter->netdev;
334         u16 vid = hw->mng_cookie.vlan_id;
335         u16 old_vid = adapter->mng_vlan_id;
336
337         if (!e1000_vlan_used(adapter))
338                 return;
339
340         if (!test_bit(vid, adapter->active_vlans)) {
341                 if (hw->mng_cookie.status &
342                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
344                         adapter->mng_vlan_id = vid;
345                 } else {
346                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347                 }
348                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349                     (vid != old_vid) &&
350                     !test_bit(old_vid, adapter->active_vlans))
351                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
352                                                old_vid);
353         } else {
354                 adapter->mng_vlan_id = vid;
355         }
356 }
357
358 static void e1000_init_manageability(struct e1000_adapter *adapter)
359 {
360         struct e1000_hw *hw = &adapter->hw;
361
362         if (adapter->en_mng_pt) {
363                 u32 manc = er32(MANC);
364
365                 /* disable hardware interception of ARP */
366                 manc &= ~(E1000_MANC_ARP_EN);
367
368                 ew32(MANC, manc);
369         }
370 }
371
372 static void e1000_release_manageability(struct e1000_adapter *adapter)
373 {
374         struct e1000_hw *hw = &adapter->hw;
375
376         if (adapter->en_mng_pt) {
377                 u32 manc = er32(MANC);
378
379                 /* re-enable hardware interception of ARP */
380                 manc |= E1000_MANC_ARP_EN;
381
382                 ew32(MANC, manc);
383         }
384 }
385
386 /**
387  * e1000_configure - configure the hardware for RX and TX
388  * @adapter: board private structure
389  **/
390 static void e1000_configure(struct e1000_adapter *adapter)
391 {
392         struct net_device *netdev = adapter->netdev;
393         int i;
394
395         e1000_set_rx_mode(netdev);
396
397         e1000_restore_vlan(adapter);
398         e1000_init_manageability(adapter);
399
400         e1000_configure_tx(adapter);
401         e1000_setup_rctl(adapter);
402         e1000_configure_rx(adapter);
403         /* call E1000_DESC_UNUSED which always leaves
404          * at least 1 descriptor unused to make sure
405          * next_to_use != next_to_clean
406          */
407         for (i = 0; i < adapter->num_rx_queues; i++) {
408                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
409                 adapter->alloc_rx_buf(adapter, ring,
410                                       E1000_DESC_UNUSED(ring));
411         }
412 }
413
414 int e1000_up(struct e1000_adapter *adapter)
415 {
416         struct e1000_hw *hw = &adapter->hw;
417
418         /* hardware has been reset, we need to reload some things */
419         e1000_configure(adapter);
420
421         clear_bit(__E1000_DOWN, &adapter->flags);
422
423         napi_enable(&adapter->napi);
424
425         e1000_irq_enable(adapter);
426
427         netif_wake_queue(adapter->netdev);
428
429         /* fire a link change interrupt to start the watchdog */
430         ew32(ICS, E1000_ICS_LSC);
431         return 0;
432 }
433
434 /**
435  * e1000_power_up_phy - restore link in case the phy was powered down
436  * @adapter: address of board private structure
437  *
438  * The phy may be powered down to save power and turn off link when the
439  * driver is unloaded and wake on lan is not enabled (among others)
440  * *** this routine MUST be followed by a call to e1000_reset ***
441  **/
442 void e1000_power_up_phy(struct e1000_adapter *adapter)
443 {
444         struct e1000_hw *hw = &adapter->hw;
445         u16 mii_reg = 0;
446
447         /* Just clear the power down bit to wake the phy back up */
448         if (hw->media_type == e1000_media_type_copper) {
449                 /* according to the manual, the phy will retain its
450                  * settings across a power-down/up cycle
451                  */
452                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
453                 mii_reg &= ~MII_CR_POWER_DOWN;
454                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
455         }
456 }
457
458 static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 {
460         struct e1000_hw *hw = &adapter->hw;
461
462         /* Power down the PHY so no link is implied when interface is down.
463          * The PHY cannot be powered down if any of the following is true:
464          * (a) WoL is enabled
465          * (b) AMT is active
466          * (c) SoL/IDER session is active
467          */
468         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469            hw->media_type == e1000_media_type_copper) {
470                 u16 mii_reg = 0;
471
472                 switch (hw->mac_type) {
473                 case e1000_82540:
474                 case e1000_82545:
475                 case e1000_82545_rev_3:
476                 case e1000_82546:
477                 case e1000_ce4100:
478                 case e1000_82546_rev_3:
479                 case e1000_82541:
480                 case e1000_82541_rev_2:
481                 case e1000_82547:
482                 case e1000_82547_rev_2:
483                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
484                                 goto out;
485                         break;
486                 default:
487                         goto out;
488                 }
489                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
490                 mii_reg |= MII_CR_POWER_DOWN;
491                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
492                 msleep(1);
493         }
494 out:
495         return;
496 }
497
498 static void e1000_down_and_stop(struct e1000_adapter *adapter)
499 {
500         set_bit(__E1000_DOWN, &adapter->flags);
501
502         cancel_delayed_work_sync(&adapter->watchdog_task);
503
504         /*
505          * Since the watchdog task can reschedule other tasks, we should cancel
506          * it first; otherwise we can run into the situation where a work
507          * item is still running after the adapter has been turned down.
508          */
509
510         cancel_delayed_work_sync(&adapter->phy_info_task);
511         cancel_delayed_work_sync(&adapter->fifo_stall_task);
512
513         /* Only kill reset task if adapter is not resetting */
514         if (!test_bit(__E1000_RESETTING, &adapter->flags))
515                 cancel_work_sync(&adapter->reset_task);
516 }
517
518 void e1000_down(struct e1000_adapter *adapter)
519 {
520         struct e1000_hw *hw = &adapter->hw;
521         struct net_device *netdev = adapter->netdev;
522         u32 rctl, tctl;
523
524         /* disable receives in the hardware */
525         rctl = er32(RCTL);
526         ew32(RCTL, rctl & ~E1000_RCTL_EN);
527         /* flush and sleep below */
528
529         netif_tx_disable(netdev);
530
531         /* disable transmits in the hardware */
532         tctl = er32(TCTL);
533         tctl &= ~E1000_TCTL_EN;
534         ew32(TCTL, tctl);
535         /* flush both disables and wait for them to finish */
536         E1000_WRITE_FLUSH();
537         msleep(10);
538
539         /* Set the carrier off after transmits have been disabled in the
540          * hardware, to avoid race conditions with e1000_watchdog() (which
541          * may be running concurrently to us, checking for the carrier
542          * bit to decide whether it should enable transmits again). Such
543          * a race condition would result into transmission being disabled
544          * in the hardware until the next IFF_DOWN+IFF_UP cycle.
545          */
546         netif_carrier_off(netdev);
547
548         napi_disable(&adapter->napi);
549
550         e1000_irq_disable(adapter);
551
552         /* Setting DOWN must be after irq_disable to prevent
553          * a screaming interrupt.  Setting DOWN also prevents
554          * tasks from rescheduling.
555          */
556         e1000_down_and_stop(adapter);
557
558         adapter->link_speed = 0;
559         adapter->link_duplex = 0;
560
561         e1000_reset(adapter);
562         e1000_clean_all_tx_rings(adapter);
563         e1000_clean_all_rx_rings(adapter);
564 }
565
566 void e1000_reinit_locked(struct e1000_adapter *adapter)
567 {
568         WARN_ON(in_interrupt());
569         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
570                 msleep(1);
571
572         /* only run the task if not already down */
573         if (!test_bit(__E1000_DOWN, &adapter->flags)) {
574                 e1000_down(adapter);
575                 e1000_up(adapter);
576         }
577
578         clear_bit(__E1000_RESETTING, &adapter->flags);
579 }
580
581 void e1000_reset(struct e1000_adapter *adapter)
582 {
583         struct e1000_hw *hw = &adapter->hw;
584         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
585         bool legacy_pba_adjust = false;
586         u16 hwm;
587
588         /* Repartition Pba for greater than 9k mtu
589          * To take effect CTRL.RST is required.
590          */
591
592         switch (hw->mac_type) {
593         case e1000_82542_rev2_0:
594         case e1000_82542_rev2_1:
595         case e1000_82543:
596         case e1000_82544:
597         case e1000_82540:
598         case e1000_82541:
599         case e1000_82541_rev_2:
600                 legacy_pba_adjust = true;
601                 pba = E1000_PBA_48K;
602                 break;
603         case e1000_82545:
604         case e1000_82545_rev_3:
605         case e1000_82546:
606         case e1000_ce4100:
607         case e1000_82546_rev_3:
608                 pba = E1000_PBA_48K;
609                 break;
610         case e1000_82547:
611         case e1000_82547_rev_2:
612                 legacy_pba_adjust = true;
613                 pba = E1000_PBA_30K;
614                 break;
615         case e1000_undefined:
616         case e1000_num_macs:
617                 break;
618         }
619
620         if (legacy_pba_adjust) {
621                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
622                         pba -= 8; /* allocate more FIFO for Tx */
623
624                 if (hw->mac_type == e1000_82547) {
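                            /* Illustration (assuming the default 30 KB PBA set
                             * above and no jumbo adjustment): the remaining
                             * E1000_PBA_40K - 30 = 10 KB of packet buffer is
                             * used as the Tx FIFO window for the 82547
                             * fifo-stall workaround below.
                             */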
625                         adapter->tx_fifo_head = 0;
626                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
627                         adapter->tx_fifo_size =
628                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
629                         atomic_set(&adapter->tx_fifo_stall, 0);
630                 }
631         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
632                 /* adjust PBA for jumbo frames */
633                 ew32(PBA, pba);
634
635                 /* To maintain wire speed transmits, the Tx FIFO should be
636                  * large enough to accommodate two full transmit packets,
637                  * rounded up to the next 1KB and expressed in KB.  Likewise,
638                  * the Rx FIFO should be large enough to accommodate at least
639                  * one full receive packet and is similarly rounded up and
640                  * expressed in KB.
641                  */
642                 pba = er32(PBA);
643                 /* upper 16 bits has Tx packet buffer allocation size in KB */
644                 tx_space = pba >> 16;
645                 /* lower 16 bits has Rx packet buffer allocation size in KB */
646                 pba &= 0xffff;
647                 /* the Tx fifo also stores 16 bytes of information about the Tx
648                  * but don't include ethernet FCS because hardware appends it
649                  */
650                 min_tx_space = (hw->max_frame_size +
651                                 sizeof(struct e1000_tx_desc) -
652                                 ETH_FCS_LEN) * 2;
653                 min_tx_space = ALIGN(min_tx_space, 1024);
654                 min_tx_space >>= 10;
655                 /* software strips receive CRC, so leave room for it */
656                 min_rx_space = hw->max_frame_size;
657                 min_rx_space = ALIGN(min_rx_space, 1024);
658                 min_rx_space >>= 10;
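                    /* Worked example (assumed jumbo setup): with a 9018-byte
                     * max_frame_size, min_tx_space = (9018 + 16 - 4) * 2 =
                     * 18060, aligned up to 18432 and shifted down to 18 (KB),
                     * while min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB).
                     */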
659
660                 /* If current Tx allocation is less than the min Tx FIFO size,
661                  * and the min Tx FIFO size is less than the current Rx FIFO
662                  * allocation, take space away from current Rx allocation
663                  */
664                 if (tx_space < min_tx_space &&
665                     ((min_tx_space - tx_space) < pba)) {
666                         pba = pba - (min_tx_space - tx_space);
667
668                         /* PCI/PCIx hardware has PBA alignment constraints */
669                         switch (hw->mac_type) {
670                         case e1000_82545 ... e1000_82546_rev_3:
671                                 pba &= ~(E1000_PBA_8K - 1);
672                                 break;
673                         default:
674                                 break;
675                         }
676
677                         /* if short on Rx space, Rx wins and must trump Tx
678                          * adjustment or use Early Receive if available
679                          */
680                         if (pba < min_rx_space)
681                                 pba = min_rx_space;
682                 }
683         }
684
685         ew32(PBA, pba);
686
687         /* flow control settings:
688          * The high water mark must be low enough to fit one full frame
689          * (or the size used for early receive) above it in the Rx FIFO.
690          * Set it to the lower of:
691          * - 90% of the Rx FIFO size, and
692          * - the full Rx FIFO size minus the early receive size (for parts
693          *   with ERT support assuming ERT set to E1000_ERT_2048), or
694          * - the full Rx FIFO size minus one full frame
695          */
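            /* Illustrative numbers (assuming pba = 48 KB and a 1522-byte max
             * frame): hwm = min(49152 * 9 / 10, 49152 - 1522)
             *             = min(44236, 47630) = 44236,
             * which the masking below rounds down to 44232 (8-byte granularity).
             */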
696         hwm = min(((pba << 10) * 9 / 10),
697                   ((pba << 10) - hw->max_frame_size));
698
699         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
700         hw->fc_low_water = hw->fc_high_water - 8;
701         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
702         hw->fc_send_xon = 1;
703         hw->fc = hw->original_fc;
704
705         /* Allow time for pending master requests to run */
706         e1000_reset_hw(hw);
707         if (hw->mac_type >= e1000_82544)
708                 ew32(WUC, 0);
709
710         if (e1000_init_hw(hw))
711                 e_dev_err("Hardware Error\n");
712         e1000_update_mng_vlan(adapter);
713
714         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
715         if (hw->mac_type >= e1000_82544 &&
716             hw->autoneg == 1 &&
717             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
718                 u32 ctrl = er32(CTRL);
719                 /* clear phy power management bit if we are in gig only mode,
720                  * which if enabled will attempt negotiation to 100Mb, which
721                  * can cause a loss of link at power off or driver unload
722                  */
723                 ctrl &= ~E1000_CTRL_SWDPIN3;
724                 ew32(CTRL, ctrl);
725         }
726
727         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
728         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
729
730         e1000_reset_adaptive(hw);
731         e1000_phy_get_info(hw, &adapter->phy_info);
732
733         e1000_release_manageability(adapter);
734 }
735
736 /* Dump the eeprom for users having checksum issues */
737 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
738 {
739         struct net_device *netdev = adapter->netdev;
740         struct ethtool_eeprom eeprom;
741         const struct ethtool_ops *ops = netdev->ethtool_ops;
742         u8 *data;
743         int i;
744         u16 csum_old, csum_new = 0;
745
746         eeprom.len = ops->get_eeprom_len(netdev);
747         eeprom.offset = 0;
748
749         data = kmalloc(eeprom.len, GFP_KERNEL);
750         if (!data)
751                 return;
752
753         ops->get_eeprom(netdev, &eeprom, data);
754
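            /* The EEPROM is laid out so that the 16-bit words from offset 0 up
             * to and including the checksum word at EEPROM_CHECKSUM_REG sum to
             * EEPROM_SUM; csum_new below is therefore the value the checksum
             * word should have held for the image to validate.
             */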
755         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
756                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
757         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
758                 csum_new += data[i] + (data[i + 1] << 8);
759         csum_new = EEPROM_SUM - csum_new;
760
761         pr_err("/*********************/\n");
762         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
763         pr_err("Calculated              : 0x%04x\n", csum_new);
764
765         pr_err("Offset    Values\n");
766         pr_err("========  ======\n");
767         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
768
769         pr_err("Include this output when contacting your support provider.\n");
770         pr_err("This is not a software error! Something bad happened to\n");
771         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
772         pr_err("result in further problems, possibly loss of data,\n");
773         pr_err("corruption or system hangs!\n");
774         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
775         pr_err("which is invalid and requires you to set the proper MAC\n");
776         pr_err("address manually before continuing to enable this network\n");
777         pr_err("device. Please inspect the EEPROM dump and report the\n");
778         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
779         pr_err("/*********************/\n");
780
781         kfree(data);
782 }
783
784 /**
785  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
786  * @pdev: PCI device information struct
787  *
788  * Return true if an adapter needs ioport resources
789  **/
790 static int e1000_is_need_ioport(struct pci_dev *pdev)
791 {
792         switch (pdev->device) {
793         case E1000_DEV_ID_82540EM:
794         case E1000_DEV_ID_82540EM_LOM:
795         case E1000_DEV_ID_82540EP:
796         case E1000_DEV_ID_82540EP_LOM:
797         case E1000_DEV_ID_82540EP_LP:
798         case E1000_DEV_ID_82541EI:
799         case E1000_DEV_ID_82541EI_MOBILE:
800         case E1000_DEV_ID_82541ER:
801         case E1000_DEV_ID_82541ER_LOM:
802         case E1000_DEV_ID_82541GI:
803         case E1000_DEV_ID_82541GI_LF:
804         case E1000_DEV_ID_82541GI_MOBILE:
805         case E1000_DEV_ID_82544EI_COPPER:
806         case E1000_DEV_ID_82544EI_FIBER:
807         case E1000_DEV_ID_82544GC_COPPER:
808         case E1000_DEV_ID_82544GC_LOM:
809         case E1000_DEV_ID_82545EM_COPPER:
810         case E1000_DEV_ID_82545EM_FIBER:
811         case E1000_DEV_ID_82546EB_COPPER:
812         case E1000_DEV_ID_82546EB_FIBER:
813         case E1000_DEV_ID_82546EB_QUAD_COPPER:
814                 return true;
815         default:
816                 return false;
817         }
818 }
819
820 static netdev_features_t e1000_fix_features(struct net_device *netdev,
821         netdev_features_t features)
822 {
823         /* Since there is no support for separate Rx/Tx vlan accel
824          * enable/disable make sure Tx flag is always in same state as Rx.
825          */
826         if (features & NETIF_F_HW_VLAN_CTAG_RX)
827                 features |= NETIF_F_HW_VLAN_CTAG_TX;
828         else
829                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
830
831         return features;
832 }
833
834 static int e1000_set_features(struct net_device *netdev,
835         netdev_features_t features)
836 {
837         struct e1000_adapter *adapter = netdev_priv(netdev);
838         netdev_features_t changed = features ^ netdev->features;
839
840         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
841                 e1000_vlan_mode(netdev, features);
842
843         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
844                 return 0;
845
846         netdev->features = features;
847         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
848
849         if (netif_running(netdev))
850                 e1000_reinit_locked(adapter);
851         else
852                 e1000_reset(adapter);
853
854         return 0;
855 }
856
857 static const struct net_device_ops e1000_netdev_ops = {
858         .ndo_open               = e1000_open,
859         .ndo_stop               = e1000_close,
860         .ndo_start_xmit         = e1000_xmit_frame,
861         .ndo_get_stats          = e1000_get_stats,
862         .ndo_set_rx_mode        = e1000_set_rx_mode,
863         .ndo_set_mac_address    = e1000_set_mac,
864         .ndo_tx_timeout         = e1000_tx_timeout,
865         .ndo_change_mtu         = e1000_change_mtu,
866         .ndo_do_ioctl           = e1000_ioctl,
867         .ndo_validate_addr      = eth_validate_addr,
868         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
869         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
870 #ifdef CONFIG_NET_POLL_CONTROLLER
871         .ndo_poll_controller    = e1000_netpoll,
872 #endif
873         .ndo_fix_features       = e1000_fix_features,
874         .ndo_set_features       = e1000_set_features,
875 };
876
877 /**
878  * e1000_init_hw_struct - initialize members of hw struct
879  * @adapter: board private struct
880  * @hw: structure used by e1000_hw.c
881  *
882  * Factors out initialization of the e1000_hw struct to its own function
883  * that can be called very early at init (just after struct allocation).
884  * Fields are initialized based on PCI device information and
885  * OS network device settings (MTU size).
886  * Returns negative error codes if MAC type setup fails.
887  */
888 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
889                                 struct e1000_hw *hw)
890 {
891         struct pci_dev *pdev = adapter->pdev;
892
893         /* PCI config space info */
894         hw->vendor_id = pdev->vendor;
895         hw->device_id = pdev->device;
896         hw->subsystem_vendor_id = pdev->subsystem_vendor;
897         hw->subsystem_id = pdev->subsystem_device;
898         hw->revision_id = pdev->revision;
899
900         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
901
902         hw->max_frame_size = adapter->netdev->mtu +
903                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
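            /* e.g. with the default 1500-byte MTU this works out to
             * 1500 + 14 + 4 = 1518 bytes (Ethernet header plus FCS included).
             */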
904         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
905
906         /* identify the MAC */
907         if (e1000_set_mac_type(hw)) {
908                 e_err(probe, "Unknown MAC Type\n");
909                 return -EIO;
910         }
911
912         switch (hw->mac_type) {
913         default:
914                 break;
915         case e1000_82541:
916         case e1000_82547:
917         case e1000_82541_rev_2:
918         case e1000_82547_rev_2:
919                 hw->phy_init_script = 1;
920                 break;
921         }
922
923         e1000_set_media_type(hw);
924         e1000_get_bus_info(hw);
925
926         hw->wait_autoneg_complete = false;
927         hw->tbi_compatibility_en = true;
928         hw->adaptive_ifs = true;
929
930         /* Copper options */
931
932         if (hw->media_type == e1000_media_type_copper) {
933                 hw->mdix = AUTO_ALL_MODES;
934                 hw->disable_polarity_correction = false;
935                 hw->master_slave = E1000_MASTER_SLAVE;
936         }
937
938         return 0;
939 }
940
941 /**
942  * e1000_probe - Device Initialization Routine
943  * @pdev: PCI device information struct
944  * @ent: entry in e1000_pci_tbl
945  *
946  * Returns 0 on success, negative on failure
947  *
948  * e1000_probe initializes an adapter identified by a pci_dev structure.
949  * The OS initialization, configuring of the adapter private structure,
950  * and a hardware reset occur.
951  **/
952 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
953 {
954         struct net_device *netdev;
955         struct e1000_adapter *adapter = NULL;
956         struct e1000_hw *hw;
957
958         static int cards_found;
959         static int global_quad_port_a; /* global ksp3 port a indication */
960         int i, err, pci_using_dac;
961         u16 eeprom_data = 0;
962         u16 tmp = 0;
963         u16 eeprom_apme_mask = E1000_EEPROM_APME;
964         int bars, need_ioport;
965         bool disable_dev = false;
966
967         /* do not allocate ioport bars when not needed */
968         need_ioport = e1000_is_need_ioport(pdev);
969         if (need_ioport) {
970                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
971                 err = pci_enable_device(pdev);
972         } else {
973                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
974                 err = pci_enable_device_mem(pdev);
975         }
976         if (err)
977                 return err;
978
979         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
980         if (err)
981                 goto err_pci_reg;
982
983         pci_set_master(pdev);
984         err = pci_save_state(pdev);
985         if (err)
986                 goto err_alloc_etherdev;
987
988         err = -ENOMEM;
989         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
990         if (!netdev)
991                 goto err_alloc_etherdev;
992
993         SET_NETDEV_DEV(netdev, &pdev->dev);
994
995         pci_set_drvdata(pdev, netdev);
996         adapter = netdev_priv(netdev);
997         adapter->netdev = netdev;
998         adapter->pdev = pdev;
999         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
1000         adapter->bars = bars;
1001         adapter->need_ioport = need_ioport;
1002
1003         hw = &adapter->hw;
1004         hw->back = adapter;
1005
1006         err = -EIO;
1007         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1008         if (!hw->hw_addr)
1009                 goto err_ioremap;
1010
1011         if (adapter->need_ioport) {
1012                 for (i = BAR_1; i <= BAR_5; i++) {
1013                         if (pci_resource_len(pdev, i) == 0)
1014                                 continue;
1015                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1016                                 hw->io_base = pci_resource_start(pdev, i);
1017                                 break;
1018                         }
1019                 }
1020         }
1021
1022         /* make ready for any if (hw->...) below */
1023         err = e1000_init_hw_struct(adapter, hw);
1024         if (err)
1025                 goto err_sw_init;
1026
1027         /* there is a workaround being applied below that limits
1028          * 64-bit DMA addresses to 64-bit hardware.  There are some
1029          * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1030          */
1031         pci_using_dac = 0;
1032         if ((hw->bus_type == e1000_bus_type_pcix) &&
1033             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1034                 pci_using_dac = 1;
1035         } else {
1036                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1037                 if (err) {
1038                         pr_err("No usable DMA config, aborting\n");
1039                         goto err_dma;
1040                 }
1041         }
1042
1043         netdev->netdev_ops = &e1000_netdev_ops;
1044         e1000_set_ethtool_ops(netdev);
1045         netdev->watchdog_timeo = 5 * HZ;
1046         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1047
1048         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1049
1050         adapter->bd_number = cards_found;
1051
1052         /* setup the private structure */
1053
1054         err = e1000_sw_init(adapter);
1055         if (err)
1056                 goto err_sw_init;
1057
1058         err = -EIO;
1059         if (hw->mac_type == e1000_ce4100) {
1060                 hw->ce4100_gbe_mdio_base_virt =
1061                                         ioremap(pci_resource_start(pdev, BAR_1),
1062                                                 pci_resource_len(pdev, BAR_1));
1063
1064                 if (!hw->ce4100_gbe_mdio_base_virt)
1065                         goto err_mdio_ioremap;
1066         }
1067
1068         if (hw->mac_type >= e1000_82543) {
1069                 netdev->hw_features = NETIF_F_SG |
1070                                    NETIF_F_HW_CSUM |
1071                                    NETIF_F_HW_VLAN_CTAG_RX;
1072                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1073                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1074         }
1075
1076         if ((hw->mac_type >= e1000_82544) &&
1077            (hw->mac_type != e1000_82547))
1078                 netdev->hw_features |= NETIF_F_TSO;
1079
1080         netdev->priv_flags |= IFF_SUPP_NOFCS;
1081
1082         netdev->features |= netdev->hw_features;
1083         netdev->hw_features |= (NETIF_F_RXCSUM |
1084                                 NETIF_F_RXALL |
1085                                 NETIF_F_RXFCS);
1086
1087         if (pci_using_dac) {
1088                 netdev->features |= NETIF_F_HIGHDMA;
1089                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1090         }
1091
1092         netdev->vlan_features |= (NETIF_F_TSO |
1093                                   NETIF_F_HW_CSUM |
1094                                   NETIF_F_SG);
1095
1096         /* Do not set IFF_UNICAST_FLT for VMware's 82545EM */
1097         if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1098             hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1099                 netdev->priv_flags |= IFF_UNICAST_FLT;
1100
1101         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1102
1103         /* initialize eeprom parameters */
1104         if (e1000_init_eeprom_params(hw)) {
1105                 e_err(probe, "EEPROM initialization failed\n");
1106                 goto err_eeprom;
1107         }
1108
1109         /* before reading the EEPROM, reset the controller to
1110          * put the device in a known good starting state
1111          */
1112
1113         e1000_reset_hw(hw);
1114
1115         /* make sure the EEPROM is good */
1116         if (e1000_validate_eeprom_checksum(hw) < 0) {
1117                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1118                 e1000_dump_eeprom(adapter);
1119                 /* set MAC address to all zeroes to invalidate and temporarily
1120                  * disable this device for the user. This blocks regular
1121                  * traffic while still permitting ethtool ioctls from reaching
1122                  * the hardware as well as allowing the user to run the
1123                  * interface after manually setting a hw addr using
1124                  * `ip link set address`
1125                  */
1126                 memset(hw->mac_addr, 0, netdev->addr_len);
1127         } else {
1128                 /* copy the MAC address out of the EEPROM */
1129                 if (e1000_read_mac_addr(hw))
1130                         e_err(probe, "EEPROM Read Error\n");
1131         }
1132         /* don't block initialization here due to bad MAC address */
1133         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1134
1135         if (!is_valid_ether_addr(netdev->dev_addr))
1136                 e_err(probe, "Invalid MAC Address\n");
1137
1138
1139         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1140         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1141                           e1000_82547_tx_fifo_stall_task);
1142         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1143         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1144
1145         e1000_check_options(adapter);
1146
1147         /* Initial Wake on LAN setting
1148          * If APM wake is enabled in the EEPROM,
1149          * enable the ACPI Magic Packet filter
1150          */
1151
1152         switch (hw->mac_type) {
1153         case e1000_82542_rev2_0:
1154         case e1000_82542_rev2_1:
1155         case e1000_82543:
1156                 break;
1157         case e1000_82544:
1158                 e1000_read_eeprom(hw,
1159                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1160                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1161                 break;
1162         case e1000_82546:
1163         case e1000_82546_rev_3:
1164                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1165                         e1000_read_eeprom(hw,
1166                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1167                         break;
1168                 }
1169                 /* Fall Through */
1170         default:
1171                 e1000_read_eeprom(hw,
1172                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1173                 break;
1174         }
1175         if (eeprom_data & eeprom_apme_mask)
1176                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1177
1178         /* now that we have the eeprom settings, apply the special cases
1179          * where the eeprom may be wrong or the board simply won't support
1180          * wake on lan on a particular port
1181          */
1182         switch (pdev->device) {
1183         case E1000_DEV_ID_82546GB_PCIE:
1184                 adapter->eeprom_wol = 0;
1185                 break;
1186         case E1000_DEV_ID_82546EB_FIBER:
1187         case E1000_DEV_ID_82546GB_FIBER:
1188                 /* Wake events only supported on port A for dual fiber
1189                  * regardless of eeprom setting
1190                  */
1191                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1192                         adapter->eeprom_wol = 0;
1193                 break;
1194         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1195                 /* if quad port adapter, disable WoL on all but port A */
1196                 if (global_quad_port_a != 0)
1197                         adapter->eeprom_wol = 0;
1198                 else
1199                         adapter->quad_port_a = true;
1200                 /* Reset for multiple quad port adapters */
1201                 if (++global_quad_port_a == 4)
1202                         global_quad_port_a = 0;
1203                 break;
1204         }
1205
1206         /* initialize the wol settings based on the eeprom settings */
1207         adapter->wol = adapter->eeprom_wol;
1208         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1209
1210         /* Auto detect PHY address */
1211         if (hw->mac_type == e1000_ce4100) {
1212                 for (i = 0; i < 32; i++) {
1213                         hw->phy_addr = i;
1214                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1215
1216                         if (tmp != 0 && tmp != 0xFF)
1217                                 break;
1218                 }
1219
1220                 if (i >= 32)
1221                         goto err_eeprom;
1222         }
1223
1224         /* reset the hardware with the new settings */
1225         e1000_reset(adapter);
1226
1227         strcpy(netdev->name, "eth%d");
1228         err = register_netdev(netdev);
1229         if (err)
1230                 goto err_register;
1231
1232         e1000_vlan_filter_on_off(adapter, false);
1233
1234         /* print bus type/speed/width info */
1235         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1236                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1237                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1238                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1239                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1240                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1241                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1242                netdev->dev_addr);
1243
1244         /* carrier off reporting is important to ethtool even BEFORE open */
1245         netif_carrier_off(netdev);
1246
1247         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1248
1249         cards_found++;
1250         return 0;
1251
1252 err_register:
1253 err_eeprom:
1254         e1000_phy_hw_reset(hw);
1255
1256         if (hw->flash_address)
1257                 iounmap(hw->flash_address);
1258         kfree(adapter->tx_ring);
1259         kfree(adapter->rx_ring);
1260 err_dma:
1261 err_sw_init:
1262 err_mdio_ioremap:
1263         iounmap(hw->ce4100_gbe_mdio_base_virt);
1264         iounmap(hw->hw_addr);
1265 err_ioremap:
1266         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1267         free_netdev(netdev);
1268 err_alloc_etherdev:
1269         pci_release_selected_regions(pdev, bars);
1270 err_pci_reg:
1271         if (!adapter || disable_dev)
1272                 pci_disable_device(pdev);
1273         return err;
1274 }
1275
1276 /**
1277  * e1000_remove - Device Removal Routine
1278  * @pdev: PCI device information struct
1279  *
1280  * e1000_remove is called by the PCI subsystem to alert the driver
1281  * that it should release a PCI device. That could be caused by a
1282  * Hot-Plug event, or because the driver is going to be removed from
1283  * memory.
1284  **/
1285 static void e1000_remove(struct pci_dev *pdev)
1286 {
1287         struct net_device *netdev = pci_get_drvdata(pdev);
1288         struct e1000_adapter *adapter = netdev_priv(netdev);
1289         struct e1000_hw *hw = &adapter->hw;
1290         bool disable_dev;
1291
1292         e1000_down_and_stop(adapter);
1293         e1000_release_manageability(adapter);
1294
1295         unregister_netdev(netdev);
1296
1297         e1000_phy_hw_reset(hw);
1298
1299         kfree(adapter->tx_ring);
1300         kfree(adapter->rx_ring);
1301
1302         if (hw->mac_type == e1000_ce4100)
1303                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1304         iounmap(hw->hw_addr);
1305         if (hw->flash_address)
1306                 iounmap(hw->flash_address);
1307         pci_release_selected_regions(pdev, adapter->bars);
1308
1309         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1310         free_netdev(netdev);
1311
1312         if (disable_dev)
1313                 pci_disable_device(pdev);
1314 }
1315
1316 /**
1317  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1318  * @adapter: board private structure to initialize
1319  *
1320  * e1000_sw_init initializes the Adapter private data structure.
1321  * e1000_init_hw_struct MUST be called before this function
1322  **/
1323 static int e1000_sw_init(struct e1000_adapter *adapter)
1324 {
1325         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1326
1327         adapter->num_tx_queues = 1;
1328         adapter->num_rx_queues = 1;
1329
1330         if (e1000_alloc_queues(adapter)) {
1331                 e_err(probe, "Unable to allocate memory for queues\n");
1332                 return -ENOMEM;
1333         }
1334
1335         /* Explicitly disable IRQ since the NIC can be in any state. */
1336         e1000_irq_disable(adapter);
1337
1338         spin_lock_init(&adapter->stats_lock);
1339
1340         set_bit(__E1000_DOWN, &adapter->flags);
1341
1342         return 0;
1343 }
1344
1345 /**
1346  * e1000_alloc_queues - Allocate memory for all rings
1347  * @adapter: board private structure to initialize
1348  *
1349  * We allocate one ring per queue at run-time since we don't know the
1350  * number of queues at compile-time.
1351  **/
1352 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1353 {
1354         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1355                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1356         if (!adapter->tx_ring)
1357                 return -ENOMEM;
1358
1359         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1360                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1361         if (!adapter->rx_ring) {
1362                 kfree(adapter->tx_ring);
1363                 return -ENOMEM;
1364         }
1365
1366         return E1000_SUCCESS;
1367 }
1368
1369 /**
1370  * e1000_open - Called when a network interface is made active
1371  * @netdev: network interface device structure
1372  *
1373  * Returns 0 on success, negative value on failure
1374  *
1375  * The open entry point is called when a network interface is made
1376  * active by the system (IFF_UP).  At this point all resources needed
1377  * for transmit and receive operations are allocated, the interrupt
1378  * handler is registered with the OS, the watchdog task is started,
1379  * and the stack is notified that the interface is ready.
1380  **/
1381 int e1000_open(struct net_device *netdev)
1382 {
1383         struct e1000_adapter *adapter = netdev_priv(netdev);
1384         struct e1000_hw *hw = &adapter->hw;
1385         int err;
1386
1387         /* disallow open during test */
1388         if (test_bit(__E1000_TESTING, &adapter->flags))
1389                 return -EBUSY;
1390
1391         netif_carrier_off(netdev);
1392
1393         /* allocate transmit descriptors */
1394         err = e1000_setup_all_tx_resources(adapter);
1395         if (err)
1396                 goto err_setup_tx;
1397
1398         /* allocate receive descriptors */
1399         err = e1000_setup_all_rx_resources(adapter);
1400         if (err)
1401                 goto err_setup_rx;
1402
1403         e1000_power_up_phy(adapter);
1404
1405         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1406         if ((hw->mng_cookie.status &
1407                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1408                 e1000_update_mng_vlan(adapter);
1409         }
1410
1411         /* before we allocate an interrupt, we must be ready to handle it.
1412          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1413          * as soon as we call pci_request_irq, so we have to setup our
1414          * clean_rx handler before we do so.
1415          */
1416         e1000_configure(adapter);
1417
1418         err = e1000_request_irq(adapter);
1419         if (err)
1420                 goto err_req_irq;
1421
1422         /* From here on the code is the same as e1000_up() */
1423         clear_bit(__E1000_DOWN, &adapter->flags);
1424
1425         napi_enable(&adapter->napi);
1426
1427         e1000_irq_enable(adapter);
1428
1429         netif_start_queue(netdev);
1430
1431         /* fire a link status change interrupt to start the watchdog */
1432         ew32(ICS, E1000_ICS_LSC);
1433
1434         return E1000_SUCCESS;
1435
1436 err_req_irq:
1437         e1000_power_down_phy(adapter);
1438         e1000_free_all_rx_resources(adapter);
1439 err_setup_rx:
1440         e1000_free_all_tx_resources(adapter);
1441 err_setup_tx:
1442         e1000_reset(adapter);
1443
1444         return err;
1445 }
1446
1447 /**
1448  * e1000_close - Disables a network interface
1449  * @netdev: network interface device structure
1450  *
1451  * Returns 0, this is not allowed to fail
1452  *
1453  * The close entry point is called when an interface is de-activated
1454  * by the OS.  The hardware is still under the driver's control, but
1455  * needs to be disabled.  A global MAC reset is issued to stop the
1456  * hardware, and all transmit and receive resources are freed.
1457  **/
1458 int e1000_close(struct net_device *netdev)
1459 {
1460         struct e1000_adapter *adapter = netdev_priv(netdev);
1461         struct e1000_hw *hw = &adapter->hw;
1462         int count = E1000_CHECK_RESET_COUNT;
1463
1464         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1465                 usleep_range(10000, 20000);
1466
1467         WARN_ON(count < 0);
1468
1469         /* signal that we're down so that the reset task will no longer run */
1470         set_bit(__E1000_DOWN, &adapter->flags);
1471         clear_bit(__E1000_RESETTING, &adapter->flags);
1472
1473         e1000_down(adapter);
1474         e1000_power_down_phy(adapter);
1475         e1000_free_irq(adapter);
1476
1477         e1000_free_all_tx_resources(adapter);
1478         e1000_free_all_rx_resources(adapter);
1479
1480         /* kill manageability vlan ID if supported, but not if a vlan with
1481          * the same ID is registered on the host OS (let 8021q kill it)
1482          */
1483         if ((hw->mng_cookie.status &
1484              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1485             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1486                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1487                                        adapter->mng_vlan_id);
1488         }
1489
1490         return 0;
1491 }
1492
1493 /**
1494  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1495  * @adapter: address of board private structure
1496  * @start: address of beginning of memory
1497  * @len: length of memory
1498  **/
1499 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1500                                   unsigned long len)
1501 {
1502         struct e1000_hw *hw = &adapter->hw;
1503         unsigned long begin = (unsigned long)start;
1504         unsigned long end = begin + len;
1505
1506         /* First-revision 82545 and 82546 parts must not allow any memory
1507          * write location to cross a 64 KB boundary, per errata 23
1508          */
1509         if (hw->mac_type == e1000_82545 ||
1510             hw->mac_type == e1000_ce4100 ||
1511             hw->mac_type == e1000_82546) {
1512                 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1513         }
1514
1515         return true;
1516 }
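/* A worked example of the check above, with illustrative addresses:
 *
 *     begin = 0x0000fff0, len = 0x20  ->  end - 1 = 0x0001000f
 *     begin ^ (end - 1) = 0x0001ffff, and (0x0001ffff >> 16) != 0,
 *
 * so the buffer straddles a 64 KB boundary and the function returns
 * false; any buffer wholly inside one 64 KB page shares its upper
 * address bits between begin and (end - 1) and passes the check.
 */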
1517
1518 /**
1519  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1520  * @adapter: board private structure
1521  * @txdr:    tx descriptor ring (for a specific queue) to setup
1522  *
1523  * Return 0 on success, negative on failure
1524  **/
1525 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1526                                     struct e1000_tx_ring *txdr)
1527 {
1528         struct pci_dev *pdev = adapter->pdev;
1529         int size;
1530
1531         size = sizeof(struct e1000_tx_buffer) * txdr->count;
1532         txdr->buffer_info = vzalloc(size);
1533         if (!txdr->buffer_info)
1534                 return -ENOMEM;
1535
1536         /* round up to nearest 4K */
1537
1538         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1539         txdr->size = ALIGN(txdr->size, 4096);
1540
1541         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1542                                         GFP_KERNEL);
1543         if (!txdr->desc) {
1544 setup_tx_desc_die:
1545                 vfree(txdr->buffer_info);
1546                 return -ENOMEM;
1547         }
1548
1549         /* Fix for errata 23, can't cross 64kB boundary */
1550         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1551                 void *olddesc = txdr->desc;
1552                 dma_addr_t olddma = txdr->dma;
1553                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1554                       txdr->size, txdr->desc);
1555                 /* Try again, without freeing the previous */
1556                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1557                                                 &txdr->dma, GFP_KERNEL);
1558                 /* Failed allocation, critical failure */
1559                 if (!txdr->desc) {
1560                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1561                                           olddma);
1562                         goto setup_tx_desc_die;
1563                 }
1564
1565                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1566                         /* give up */
1567                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1568                                           txdr->dma);
1569                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1570                                           olddma);
1571                         e_err(probe, "Unable to allocate aligned memory "
1572                               "for the transmit descriptor ring\n");
1573                         vfree(txdr->buffer_info);
1574                         return -ENOMEM;
1575                 } else {
1576                         /* Free old allocation, new allocation was successful */
1577                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1578                                           olddma);
1579                 }
1580         }
1581         memset(txdr->desc, 0, txdr->size);
1582
1583         txdr->next_to_use = 0;
1584         txdr->next_to_clean = 0;
1585
1586         return 0;
1587 }
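/* A sizing sketch for the allocation above: legacy Tx descriptors are
 * 16 bytes, so for example a 256-entry ring needs 256 * 16 = 4096 bytes
 * (already 4 KB aligned), while an 80-entry ring needs 1280 bytes and
 * ALIGN(1280, 4096) rounds the coherent allocation up to one full page.
 */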
1588
1589 /**
1590  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1591  *                                (Descriptors) for all queues
1592  * @adapter: board private structure
1593  *
1594  * Return 0 on success, negative on failure
1595  **/
1596 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1597 {
1598         int i, err = 0;
1599
1600         for (i = 0; i < adapter->num_tx_queues; i++) {
1601                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1602                 if (err) {
1603                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1604                         for (i-- ; i >= 0; i--)
1605                                 e1000_free_tx_resources(adapter,
1606                                                         &adapter->tx_ring[i]);
1607                         break;
1608                 }
1609         }
1610
1611         return err;
1612 }
1613
1614 /**
1615  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1616  * @adapter: board private structure
1617  *
1618  * Configure the Tx unit of the MAC after a reset.
1619  **/
1620 static void e1000_configure_tx(struct e1000_adapter *adapter)
1621 {
1622         u64 tdba;
1623         struct e1000_hw *hw = &adapter->hw;
1624         u32 tdlen, tctl, tipg;
1625         u32 ipgr1, ipgr2;
1626
1627         /* Setup the HW Tx Head and Tail descriptor pointers */
1628
1629         switch (adapter->num_tx_queues) {
1630         case 1:
1631         default:
1632                 tdba = adapter->tx_ring[0].dma;
1633                 tdlen = adapter->tx_ring[0].count *
1634                         sizeof(struct e1000_tx_desc);
1635                 ew32(TDLEN, tdlen);
1636                 ew32(TDBAH, (tdba >> 32));
1637                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1638                 ew32(TDT, 0);
1639                 ew32(TDH, 0);
1640                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1641                                            E1000_TDH : E1000_82542_TDH);
1642                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1643                                            E1000_TDT : E1000_82542_TDT);
1644                 break;
1645         }
1646
1647         /* Set the default values for the Tx Inter Packet Gap timer */
1648         if ((hw->media_type == e1000_media_type_fiber ||
1649              hw->media_type == e1000_media_type_internal_serdes))
1650                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1651         else
1652                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1653
1654         switch (hw->mac_type) {
1655         case e1000_82542_rev2_0:
1656         case e1000_82542_rev2_1:
1657                 tipg = DEFAULT_82542_TIPG_IPGT;
1658                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1659                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1660                 break;
1661         default:
1662                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1663                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1664                 break;
1665         }
1666         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1667         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1668         ew32(TIPG, tipg);
1669
1670         /* Set the Tx Interrupt Delay register */
1671
1672         ew32(TIDV, adapter->tx_int_delay);
1673         if (hw->mac_type >= e1000_82540)
1674                 ew32(TADV, adapter->tx_abs_int_delay);
1675
1676         /* Program the Transmit Control Register */
1677
1678         tctl = er32(TCTL);
1679         tctl &= ~E1000_TCTL_CT;
1680         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1681                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1682
1683         e1000_config_collision_dist(hw);
1684
1685         /* Setup Transmit Descriptor Settings for eop descriptor */
1686         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1687
1688         /* only set IDE if we are delaying interrupts using the timers */
1689         if (adapter->tx_int_delay)
1690                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1691
1692         if (hw->mac_type < e1000_82543)
1693                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1694         else
1695                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1696
1697         /* Cache if we're 82544 running in PCI-X because we'll
1698          * need this to apply a workaround later in the send path.
1699          */
1700         if (hw->mac_type == e1000_82544 &&
1701             hw->bus_type == e1000_bus_type_pcix)
1702                 adapter->pcix_82544 = true;
1703
1704         ew32(TCTL, tctl);
1705
1706 }
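/* An example of how the TIPG write above packs its fields, assuming the
 * usual copper defaults of IPGT = 8, IPGR1 = 8 and IPGR2 = 6 with shifts
 * of 10 and 20 (see the DEFAULT_82543_TIPG_* and E1000_TIPG_*_SHIFT
 * definitions for the authoritative values):
 *
 *     tipg = 8 | (8 << 10) | (6 << 20) = 0x00602008
 *
 * i.e. bits 9:0 hold the back-to-back IPG, bits 19:10 IPGR1 and
 * bits 29:20 IPGR2.
 */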
1707
1708 /**
1709  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1710  * @adapter: board private structure
1711  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1712  *
1713  * Returns 0 on success, negative on failure
1714  **/
1715 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1716                                     struct e1000_rx_ring *rxdr)
1717 {
1718         struct pci_dev *pdev = adapter->pdev;
1719         int size, desc_len;
1720
1721         size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1722         rxdr->buffer_info = vzalloc(size);
1723         if (!rxdr->buffer_info)
1724                 return -ENOMEM;
1725
1726         desc_len = sizeof(struct e1000_rx_desc);
1727
1728         /* Round up to nearest 4K */
1729
1730         rxdr->size = rxdr->count * desc_len;
1731         rxdr->size = ALIGN(rxdr->size, 4096);
1732
1733         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1734                                         GFP_KERNEL);
1735         if (!rxdr->desc) {
1736 setup_rx_desc_die:
1737                 vfree(rxdr->buffer_info);
1738                 return -ENOMEM;
1739         }
1740
1741         /* Fix for errata 23, can't cross 64kB boundary */
1742         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1743                 void *olddesc = rxdr->desc;
1744                 dma_addr_t olddma = rxdr->dma;
1745                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1746                       rxdr->size, rxdr->desc);
1747                 /* Try again, without freeing the previous */
1748                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1749                                                 &rxdr->dma, GFP_KERNEL);
1750                 /* Failed allocation, critical failure */
1751                 if (!rxdr->desc) {
1752                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1753                                           olddma);
1754                         goto setup_rx_desc_die;
1755                 }
1756
1757                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1758                         /* give up */
1759                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1760                                           rxdr->dma);
1761                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1762                                           olddma);
1763                         e_err(probe, "Unable to allocate aligned memory for "
1764                               "the Rx descriptor ring\n");
1765                         goto setup_rx_desc_die;
1766                 } else {
1767                         /* Free old allocation, new allocation was successful */
1768                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1769                                           olddma);
1770                 }
1771         }
1772         memset(rxdr->desc, 0, rxdr->size);
1773
1774         rxdr->next_to_clean = 0;
1775         rxdr->next_to_use = 0;
1776         rxdr->rx_skb_top = NULL;
1777
1778         return 0;
1779 }
1780
1781 /**
1782  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1783  *                                (Descriptors) for all queues
1784  * @adapter: board private structure
1785  *
1786  * Return 0 on success, negative on failure
1787  **/
1788 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1789 {
1790         int i, err = 0;
1791
1792         for (i = 0; i < adapter->num_rx_queues; i++) {
1793                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1794                 if (err) {
1795                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1796                         for (i-- ; i >= 0; i--)
1797                                 e1000_free_rx_resources(adapter,
1798                                                         &adapter->rx_ring[i]);
1799                         break;
1800                 }
1801         }
1802
1803         return err;
1804 }
1805
1806 /**
1807  * e1000_setup_rctl - configure the receive control registers
1808  * @adapter: Board private structure
1809  **/
1810 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1811 {
1812         struct e1000_hw *hw = &adapter->hw;
1813         u32 rctl;
1814
1815         rctl = er32(RCTL);
1816
1817         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1818
1819         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1820                 E1000_RCTL_RDMTS_HALF |
1821                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1822
1823         if (hw->tbi_compatibility_on == 1)
1824                 rctl |= E1000_RCTL_SBP;
1825         else
1826                 rctl &= ~E1000_RCTL_SBP;
1827
1828         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1829                 rctl &= ~E1000_RCTL_LPE;
1830         else
1831                 rctl |= E1000_RCTL_LPE;
1832
1833         /* Setup buffer sizes */
1834         rctl &= ~E1000_RCTL_SZ_4096;
1835         rctl |= E1000_RCTL_BSEX;
1836         switch (adapter->rx_buffer_len) {
1837         case E1000_RXBUFFER_2048:
1838         default:
1839                 rctl |= E1000_RCTL_SZ_2048;
1840                 rctl &= ~E1000_RCTL_BSEX;
1841                 break;
1842         case E1000_RXBUFFER_4096:
1843                 rctl |= E1000_RCTL_SZ_4096;
1844                 break;
1845         case E1000_RXBUFFER_8192:
1846                 rctl |= E1000_RCTL_SZ_8192;
1847                 break;
1848         case E1000_RXBUFFER_16384:
1849                 rctl |= E1000_RCTL_SZ_16384;
1850                 break;
1851         }
1852
1853         /* This is useful for sniffing bad packets. */
1854         if (adapter->netdev->features & NETIF_F_RXALL) {
1855                 /* UPE and MPE will be handled by normal PROMISC logic
1856                  * in e1000_set_rx_mode
1857                  */
1858                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1859                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1860                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1861
1862                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1863                           E1000_RCTL_DPF | /* Allow filtered pause */
1864                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1865                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1866                  * and that breaks VLANs.
1867                  */
1868         }
1869
1870         ew32(RCTL, rctl);
1871 }
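/* The buffer-size bits above use a two-level encoding: with BSEX clear
 * the BSIZE field selects 2048/1024/512/256 bytes directly, and with
 * BSEX set the same field values are scaled by 16, giving the
 * 4096/8192/16384 byte options (so E1000_RCTL_SZ_4096 reuses the
 * SZ_256 bit pattern).  See the E1000_RCTL_SZ_* definitions in
 * e1000_hw.h for the exact values.
 */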
1872
1873 /**
1874  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1875  * @adapter: board private structure
1876  *
1877  * Configure the Rx unit of the MAC after a reset.
1878  **/
1879 static void e1000_configure_rx(struct e1000_adapter *adapter)
1880 {
1881         u64 rdba;
1882         struct e1000_hw *hw = &adapter->hw;
1883         u32 rdlen, rctl, rxcsum;
1884
1885         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1886                 rdlen = adapter->rx_ring[0].count *
1887                         sizeof(struct e1000_rx_desc);
1888                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1889                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1890         } else {
1891                 rdlen = adapter->rx_ring[0].count *
1892                         sizeof(struct e1000_rx_desc);
1893                 adapter->clean_rx = e1000_clean_rx_irq;
1894                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1895         }
1896
1897         /* disable receives while setting up the descriptors */
1898         rctl = er32(RCTL);
1899         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1900
1901         /* set the Receive Delay Timer Register */
1902         ew32(RDTR, adapter->rx_int_delay);
1903
1904         if (hw->mac_type >= e1000_82540) {
1905                 ew32(RADV, adapter->rx_abs_int_delay);
1906                 if (adapter->itr_setting != 0)
1907                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1908         }
1909
1910         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1911          * the Base and Length of the Rx Descriptor Ring
1912          */
1913         switch (adapter->num_rx_queues) {
1914         case 1:
1915         default:
1916                 rdba = adapter->rx_ring[0].dma;
1917                 ew32(RDLEN, rdlen);
1918                 ew32(RDBAH, (rdba >> 32));
1919                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1920                 ew32(RDT, 0);
1921                 ew32(RDH, 0);
1922                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1923                                            E1000_RDH : E1000_82542_RDH);
1924                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1925                                            E1000_RDT : E1000_82542_RDT);
1926                 break;
1927         }
1928
1929         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1930         if (hw->mac_type >= e1000_82543) {
1931                 rxcsum = er32(RXCSUM);
1932                 if (adapter->rx_csum)
1933                         rxcsum |= E1000_RXCSUM_TUOFL;
1934                 else
1935                         /* don't need to clear IPPCSE as it defaults to 0 */
1936                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1937                 ew32(RXCSUM, rxcsum);
1938         }
1939
1940         /* Enable Receives */
1941         ew32(RCTL, rctl | E1000_RCTL_EN);
1942 }
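/* A note on the delay registers programmed above: RDTR and RADV (like
 * the TIDV/TADV transmit counterparts) count in 1.024 us units per the
 * 8254x documentation, so e.g. rx_int_delay = 8 coalesces interrupts
 * for roughly 8 us after a packet arrives, while RADV caps the total
 * delay when traffic keeps flowing; the driver simply writes the raw
 * module-parameter values here.
 */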
1943
1944 /**
1945  * e1000_free_tx_resources - Free Tx Resources per Queue
1946  * @adapter: board private structure
1947  * @tx_ring: Tx descriptor ring for a specific queue
1948  *
1949  * Free all transmit software resources
1950  **/
1951 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1952                                     struct e1000_tx_ring *tx_ring)
1953 {
1954         struct pci_dev *pdev = adapter->pdev;
1955
1956         e1000_clean_tx_ring(adapter, tx_ring);
1957
1958         vfree(tx_ring->buffer_info);
1959         tx_ring->buffer_info = NULL;
1960
1961         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1962                           tx_ring->dma);
1963
1964         tx_ring->desc = NULL;
1965 }
1966
1967 /**
1968  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1969  * @adapter: board private structure
1970  *
1971  * Free all transmit software resources
1972  **/
1973 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1974 {
1975         int i;
1976
1977         for (i = 0; i < adapter->num_tx_queues; i++)
1978                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1979 }
1980
1981 static void
1982 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1983                                  struct e1000_tx_buffer *buffer_info)
1984 {
1985         if (buffer_info->dma) {
1986                 if (buffer_info->mapped_as_page)
1987                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1988                                        buffer_info->length, DMA_TO_DEVICE);
1989                 else
1990                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1991                                          buffer_info->length,
1992                                          DMA_TO_DEVICE);
1993                 buffer_info->dma = 0;
1994         }
1995         if (buffer_info->skb) {
1996                 dev_kfree_skb_any(buffer_info->skb);
1997                 buffer_info->skb = NULL;
1998         }
1999         buffer_info->time_stamp = 0;
2000         /* buffer_info must be completely set up in the transmit path */
2001 }
2002
2003 /**
2004  * e1000_clean_tx_ring - Free Tx Buffers
2005  * @adapter: board private structure
2006  * @tx_ring: ring to be cleaned
2007  **/
2008 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2009                                 struct e1000_tx_ring *tx_ring)
2010 {
2011         struct e1000_hw *hw = &adapter->hw;
2012         struct e1000_tx_buffer *buffer_info;
2013         unsigned long size;
2014         unsigned int i;
2015
2016         /* Free all the Tx ring sk_buffs */
2017
2018         for (i = 0; i < tx_ring->count; i++) {
2019                 buffer_info = &tx_ring->buffer_info[i];
2020                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2021         }
2022
2023         netdev_reset_queue(adapter->netdev);
2024         size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2025         memset(tx_ring->buffer_info, 0, size);
2026
2027         /* Zero out the descriptor ring */
2028
2029         memset(tx_ring->desc, 0, tx_ring->size);
2030
2031         tx_ring->next_to_use = 0;
2032         tx_ring->next_to_clean = 0;
2033         tx_ring->last_tx_tso = false;
2034
2035         writel(0, hw->hw_addr + tx_ring->tdh);
2036         writel(0, hw->hw_addr + tx_ring->tdt);
2037 }
2038
2039 /**
2040  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2041  * @adapter: board private structure
2042  **/
2043 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2044 {
2045         int i;
2046
2047         for (i = 0; i < adapter->num_tx_queues; i++)
2048                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2049 }
2050
2051 /**
2052  * e1000_free_rx_resources - Free Rx Resources
2053  * @adapter: board private structure
2054  * @rx_ring: ring to clean the resources from
2055  *
2056  * Free all receive software resources
2057  **/
2058 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2059                                     struct e1000_rx_ring *rx_ring)
2060 {
2061         struct pci_dev *pdev = adapter->pdev;
2062
2063         e1000_clean_rx_ring(adapter, rx_ring);
2064
2065         vfree(rx_ring->buffer_info);
2066         rx_ring->buffer_info = NULL;
2067
2068         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2069                           rx_ring->dma);
2070
2071         rx_ring->desc = NULL;
2072 }
2073
2074 /**
2075  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2076  * @adapter: board private structure
2077  *
2078  * Free all receive software resources
2079  **/
2080 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2081 {
2082         int i;
2083
2084         for (i = 0; i < adapter->num_rx_queues; i++)
2085                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2086 }
2087
2088 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2089 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2090 {
2091         return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2092                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2093 }
2094
2095 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2096 {
2097         unsigned int len = e1000_frag_len(a);
2098         u8 *data = netdev_alloc_frag(len);
2099
2100         if (likely(data))
2101                 data += E1000_HEADROOM;
2102         return data;
2103 }
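/* The fragment returned above is laid out as
 *
 *     [ E1000_HEADROOM | rx_buffer_len bytes of packet data | shared info ]
 *
 * with both the data area and struct skb_shared_info padded to
 * SKB_DATA_ALIGN() boundaries, so the receive path can later wrap the
 * buffer into an sk_buff (build_skb-style) without copying the data.
 */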
2104
2105 /**
2106  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2107  * @adapter: board private structure
2108  * @rx_ring: ring to free buffers from
2109  **/
2110 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2111                                 struct e1000_rx_ring *rx_ring)
2112 {
2113         struct e1000_hw *hw = &adapter->hw;
2114         struct e1000_rx_buffer *buffer_info;
2115         struct pci_dev *pdev = adapter->pdev;
2116         unsigned long size;
2117         unsigned int i;
2118
2119         /* Free all the Rx netfrags */
2120         for (i = 0; i < rx_ring->count; i++) {
2121                 buffer_info = &rx_ring->buffer_info[i];
2122                 if (adapter->clean_rx == e1000_clean_rx_irq) {
2123                         if (buffer_info->dma)
2124                                 dma_unmap_single(&pdev->dev, buffer_info->dma,
2125                                                  adapter->rx_buffer_len,
2126                                                  DMA_FROM_DEVICE);
2127                         if (buffer_info->rxbuf.data) {
2128                                 skb_free_frag(buffer_info->rxbuf.data);
2129                                 buffer_info->rxbuf.data = NULL;
2130                         }
2131                 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2132                         if (buffer_info->dma)
2133                                 dma_unmap_page(&pdev->dev, buffer_info->dma,
2134                                                adapter->rx_buffer_len,
2135                                                DMA_FROM_DEVICE);
2136                         if (buffer_info->rxbuf.page) {
2137                                 put_page(buffer_info->rxbuf.page);
2138                                 buffer_info->rxbuf.page = NULL;
2139                         }
2140                 }
2141
2142                 buffer_info->dma = 0;
2143         }
2144
2145         /* there may also be some cached data from a chained receive */
2146         napi_free_frags(&adapter->napi);
2147         rx_ring->rx_skb_top = NULL;
2148
2149         size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2150         memset(rx_ring->buffer_info, 0, size);
2151
2152         /* Zero out the descriptor ring */
2153         memset(rx_ring->desc, 0, rx_ring->size);
2154
2155         rx_ring->next_to_clean = 0;
2156         rx_ring->next_to_use = 0;
2157
2158         writel(0, hw->hw_addr + rx_ring->rdh);
2159         writel(0, hw->hw_addr + rx_ring->rdt);
2160 }
2161
2162 /**
2163  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2164  * @adapter: board private structure
2165  **/
2166 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2167 {
2168         int i;
2169
2170         for (i = 0; i < adapter->num_rx_queues; i++)
2171                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2172 }
2173
2174 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2175  * and memory write and invalidate disabled for certain operations
2176  */
2177 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2178 {
2179         struct e1000_hw *hw = &adapter->hw;
2180         struct net_device *netdev = adapter->netdev;
2181         u32 rctl;
2182
2183         e1000_pci_clear_mwi(hw);
2184
2185         rctl = er32(RCTL);
2186         rctl |= E1000_RCTL_RST;
2187         ew32(RCTL, rctl);
2188         E1000_WRITE_FLUSH();
2189         mdelay(5);
2190
2191         if (netif_running(netdev))
2192                 e1000_clean_all_rx_rings(adapter);
2193 }
2194
2195 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2196 {
2197         struct e1000_hw *hw = &adapter->hw;
2198         struct net_device *netdev = adapter->netdev;
2199         u32 rctl;
2200
2201         rctl = er32(RCTL);
2202         rctl &= ~E1000_RCTL_RST;
2203         ew32(RCTL, rctl);
2204         E1000_WRITE_FLUSH();
2205         mdelay(5);
2206
2207         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2208                 e1000_pci_set_mwi(hw);
2209
2210         if (netif_running(netdev)) {
2211                 /* No need to loop, because 82542 supports only 1 queue */
2212                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2213                 e1000_configure_rx(adapter);
2214                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2215         }
2216 }
2217
2218 /**
2219  * e1000_set_mac - Change the Ethernet Address of the NIC
2220  * @netdev: network interface device structure
2221  * @p: pointer to an address structure
2222  *
2223  * Returns 0 on success, negative on failure
2224  **/
2225 static int e1000_set_mac(struct net_device *netdev, void *p)
2226 {
2227         struct e1000_adapter *adapter = netdev_priv(netdev);
2228         struct e1000_hw *hw = &adapter->hw;
2229         struct sockaddr *addr = p;
2230
2231         if (!is_valid_ether_addr(addr->sa_data))
2232                 return -EADDRNOTAVAIL;
2233
2234         /* 82542 2.0 needs to be in reset to write receive address registers */
2235
2236         if (hw->mac_type == e1000_82542_rev2_0)
2237                 e1000_enter_82542_rst(adapter);
2238
2239         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2240         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2241
2242         e1000_rar_set(hw, hw->mac_addr, 0);
2243
2244         if (hw->mac_type == e1000_82542_rev2_0)
2245                 e1000_leave_82542_rst(adapter);
2246
2247         return 0;
2248 }
2249
2250 /**
2251  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2252  * @netdev: network interface device structure
2253  *
2254  * The set_rx_mode entry point is called whenever the unicast or multicast
2255  * address lists or the network interface flags are updated. This routine is
2256  * responsible for configuring the hardware for proper unicast, multicast,
2257  * promiscuous mode, and all-multi behavior.
2258  **/
2259 static void e1000_set_rx_mode(struct net_device *netdev)
2260 {
2261         struct e1000_adapter *adapter = netdev_priv(netdev);
2262         struct e1000_hw *hw = &adapter->hw;
2263         struct netdev_hw_addr *ha;
2264         bool use_uc = false;
2265         u32 rctl;
2266         u32 hash_value;
2267         int i, rar_entries = E1000_RAR_ENTRIES;
2268         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2269         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2270
2271         if (!mcarray)
2272                 return;
2273
2274         /* Check for Promiscuous and All Multicast modes */
2275
2276         rctl = er32(RCTL);
2277
2278         if (netdev->flags & IFF_PROMISC) {
2279                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2280                 rctl &= ~E1000_RCTL_VFE;
2281         } else {
2282                 if (netdev->flags & IFF_ALLMULTI)
2283                         rctl |= E1000_RCTL_MPE;
2284                 else
2285                         rctl &= ~E1000_RCTL_MPE;
2286                 /* Enable VLAN filter if there is a VLAN */
2287                 if (e1000_vlan_used(adapter))
2288                         rctl |= E1000_RCTL_VFE;
2289         }
2290
2291         if (netdev_uc_count(netdev) > rar_entries - 1) {
2292                 rctl |= E1000_RCTL_UPE;
2293         } else if (!(netdev->flags & IFF_PROMISC)) {
2294                 rctl &= ~E1000_RCTL_UPE;
2295                 use_uc = true;
2296         }
2297
2298         ew32(RCTL, rctl);
2299
2300         /* 82542 2.0 needs to be in reset to write receive address registers */
2301
2302         if (hw->mac_type == e1000_82542_rev2_0)
2303                 e1000_enter_82542_rst(adapter);
2304
2305         /* load the first 14 addresses into the exact filters 1-14. Unicast
2306          * addresses take precedence to avoid disabling unicast filtering
2307          * when possible.
2308          *
2309          * RAR 0 is used for the station MAC address; if there are fewer
2310          * than 14 additional addresses, the remaining filters are cleared
2311          */
2312         i = 1;
2313         if (use_uc)
2314                 netdev_for_each_uc_addr(ha, netdev) {
2315                         if (i == rar_entries)
2316                                 break;
2317                         e1000_rar_set(hw, ha->addr, i++);
2318                 }
2319
2320         netdev_for_each_mc_addr(ha, netdev) {
2321                 if (i == rar_entries) {
2322                         /* load any remaining addresses into the hash table */
2323                         u32 hash_reg, hash_bit, mta;
2324                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2325                         hash_reg = (hash_value >> 5) & 0x7F;
2326                         hash_bit = hash_value & 0x1F;
2327                         mta = (1 << hash_bit);
2328                         mcarray[hash_reg] |= mta;
2329                 } else {
2330                         e1000_rar_set(hw, ha->addr, i++);
2331                 }
2332         }
2333
2334         for (; i < rar_entries; i++) {
2335                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2336                 E1000_WRITE_FLUSH();
2337                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2338                 E1000_WRITE_FLUSH();
2339         }
2340
2341         /* write the hash table completely, iterating backwards over the range
2342          * to cope with write-combining chipsets without flushing each write
2343          */
2344         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2345                 /* The 82544 has an erratum where writing odd offsets
2346                  * overwrites the previous even offset, but writing
2347                  * backwards over the range works around the issue by
2348                  * always writing the odd offset first
2349                  */
2350                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2351         }
2352         E1000_WRITE_FLUSH();
2353
2354         if (hw->mac_type == e1000_82542_rev2_0)
2355                 e1000_leave_82542_rst(adapter);
2356
2357         kfree(mcarray);
2358 }
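/* A worked example of the multicast hash mapping used above, with an
 * illustrative hash_value (the real one comes from e1000_hash_mc_addr()):
 *
 *     hash_value = 0x0563
 *     hash_reg   = (0x0563 >> 5) & 0x7F = 0x2b   (one of 128 MTA words)
 *     hash_bit   =  0x0563 & 0x1F       = 0x03
 *     mcarray[0x2b] |= (1 << 0x03)
 *
 * i.e. the 4096-bit multicast table is addressed as 128 32-bit registers.
 */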
2359
2360 /**
2361  * e1000_update_phy_info_task - get phy info
2362  * @work: work struct contained inside adapter struct
2363  *
2364  * Need to wait a few seconds after link up to get diagnostic information from
2365  * the phy
2366  */
2367 static void e1000_update_phy_info_task(struct work_struct *work)
2368 {
2369         struct e1000_adapter *adapter = container_of(work,
2370                                                      struct e1000_adapter,
2371                                                      phy_info_task.work);
2372
2373         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2374 }
2375
2376 /**
2377  * e1000_82547_tx_fifo_stall_task - task to complete work
2378  * @work: work struct contained inside adapter struct
2379  **/
2380 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2381 {
2382         struct e1000_adapter *adapter = container_of(work,
2383                                                      struct e1000_adapter,
2384                                                      fifo_stall_task.work);
2385         struct e1000_hw *hw = &adapter->hw;
2386         struct net_device *netdev = adapter->netdev;
2387         u32 tctl;
2388
2389         if (atomic_read(&adapter->tx_fifo_stall)) {
2390                 if ((er32(TDT) == er32(TDH)) &&
2391                    (er32(TDFT) == er32(TDFH)) &&
2392                    (er32(TDFTS) == er32(TDFHS))) {
2393                         tctl = er32(TCTL);
2394                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2395                         ew32(TDFT, adapter->tx_head_addr);
2396                         ew32(TDFH, adapter->tx_head_addr);
2397                         ew32(TDFTS, adapter->tx_head_addr);
2398                         ew32(TDFHS, adapter->tx_head_addr);
2399                         ew32(TCTL, tctl);
2400                         E1000_WRITE_FLUSH();
2401
2402                         adapter->tx_fifo_head = 0;
2403                         atomic_set(&adapter->tx_fifo_stall, 0);
2404                         netif_wake_queue(netdev);
2405                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2406                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2407                 }
2408         }
2409 }
2410
2411 bool e1000_has_link(struct e1000_adapter *adapter)
2412 {
2413         struct e1000_hw *hw = &adapter->hw;
2414         bool link_active = false;
2415
2416         /* get_link_status is set on LSC (link status) interrupt or rx
2417          * sequence error interrupt (except on intel ce4100).
2418          * get_link_status stays set until e1000_check_for_link
2419          * establishes link, and this applies to copper adapters
2420          * ONLY
2421          */
2422         switch (hw->media_type) {
2423         case e1000_media_type_copper:
2424                 if (hw->mac_type == e1000_ce4100)
2425                         hw->get_link_status = 1;
2426                 if (hw->get_link_status) {
2427                         e1000_check_for_link(hw);
2428                         link_active = !hw->get_link_status;
2429                 } else {
2430                         link_active = true;
2431                 }
2432                 break;
2433         case e1000_media_type_fiber:
2434                 e1000_check_for_link(hw);
2435                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2436                 break;
2437         case e1000_media_type_internal_serdes:
2438                 e1000_check_for_link(hw);
2439                 link_active = hw->serdes_has_link;
2440                 break;
2441         default:
2442                 break;
2443         }
2444
2445         return link_active;
2446 }
2447
2448 /**
2449  * e1000_watchdog - work function
2450  * @work: work struct contained inside adapter struct
2451  **/
2452 static void e1000_watchdog(struct work_struct *work)
2453 {
2454         struct e1000_adapter *adapter = container_of(work,
2455                                                      struct e1000_adapter,
2456                                                      watchdog_task.work);
2457         struct e1000_hw *hw = &adapter->hw;
2458         struct net_device *netdev = adapter->netdev;
2459         struct e1000_tx_ring *txdr = adapter->tx_ring;
2460         u32 link, tctl;
2461
2462         link = e1000_has_link(adapter);
2463         if ((netif_carrier_ok(netdev)) && link)
2464                 goto link_up;
2465
2466         if (link) {
2467                 if (!netif_carrier_ok(netdev)) {
2468                         u32 ctrl;
2469                         bool txb2b = true;
2470                         /* update snapshot of PHY registers on LSC */
2471                         e1000_get_speed_and_duplex(hw,
2472                                                    &adapter->link_speed,
2473                                                    &adapter->link_duplex);
2474
2475                         ctrl = er32(CTRL);
2476                         pr_info("%s NIC Link is Up %d Mbps %s, "
2477                                 "Flow Control: %s\n",
2478                                 netdev->name,
2479                                 adapter->link_speed,
2480                                 adapter->link_duplex == FULL_DUPLEX ?
2481                                 "Full Duplex" : "Half Duplex",
2482                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2483                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2484                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2485                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2486
2487                         /* adjust timeout factor according to speed/duplex */
2488                         adapter->tx_timeout_factor = 1;
2489                         switch (adapter->link_speed) {
2490                         case SPEED_10:
2491                                 txb2b = false;
2492                                 adapter->tx_timeout_factor = 16;
2493                                 break;
2494                         case SPEED_100:
2495                                 txb2b = false;
2496                                 /* maybe add some timeout factor ? */
2497                                 break;
2498                         }
2499
2500                         /* enable transmits in the hardware */
2501                         tctl = er32(TCTL);
2502                         tctl |= E1000_TCTL_EN;
2503                         ew32(TCTL, tctl);
2504
2505                         netif_carrier_on(netdev);
2506                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2507                                 schedule_delayed_work(&adapter->phy_info_task,
2508                                                       2 * HZ);
2509                         adapter->smartspeed = 0;
2510                 }
2511         } else {
2512                 if (netif_carrier_ok(netdev)) {
2513                         adapter->link_speed = 0;
2514                         adapter->link_duplex = 0;
2515                         pr_info("%s NIC Link is Down\n",
2516                                 netdev->name);
2517                         netif_carrier_off(netdev);
2518
2519                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2520                                 schedule_delayed_work(&adapter->phy_info_task,
2521                                                       2 * HZ);
2522                 }
2523
2524                 e1000_smartspeed(adapter);
2525         }
2526
2527 link_up:
2528         e1000_update_stats(adapter);
2529
2530         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2531         adapter->tpt_old = adapter->stats.tpt;
2532         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2533         adapter->colc_old = adapter->stats.colc;
2534
2535         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2536         adapter->gorcl_old = adapter->stats.gorcl;
2537         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2538         adapter->gotcl_old = adapter->stats.gotcl;
2539
2540         e1000_update_adaptive(hw);
2541
2542         if (!netif_carrier_ok(netdev)) {
2543                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2544                         /* We've lost link, so the controller stops DMA,
2545                          * but we've got queued Tx work that's never going
2546                          * to get done, so reset controller to flush Tx.
2547                          * (Do the reset outside of interrupt context).
2548                          */
2549                         adapter->tx_timeout_count++;
2550                         schedule_work(&adapter->reset_task);
2551                         /* exit immediately since reset is imminent */
2552                         return;
2553                 }
2554         }
2555
2556         /* Simple mode for Interrupt Throttle Rate (ITR) */
2557         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2558                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2559                  * Total asymmetrical Tx or Rx gets ITR=8000;
2560                  * everyone else is between 2000-8000.
2561                  */
2562                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2563                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2564                             adapter->gotcl - adapter->gorcl :
2565                             adapter->gorcl - adapter->gotcl) / 10000;
2566                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2567
2568                 ew32(ITR, 1000000000 / (itr * 256));
2569         }
2570
2571         /* Cause software interrupt to ensure rx ring is cleaned */
2572         ew32(ICS, E1000_ICS_RXDMT0);
2573
2574         /* Force detection of hung controller every watchdog period */
2575         adapter->detect_tx_hung = true;
2576
2577         /* Reschedule the task */
2578         if (!test_bit(__E1000_DOWN, &adapter->flags))
2579                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2580 }
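/* The arithmetic behind the "simple mode" ITR block above, with
 * illustrative traffic counts:
 *
 *     symmetric Tx/Rx:  gotcl == gorcl  ->  dif = 0
 *                       itr = 0 * 6000 / goc + 2000 = 2000 ints/s
 *     one-sided:        gorcl == 0      ->  dif == goc
 *                       itr = 6000 + 2000 = 8000 ints/s
 *
 * The ITR register counts the inter-interrupt interval in 256 ns units,
 * hence the 1000000000 / (itr * 256) conversion: itr = 2000 programs
 * 1953, i.e. roughly 500 us between interrupts.
 */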
2581
2582 enum latency_range {
2583         lowest_latency = 0,
2584         low_latency = 1,
2585         bulk_latency = 2,
2586         latency_invalid = 255
2587 };
2588
2589 /**
2590  * e1000_update_itr - update the dynamic ITR value based on statistics
2591  * @adapter: pointer to adapter
2592  * @itr_setting: current adapter->itr
2593  * @packets: the number of packets during this measurement interval
2594  * @bytes: the number of bytes during this measurement interval
2595  *
2596  *      Stores a new ITR value based on packets and byte
2597  *      counts during the last interrupt.  The advantage of per interrupt
2598  *      computation is faster updates and more accurate ITR for the current
2599  *      traffic pattern.  Constants in this function were computed
2600  *      based on theoretical maximum wire speed and thresholds were set based
2601  *      on testing data as well as attempting to minimize response time
2602  *      while increasing bulk throughput.
2603  *      This functionality is controlled by the InterruptThrottleRate module
2604  *      parameter (see e1000_param.c).
2605  **/
2606 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2607                                      u16 itr_setting, int packets, int bytes)
2608 {
2609         unsigned int retval = itr_setting;
2610         struct e1000_hw *hw = &adapter->hw;
2611
2612         if (unlikely(hw->mac_type < e1000_82540))
2613                 goto update_itr_done;
2614
2615         if (packets == 0)
2616                 goto update_itr_done;
2617
2618         switch (itr_setting) {
2619         case lowest_latency:
2620                 /* jumbo frames get bulk treatment */
2621                 if (bytes/packets > 8000)
2622                         retval = bulk_latency;
2623                 else if ((packets < 5) && (bytes > 512))
2624                         retval = low_latency;
2625                 break;
2626         case low_latency:  /* 50 usec aka 20000 ints/s */
2627                 if (bytes > 10000) {
2628                         /* jumbo frames need bulk latency setting */
2629                         if (bytes/packets > 8000)
2630                                 retval = bulk_latency;
2631                         else if ((packets < 10) || ((bytes/packets) > 1200))
2632                                 retval = bulk_latency;
2633                         else if ((packets > 35))
2634                                 retval = lowest_latency;
2635                 } else if (bytes/packets > 2000)
2636                         retval = bulk_latency;
2637                 else if (packets <= 2 && bytes < 512)
2638                         retval = lowest_latency;
2639                 break;
2640         case bulk_latency: /* 250 usec aka 4000 ints/s */
2641                 if (bytes > 25000) {
2642                         if (packets > 35)
2643                                 retval = low_latency;
2644                 } else if (bytes < 6000) {
2645                         retval = low_latency;
2646                 }
2647                 break;
2648         }
2649
2650 update_itr_done:
2651         return retval;
2652 }
2653
2654 static void e1000_set_itr(struct e1000_adapter *adapter)
2655 {
2656         struct e1000_hw *hw = &adapter->hw;
2657         u16 current_itr;
2658         u32 new_itr = adapter->itr;
2659
2660         if (unlikely(hw->mac_type < e1000_82540))
2661                 return;
2662
2663         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2664         if (unlikely(adapter->link_speed != SPEED_1000)) {
2665                 current_itr = 0;
2666                 new_itr = 4000;
2667                 goto set_itr_now;
2668         }
2669
2670         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2671                                            adapter->total_tx_packets,
2672                                            adapter->total_tx_bytes);
2673         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2674         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2675                 adapter->tx_itr = low_latency;
2676
2677         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2678                                            adapter->total_rx_packets,
2679                                            adapter->total_rx_bytes);
2680         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2681         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2682                 adapter->rx_itr = low_latency;
2683
2684         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2685
2686         switch (current_itr) {
2687         /* counts and packets in update_itr are dependent on these numbers */
2688         case lowest_latency:
2689                 new_itr = 70000;
2690                 break;
2691         case low_latency:
2692                 new_itr = 20000; /* aka hwitr = ~200 */
2693                 break;
2694         case bulk_latency:
2695                 new_itr = 4000;
2696                 break;
2697         default:
2698                 break;
2699         }
2700
2701 set_itr_now:
2702         if (new_itr != adapter->itr) {
2703                 /* this attempts to bias the interrupt rate towards Bulk
2704                  * by adding intermediate steps when interrupt rate is
2705                  * increasing
2706                  */
2707                 new_itr = new_itr > adapter->itr ?
2708                           min(adapter->itr + (new_itr >> 2), new_itr) :
2709                           new_itr;
2710                 adapter->itr = new_itr;
2711                 ew32(ITR, 1000000000 / (new_itr * 256));
2712         }
2713 }
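/* An example of the ramp damping above, with illustrative rates: if the
 * current rate is 4000 ints/s and the new target is 20000 ints/s, this
 * pass programs min(4000 + 20000 / 4, 20000) = 9000 ints/s, and later
 * passes keep stepping the rate up toward the target; moves toward bulk
 * (a lower rate) are applied immediately.
 */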
2714
2715 #define E1000_TX_FLAGS_CSUM             0x00000001
2716 #define E1000_TX_FLAGS_VLAN             0x00000002
2717 #define E1000_TX_FLAGS_TSO              0x00000004
2718 #define E1000_TX_FLAGS_IPV4             0x00000008
2719 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2720 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2721 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2722
2723 static int e1000_tso(struct e1000_adapter *adapter,
2724                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2725                      __be16 protocol)
2726 {
2727         struct e1000_context_desc *context_desc;
2728         struct e1000_tx_buffer *buffer_info;
2729         unsigned int i;
2730         u32 cmd_length = 0;
2731         u16 ipcse = 0, tucse, mss;
2732         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2733
2734         if (skb_is_gso(skb)) {
2735                 int err;
2736
2737                 err = skb_cow_head(skb, 0);
2738                 if (err < 0)
2739                         return err;
2740
2741                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2742                 mss = skb_shinfo(skb)->gso_size;
2743                 if (protocol == htons(ETH_P_IP)) {
2744                         struct iphdr *iph = ip_hdr(skb);
2745                         iph->tot_len = 0;
2746                         iph->check = 0;
2747                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2748                                                                  iph->daddr, 0,
2749                                                                  IPPROTO_TCP,
2750                                                                  0);
2751                         cmd_length = E1000_TXD_CMD_IP;
2752                         ipcse = skb_transport_offset(skb) - 1;
2753                 } else if (skb_is_gso_v6(skb)) {
2754                         ipv6_hdr(skb)->payload_len = 0;
2755                         tcp_hdr(skb)->check =
2756                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2757                                                  &ipv6_hdr(skb)->daddr,
2758                                                  0, IPPROTO_TCP, 0);
2759                         ipcse = 0;
2760                 }
2761                 ipcss = skb_network_offset(skb);
2762                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2763                 tucss = skb_transport_offset(skb);
2764                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2765                 tucse = 0;
2766
2767                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2768                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2769
2770                 i = tx_ring->next_to_use;
2771                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2772                 buffer_info = &tx_ring->buffer_info[i];
2773
2774                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2775                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2776                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2777                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2778                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2779                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2780                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2781                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2782                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2783
2784                 buffer_info->time_stamp = jiffies;
2785                 buffer_info->next_to_watch = i;
2786
2787                 if (++i == tx_ring->count)
2788                         i = 0;
2789
2790                 tx_ring->next_to_use = i;
2791
2792                 return true;
2793         }
2794         return false;
2795 }
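/* The offsets loaded into the TSO context descriptor above, for an
 * untagged IPv4/TCP frame with 20-byte IP and TCP headers (illustrative;
 * real frames may carry options or VLAN tags):
 *
 *     ipcss = 14              start of the IP header
 *     ipcso = 14 + 10 = 24    offset of iph->check
 *     ipcse = 34 - 1  = 33    last byte covered by the IP checksum
 *     tucss = 34              start of the TCP header
 *     tucso = 34 + 16 = 50    offset of tcp->check
 *     hdr_len = 34 + 20 = 54  header bytes replicated for every segment
 */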
2796
2797 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2798                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2799                           __be16 protocol)
2800 {
2801         struct e1000_context_desc *context_desc;
2802         struct e1000_tx_buffer *buffer_info;
2803         unsigned int i;
2804         u8 css;
2805         u32 cmd_len = E1000_TXD_CMD_DEXT;
2806
2807         if (skb->ip_summed != CHECKSUM_PARTIAL)
2808                 return false;
2809
2810         switch (protocol) {
2811         case cpu_to_be16(ETH_P_IP):
2812                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2813                         cmd_len |= E1000_TXD_CMD_TCP;
2814                 break;
2815         case cpu_to_be16(ETH_P_IPV6):
2816                 /* XXX not handling all IPV6 headers */
2817                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2818                         cmd_len |= E1000_TXD_CMD_TCP;
2819                 break;
2820         default:
2821                 if (unlikely(net_ratelimit()))
2822                         e_warn(drv, "checksum_partial proto=%x!\n",
2823                                skb->protocol);
2824                 break;
2825         }
2826
2827         css = skb_checksum_start_offset(skb);
2828
2829         i = tx_ring->next_to_use;
2830         buffer_info = &tx_ring->buffer_info[i];
2831         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2832
2833         context_desc->lower_setup.ip_config = 0;
2834         context_desc->upper_setup.tcp_fields.tucss = css;
2835         context_desc->upper_setup.tcp_fields.tucso =
2836                 css + skb->csum_offset;
2837         context_desc->upper_setup.tcp_fields.tucse = 0;
2838         context_desc->tcp_seg_setup.data = 0;
2839         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2840
2841         buffer_info->time_stamp = jiffies;
2842         buffer_info->next_to_watch = i;
2843
2844         if (unlikely(++i == tx_ring->count))
2845                 i = 0;
2846
2847         tx_ring->next_to_use = i;
2848
2849         return true;
2850 }
2851
2852 #define E1000_MAX_TXD_PWR       12
2853 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
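/* With E1000_MAX_TXD_PWR == 12, E1000_MAX_DATA_PER_TXD is 1 << 12 == 4096,
 * so a single data descriptor carries at most 4 KiB of payload.
 */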
2854
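/**
 * e1000_tx_map - DMA-map a packet into Tx ring buffers
 * @adapter: board private structure
 * @tx_ring: ring the buffers are placed on
 * @skb: packet to map
 * @first: index of the first descriptor used for this packet
 * @max_per_txd: maximum number of bytes per data descriptor
 * @nr_frags: number of paged fragments in @skb
 * @mss: TSO segment size, or 0 for non-TSO packets
 *
 * Maps the linear part and each fragment of @skb for DMA, applying the
 * various controller workarounds (sentinel bytes, PCI-X errata), and
 * records the mappings in tx_ring->buffer_info[].  Returns the number of
 * descriptors used, or 0 if a DMA mapping failed, in which case all
 * mappings made so far are unwound.
 **/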
2855 static int e1000_tx_map(struct e1000_adapter *adapter,
2856                         struct e1000_tx_ring *tx_ring,
2857                         struct sk_buff *skb, unsigned int first,
2858                         unsigned int max_per_txd, unsigned int nr_frags,
2859                         unsigned int mss)
2860 {
2861         struct e1000_hw *hw = &adapter->hw;
2862         struct pci_dev *pdev = adapter->pdev;
2863         struct e1000_tx_buffer *buffer_info;
2864         unsigned int len = skb_headlen(skb);
2865         unsigned int offset = 0, size, count = 0, i;
2866         unsigned int f, bytecount, segs;
2867
2868         i = tx_ring->next_to_use;
2869
2870         while (len) {
2871                 buffer_info = &tx_ring->buffer_info[i];
2872                 size = min(len, max_per_txd);
2873                 /* Workaround for controller erratum --
2874                  * the descriptor for a non-TSO packet in a linear SKB that
2875                  * follows a TSO packet gets written back prematurely, before
2876                  * the data is fully DMA'd to the controller.
2877                  */
2878                 if (!skb->data_len && tx_ring->last_tx_tso &&
2879                     !skb_is_gso(skb)) {
2880                         tx_ring->last_tx_tso = false;
2881                         size -= 4;
2882                 }
2883
2884                 /* Workaround for premature desc write-backs
2885                  * in TSO mode.  Append 4-byte sentinel desc
2886                  */
2887                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2888                         size -= 4;
2889                 /* Work-around for errata 10, which applies
2890                  * to all controllers in PCI-X mode.
2891                  * The fix is to make sure that the first descriptor of a
2892                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2893                  */
2894                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2895                              (size > 2015) && count == 0))
2896                         size = 2015;
2897
2898                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2899                  * terminating buffers within evenly-aligned dwords.
2900                  */
2901                 if (unlikely(adapter->pcix_82544 &&
2902                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2903                    size > 4))
2904                         size -= 4;
2905
2906                 buffer_info->length = size;
2907                 /* set time_stamp *before* dma to help avoid a possible race */
2908                 buffer_info->time_stamp = jiffies;
2909                 buffer_info->mapped_as_page = false;
2910                 buffer_info->dma = dma_map_single(&pdev->dev,
2911                                                   skb->data + offset,
2912                                                   size, DMA_TO_DEVICE);
2913                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2914                         goto dma_error;
2915                 buffer_info->next_to_watch = i;
2916
2917                 len -= size;
2918                 offset += size;
2919                 count++;
2920                 if (len) {
2921                         i++;
2922                         if (unlikely(i == tx_ring->count))
2923                                 i = 0;
2924                 }
2925         }
2926
2927         for (f = 0; f < nr_frags; f++) {
2928                 const struct skb_frag_struct *frag;
2929
2930                 frag = &skb_shinfo(skb)->frags[f];
2931                 len = skb_frag_size(frag);
2932                 offset = 0;
2933
2934                 while (len) {
2935                         unsigned long bufend;
2936                         i++;
2937                         if (unlikely(i == tx_ring->count))
2938                                 i = 0;
2939
2940                         buffer_info = &tx_ring->buffer_info[i];
2941                         size = min(len, max_per_txd);
2942                         /* Workaround for premature desc write-backs
2943                          * in TSO mode.  Append 4-byte sentinel desc
2944                          */
2945                         if (unlikely(mss && f == (nr_frags-1) &&
2946                             size == len && size > 8))
2947                                 size -= 4;
2948                         /* Workaround for potential 82544 hang in PCI-X.
2949                          * Avoid terminating buffers within evenly-aligned
2950                          * dwords.
2951                          */
2952                         bufend = (unsigned long)
2953                                 page_to_phys(skb_frag_page(frag));
2954                         bufend += offset + size - 1;
2955                         if (unlikely(adapter->pcix_82544 &&
2956                                      !(bufend & 4) &&
2957                                      size > 4))
2958                                 size -= 4;
2959
2960                         buffer_info->length = size;
2961                         buffer_info->time_stamp = jiffies;
2962                         buffer_info->mapped_as_page = true;
2963                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2964                                                 offset, size, DMA_TO_DEVICE);
2965                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2966                                 goto dma_error;
2967                         buffer_info->next_to_watch = i;
2968
2969                         len -= size;
2970                         offset += size;
2971                         count++;
2972                 }
2973         }
2974
2975         segs = skb_shinfo(skb)->gso_segs ?: 1;
2976         /* multiply data chunks by size of headers */
2977         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2978
2979         tx_ring->buffer_info[i].skb = skb;
2980         tx_ring->buffer_info[i].segs = segs;
2981         tx_ring->buffer_info[i].bytecount = bytecount;
2982         tx_ring->buffer_info[first].next_to_watch = i;
2983
2984         return count;
2985
2986 dma_error:
2987         dev_err(&pdev->dev, "TX DMA map failed\n");
2988         buffer_info->dma = 0;
2989         if (count)
2990                 count--;
2991
2992         while (count--) {
2993                 if (i == 0)
2994                         i += tx_ring->count;
2995                 i--;
2996                 buffer_info = &tx_ring->buffer_info[i];
2997                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2998         }
2999
3000         return 0;
3001 }
3002
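/**
 * e1000_tx_queue - write previously mapped buffers as Tx descriptors
 * @adapter: board private structure
 * @tx_ring: ring the descriptors are placed on
 * @tx_flags: TSO/checksum/VLAN flags gathered by e1000_xmit_frame()
 * @count: number of buffer_info entries to turn into descriptors
 *
 * Translates @tx_flags into per-descriptor command and option bits,
 * writes @count data descriptors, ORs adapter->txd_cmd into the last
 * one, and updates next_to_use behind a write barrier so the hardware
 * never sees a partially written descriptor chain.
 **/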
3003 static void e1000_tx_queue(struct e1000_adapter *adapter,
3004                            struct e1000_tx_ring *tx_ring, int tx_flags,
3005                            int count)
3006 {
3007         struct e1000_tx_desc *tx_desc = NULL;
3008         struct e1000_tx_buffer *buffer_info;
3009         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3010         unsigned int i;
3011
3012         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3013                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3014                              E1000_TXD_CMD_TSE;
3015                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3016
3017                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3018                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3019         }
3020
3021         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3022                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3023                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3024         }
3025
3026         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3027                 txd_lower |= E1000_TXD_CMD_VLE;
3028                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3029         }
3030
3031         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3032                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3033
3034         i = tx_ring->next_to_use;
3035
3036         while (count--) {
3037                 buffer_info = &tx_ring->buffer_info[i];
3038                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3039                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3040                 tx_desc->lower.data =
3041                         cpu_to_le32(txd_lower | buffer_info->length);
3042                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3043                 if (unlikely(++i == tx_ring->count))
3044                         i = 0;
3045         }
3046
3047         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3048
3049         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3050         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3051                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3052
3053         /* Force memory writes to complete before letting h/w
3054          * know there are new descriptors to fetch.  (Only
3055          * applicable for weak-ordered memory model archs,
3056          * such as IA-64).
3057          */
3058         wmb();
3059
3060         tx_ring->next_to_use = i;
3061 }
3062
3063 /* 82547 workaround to avoid controller hang in half-duplex environment.
3064  * The workaround is to avoid queuing a large packet that would span
3065  * the internal Tx FIFO ring boundary by notifying the stack to resend
3066  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3067  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3068  * to the beginning of the Tx FIFO.
3069  */
3070
3071 #define E1000_FIFO_HDR                  0x10
3072 #define E1000_82547_PAD_LEN             0x3E0
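/* The workaround below accounts E1000_FIFO_HDR (0x10 == 16) bytes of header
 * per packet in the Tx FIFO and rounds each packet up to that granularity;
 * it stalls the queue when less than E1000_82547_PAD_LEN (0x3E0 == 992)
 * bytes of slack would remain in the FIFO.
 */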
3073
3074 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3075                                        struct sk_buff *skb)
3076 {
3077         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3078         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3079
3080         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3081
3082         if (adapter->link_duplex != HALF_DUPLEX)
3083                 goto no_fifo_stall_required;
3084
3085         if (atomic_read(&adapter->tx_fifo_stall))
3086                 return 1;
3087
3088         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3089                 atomic_set(&adapter->tx_fifo_stall, 1);
3090                 return 1;
3091         }
3092
3093 no_fifo_stall_required:
3094         adapter->tx_fifo_head += skb_fifo_len;
3095         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3096                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3097         return 0;
3098 }
3099
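/* Slow path of e1000_maybe_stop_tx(): stop the queue, then re-check the
 * free descriptor count after the memory barrier.  If the cleanup path
 * freed enough descriptors in the meantime, restart the queue right away;
 * this avoids a lost-wakeup race with e1000_clean_tx_irq().
 */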
3100 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3101 {
3102         struct e1000_adapter *adapter = netdev_priv(netdev);
3103         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3104
3105         netif_stop_queue(netdev);
3106         /* Herbert's original patch had:
3107          *  smp_mb__after_netif_stop_queue();
3108          * but since that doesn't exist yet, just open code it.
3109          */
3110         smp_mb();
3111
3112         /* We need to check again in case another CPU has just
3113          * made room available.
3114          */
3115         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3116                 return -EBUSY;
3117
3118         /* A reprieve! */
3119         netif_start_queue(netdev);
3120         ++adapter->restart_queue;
3121         return 0;
3122 }
3123
3124 static int e1000_maybe_stop_tx(struct net_device *netdev,
3125                                struct e1000_tx_ring *tx_ring, int size)
3126 {
3127         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3128                 return 0;
3129         return __e1000_maybe_stop_tx(netdev, size);
3130 }
3131
3132 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
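/* TXD_USE_COUNT(S, X) == ceil(S / 2^X): the number of data descriptors a
 * buffer of S bytes needs when each descriptor holds at most 2^X bytes.
 * For example, with X == E1000_MAX_TXD_PWR (12), a 9000-byte buffer needs
 * TXD_USE_COUNT(9000, 12) == (9000 + 4095) >> 12 == 3 descriptors.
 */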
3133 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3134                                     struct net_device *netdev)
3135 {
3136         struct e1000_adapter *adapter = netdev_priv(netdev);
3137         struct e1000_hw *hw = &adapter->hw;
3138         struct e1000_tx_ring *tx_ring;
3139         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3140         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3141         unsigned int tx_flags = 0;
3142         unsigned int len = skb_headlen(skb);
3143         unsigned int nr_frags;
3144         unsigned int mss;
3145         int count = 0;
3146         int tso;
3147         unsigned int f;
3148         __be16 protocol = vlan_get_protocol(skb);
3149
3150         /* This goes back to the question of how to logically map a Tx queue
3151          * to a flow.  Right now, performance is slightly degraded when
3152          * using multiple Tx queues.  If the stack breaks away from a
3153          * single qdisc implementation, we can look at this again.
3154          */
3155         tx_ring = adapter->tx_ring;
3156
3157         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3158          * packets may get corrupted during padding by HW.
3159          * To work around this issue, pad all small packets manually.
3160          */
3161         if (eth_skb_pad(skb))
3162                 return NETDEV_TX_OK;
3163
3164         mss = skb_shinfo(skb)->gso_size;
3165         /* The controller does a simple calculation to
3166          * make sure there is enough room in the FIFO before
3167          * initiating the DMA for each buffer.  That calculation
3168          * assumes ceil(buffer len / mss) <= 4, so to keep from
3169          * overrunning the FIFO, cap the max buffer len at 4 * mss
3170          * when mss drops.
3171          */
3172         if (mss) {
3173                 u8 hdr_len;
3174                 max_per_txd = min(mss << 2, max_per_txd);
3175                 max_txd_pwr = fls(max_per_txd) - 1;
3176
3177                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3178                 if (skb->data_len && hdr_len == len) {
3179                         switch (hw->mac_type) {
3180                         case e1000_82544: {
3181                                 unsigned int pull_size;
3182
3183                                 /* Make sure we have room to chop off 4 bytes,
3184                                  * and that the end alignment will work out to
3185                                  * this hardware's requirements.
3186                                  * NOTE: this is a TSO-only workaround;
3187                                  * if the end byte alignment is not correct, move us
3188                                  * into the next dword.
3189                                  */
3190                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3191                                     & 4)
3192                                         break;
3193                                 /* fall through */
3194                                 pull_size = min((unsigned int)4, skb->data_len);
3195                                 if (!__pskb_pull_tail(skb, pull_size)) {
3196                                         e_err(drv, "__pskb_pull_tail "
3197                                               "failed.\n");
3198                                         dev_kfree_skb_any(skb);
3199                                         return NETDEV_TX_OK;
3200                                 }
3201                                 len = skb_headlen(skb);
3202                                 break;
3203                         }
3204                         default:
3205                                 /* do nothing */
3206                                 break;
3207                         }
3208                 }
3209         }
3210
3211         /* reserve a descriptor for the offload context */
3212         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3213                 count++;
3214         count++;
3215
3216         /* Controller Erratum workaround */
3217         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3218                 count++;
3219
3220         count += TXD_USE_COUNT(len, max_txd_pwr);
3221
3222         if (adapter->pcix_82544)
3223                 count++;
3224
3225         /* work-around for errata 10 and it applies to all controllers
3226          * in PCI-X mode, so add one more descriptor to the count
3227          */
3228         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3229                         (len > 2015)))
3230                 count++;
3231
3232         nr_frags = skb_shinfo(skb)->nr_frags;
3233         for (f = 0; f < nr_frags; f++)
3234                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3235                                        max_txd_pwr);
3236         if (adapter->pcix_82544)
3237                 count += nr_frags;
3238
3239         /* need: count + 2 desc gap to keep tail from touching
3240          * head, otherwise try next time
3241          */
3242         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3243                 return NETDEV_TX_BUSY;
3244
3245         if (unlikely((hw->mac_type == e1000_82547) &&
3246                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3247                 netif_stop_queue(netdev);
3248                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3249                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3250                 return NETDEV_TX_BUSY;
3251         }
3252
3253         if (skb_vlan_tag_present(skb)) {
3254                 tx_flags |= E1000_TX_FLAGS_VLAN;
3255                 tx_flags |= (skb_vlan_tag_get(skb) <<
3256                              E1000_TX_FLAGS_VLAN_SHIFT);
3257         }
3258
3259         first = tx_ring->next_to_use;
3260
3261         tso = e1000_tso(adapter, tx_ring, skb, protocol);
3262         if (tso < 0) {
3263                 dev_kfree_skb_any(skb);
3264                 return NETDEV_TX_OK;
3265         }
3266
3267         if (likely(tso)) {
3268                 if (likely(hw->mac_type != e1000_82544))
3269                         tx_ring->last_tx_tso = true;
3270                 tx_flags |= E1000_TX_FLAGS_TSO;
3271         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3272                 tx_flags |= E1000_TX_FLAGS_CSUM;
3273
3274         if (protocol == htons(ETH_P_IP))
3275                 tx_flags |= E1000_TX_FLAGS_IPV4;
3276
3277         if (unlikely(skb->no_fcs))
3278                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3279
3280         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3281                              nr_frags, mss);
3282
3283         if (count) {
3284                 /* The number of descriptors needed is higher than in other
3285                  * Intel drivers due to a number of workarounds.  The breakdown is below:
3286                  * Data descriptors: MAX_SKB_FRAGS + 1
3287                  * Context Descriptor: 1
3288                  * Keep head from touching tail: 2
3289                  * Workarounds: 3
3290                  */
3291                 int desc_needed = MAX_SKB_FRAGS + 7;
3292
3293                 netdev_sent_queue(netdev, skb->len);
3294                 skb_tx_timestamp(skb);
3295
3296                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3297
3298                 /* 82544 potentially requires twice as many data descriptors
3299                  * in order to guarantee buffers don't end on evenly-aligned
3300                  * dwords
3301                  */
3302                 if (adapter->pcix_82544)
3303                         desc_needed += MAX_SKB_FRAGS + 1;
3304
3305                 /* Make sure there is space in the ring for the next send. */
3306                 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3307
3308                 if (!skb->xmit_more ||
3309                     netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3310                         writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3311                         /* we need this if more than one processor can write to
3312                          * our tail at a time; it synchronizes IO on IA64/Altix
3313                          * systems
3314                          */
3315                         mmiowb();
3316                 }
3317         } else {
3318                 dev_kfree_skb_any(skb);
3319                 tx_ring->buffer_info[first].time_stamp = 0;
3320                 tx_ring->next_to_use = first;
3321         }
3322
3323         return NETDEV_TX_OK;
3324 }
3325
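/* e1000_regdump() snapshots NUM_REGS MAC registers into a local buffer and
 * prints them with their mnemonic names; e1000_dump() uses it when a Tx
 * hang is being diagnosed.
 */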
3326 #define NUM_REGS 38 /* 1 based count */
3327 static void e1000_regdump(struct e1000_adapter *adapter)
3328 {
3329         struct e1000_hw *hw = &adapter->hw;
3330         u32 regs[NUM_REGS];
3331         u32 *regs_buff = regs;
3332         int i = 0;
3333
3334         static const char * const reg_name[] = {
3335                 "CTRL",  "STATUS",
3336                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3337                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3338                 "TIDV", "TXDCTL", "TADV", "TARC0",
3339                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3340                 "TXDCTL1", "TARC1",
3341                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3342                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3343                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3344         };
3345
3346         regs_buff[0]  = er32(CTRL);
3347         regs_buff[1]  = er32(STATUS);
3348
3349         regs_buff[2]  = er32(RCTL);
3350         regs_buff[3]  = er32(RDLEN);
3351         regs_buff[4]  = er32(RDH);
3352         regs_buff[5]  = er32(RDT);
3353         regs_buff[6]  = er32(RDTR);
3354
3355         regs_buff[7]  = er32(TCTL);
3356         regs_buff[8]  = er32(TDBAL);
3357         regs_buff[9]  = er32(TDBAH);
3358         regs_buff[10] = er32(TDLEN);
3359         regs_buff[11] = er32(TDH);
3360         regs_buff[12] = er32(TDT);
3361         regs_buff[13] = er32(TIDV);
3362         regs_buff[14] = er32(TXDCTL);
3363         regs_buff[15] = er32(TADV);
3364         regs_buff[16] = er32(TARC0);
3365
3366         regs_buff[17] = er32(TDBAL1);
3367         regs_buff[18] = er32(TDBAH1);
3368         regs_buff[19] = er32(TDLEN1);
3369         regs_buff[20] = er32(TDH1);
3370         regs_buff[21] = er32(TDT1);
3371         regs_buff[22] = er32(TXDCTL1);
3372         regs_buff[23] = er32(TARC1);
3373         regs_buff[24] = er32(CTRL_EXT);
3374         regs_buff[25] = er32(ERT);
3375         regs_buff[26] = er32(RDBAL0);
3376         regs_buff[27] = er32(RDBAH0);
3377         regs_buff[28] = er32(TDFH);
3378         regs_buff[29] = er32(TDFT);
3379         regs_buff[30] = er32(TDFHS);
3380         regs_buff[31] = er32(TDFTS);
3381         regs_buff[32] = er32(TDFPC);
3382         regs_buff[33] = er32(RDFH);
3383         regs_buff[34] = er32(RDFT);
3384         regs_buff[35] = er32(RDFHS);
3385         regs_buff[36] = er32(RDFTS);
3386         regs_buff[37] = er32(RDFPC);
3387
3388         pr_info("Register dump\n");
3389         for (i = 0; i < NUM_REGS; i++)
3390                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3391 }
3392
3393 /**
3394  * e1000_dump - Print registers, Tx ring and Rx ring
3395  **/
3396 static void e1000_dump(struct e1000_adapter *adapter)
3397 {
3398         /* this code doesn't handle multiple rings */
3399         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3400         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3401         int i;
3402
3403         if (!netif_msg_hw(adapter))
3404                 return;
3405
3406         /* Print Registers */
3407         e1000_regdump(adapter);
3408
3409         /* transmit dump */
3410         pr_info("TX Desc ring0 dump\n");
3411
3412         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3413          *
3414          * Legacy Transmit Descriptor
3415          *   +--------------------------------------------------------------+
3416          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3417          *   +--------------------------------------------------------------+
3418          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3419          *   +--------------------------------------------------------------+
3420          *   63       48 47        36 35    32 31     24 23    16 15        0
3421          *
3422          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3423          *   63      48 47    40 39       32 31             16 15    8 7      0
3424          *   +----------------------------------------------------------------+
3425          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3426          *   +----------------------------------------------------------------+
3427          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3428          *   +----------------------------------------------------------------+
3429          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3430          *
3431          * Extended Data Descriptor (DTYP=0x1)
3432          *   +----------------------------------------------------------------+
3433          * 0 |                     Buffer Address [63:0]                      |
3434          *   +----------------------------------------------------------------+
3435          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3436          *   +----------------------------------------------------------------+
3437          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3438          */
3439         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3440         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3441
3442         if (!netif_msg_tx_done(adapter))
3443                 goto rx_ring_summary;
3444
3445         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3446                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3447                 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3448                 struct my_u { __le64 a; __le64 b; };
3449                 struct my_u *u = (struct my_u *)tx_desc;
3450                 const char *type;
3451
3452                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3453                         type = "NTC/U";
3454                 else if (i == tx_ring->next_to_use)
3455                         type = "NTU";
3456                 else if (i == tx_ring->next_to_clean)
3457                         type = "NTC";
3458                 else
3459                         type = "";
3460
3461                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3462                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3463                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3464                         (u64)buffer_info->dma, buffer_info->length,
3465                         buffer_info->next_to_watch,
3466                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3467         }
3468
3469 rx_ring_summary:
3470         /* receive dump */
3471         pr_info("\nRX Desc ring dump\n");
3472
3473         /* Legacy Receive Descriptor Format
3474          *
3475          * +-----------------------------------------------------+
3476          * |                Buffer Address [63:0]                |
3477          * +-----------------------------------------------------+
3478          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3479          * +-----------------------------------------------------+
3480          * 63       48 47    40 39      32 31         16 15      0
3481          */
3482         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3483
3484         if (!netif_msg_rx_status(adapter))
3485                 goto exit;
3486
3487         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3488                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3489                 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3490                 struct my_u { __le64 a; __le64 b; };
3491                 struct my_u *u = (struct my_u *)rx_desc;
3492                 const char *type;
3493
3494                 if (i == rx_ring->next_to_use)
3495                         type = "NTU";
3496                 else if (i == rx_ring->next_to_clean)
3497                         type = "NTC";
3498                 else
3499                         type = "";
3500
3501                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3502                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3503                         (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3504         } /* for */
3505
3506         /* dump the descriptor caches */
3507         /* rx */
3508         pr_info("Rx descriptor cache in 64bit format\n");
3509         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3510                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3511                         i,
3512                         readl(adapter->hw.hw_addr + i+4),
3513                         readl(adapter->hw.hw_addr + i),
3514                         readl(adapter->hw.hw_addr + i+12),
3515                         readl(adapter->hw.hw_addr + i+8));
3516         }
3517         /* tx */
3518         pr_info("Tx descriptor cache in 64bit format\n");
3519         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3520                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3521                         i,
3522                         readl(adapter->hw.hw_addr + i+4),
3523                         readl(adapter->hw.hw_addr + i),
3524                         readl(adapter->hw.hw_addr + i+12),
3525                         readl(adapter->hw.hw_addr + i+8));
3526         }
3527 exit:
3528         return;
3529 }
3530
3531 /**
3532  * e1000_tx_timeout - Respond to a Tx Hang
3533  * @netdev: network interface device structure
3534  **/
3535 static void e1000_tx_timeout(struct net_device *netdev)
3536 {
3537         struct e1000_adapter *adapter = netdev_priv(netdev);
3538
3539         /* Do the reset outside of interrupt context */
3540         adapter->tx_timeout_count++;
3541         schedule_work(&adapter->reset_task);
3542 }
3543
3544 static void e1000_reset_task(struct work_struct *work)
3545 {
3546         struct e1000_adapter *adapter =
3547                 container_of(work, struct e1000_adapter, reset_task);
3548
3549         e_err(drv, "Reset adapter\n");
3550         e1000_reinit_locked(adapter);
3551 }
3552
3553 /**
3554  * e1000_get_stats - Get System Network Statistics
3555  * @netdev: network interface device structure
3556  *
3557  * Returns the address of the device statistics structure.
3558  * The statistics are actually updated from the watchdog.
3559  **/
3560 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3561 {
3562         /* only return the current stats */
3563         return &netdev->stats;
3564 }
3565
3566 /**
3567  * e1000_change_mtu - Change the Maximum Transfer Unit
3568  * @netdev: network interface device structure
3569  * @new_mtu: new value for maximum frame size
3570  *
3571  * Returns 0 on success, negative on failure
3572  **/
3573 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3574 {
3575         struct e1000_adapter *adapter = netdev_priv(netdev);
3576         struct e1000_hw *hw = &adapter->hw;
3577         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3578
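        /* For example, with the usual 14-byte Ethernet header and 4-byte FCS
         * (ENET_HEADER_SIZE and ETHERNET_FCS_SIZE), the default 1500-byte MTU
         * corresponds to a 1518-byte max_frame.
         */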
3579         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3580             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3581                 e_err(probe, "Invalid MTU setting\n");
3582                 return -EINVAL;
3583         }
3584
3585         /* Adapter-specific max frame size limits. */
3586         switch (hw->mac_type) {
3587         case e1000_undefined ... e1000_82542_rev2_1:
3588                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3589                         e_err(probe, "Jumbo Frames not supported.\n");
3590                         return -EINVAL;
3591                 }
3592                 break;
3593         default:
3594                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3595                 break;
3596         }
3597
3598         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3599                 msleep(1);
3600         /* e1000_down has a dependency on max_frame_size */
3601         hw->max_frame_size = max_frame;
3602         if (netif_running(netdev)) {
3603                 /* prevent buffers from being reallocated */
3604                 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3605                 e1000_down(adapter);
3606         }
3607
3608         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3609          * means we reserve 2 more; this pushes us to allocate from the next
3610          * larger slab size,
3611          * i.e. RXBUFFER_2048 --> size-4096 slab.
3612          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3613          * fragmented skbs.
3614          */
3615
3616         if (max_frame <= E1000_RXBUFFER_2048)
3617                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3618         else
3619 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3620                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3621 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3622                 adapter->rx_buffer_len = PAGE_SIZE;
3623 #endif
3624
3625         /* adjust allocation if LPE protects us, and we aren't using SBP */
3626         if (!hw->tbi_compatibility_on &&
3627             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3628              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3629                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3630
3631         pr_info("%s changing MTU from %d to %d\n",
3632                 netdev->name, netdev->mtu, new_mtu);
3633         netdev->mtu = new_mtu;
3634
3635         if (netif_running(netdev))
3636                 e1000_up(adapter);
3637         else
3638                 e1000_reset(adapter);
3639
3640         clear_bit(__E1000_RESETTING, &adapter->flags);
3641
3642         return 0;
3643 }
3644
3645 /**
3646  * e1000_update_stats - Update the board statistics counters
3647  * @adapter: board private structure
3648  **/
3649 void e1000_update_stats(struct e1000_adapter *adapter)
3650 {
3651         struct net_device *netdev = adapter->netdev;
3652         struct e1000_hw *hw = &adapter->hw;
3653         struct pci_dev *pdev = adapter->pdev;
3654         unsigned long flags;
3655         u16 phy_tmp;
3656
3657 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3658
3659         /* Prevent stats update while adapter is being reset, or if the pci
3660          * connection is down.
3661          */
3662         if (adapter->link_speed == 0)
3663                 return;
3664         if (pci_channel_offline(pdev))
3665                 return;
3666
3667         spin_lock_irqsave(&adapter->stats_lock, flags);
3668
3669         /* these counters are modified from e1000_tbi_adjust_stats,
3670          * called from the interrupt context, so they must only
3671          * be written while holding adapter->stats_lock
3672          */
3673
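        /* The hardware statistics registers are clear-on-read, which is why
         * every read below is accumulated into the software counters rather
         * than assigned.  64-bit counters are read as low/high register
         * pairs, e.g. GORCL/GORCH for good octets received.
         */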
3674         adapter->stats.crcerrs += er32(CRCERRS);
3675         adapter->stats.gprc += er32(GPRC);
3676         adapter->stats.gorcl += er32(GORCL);
3677         adapter->stats.gorch += er32(GORCH);
3678         adapter->stats.bprc += er32(BPRC);
3679         adapter->stats.mprc += er32(MPRC);
3680         adapter->stats.roc += er32(ROC);
3681
3682         adapter->stats.prc64 += er32(PRC64);
3683         adapter->stats.prc127 += er32(PRC127);
3684         adapter->stats.prc255 += er32(PRC255);
3685         adapter->stats.prc511 += er32(PRC511);
3686         adapter->stats.prc1023 += er32(PRC1023);
3687         adapter->stats.prc1522 += er32(PRC1522);
3688
3689         adapter->stats.symerrs += er32(SYMERRS);
3690         adapter->stats.mpc += er32(MPC);
3691         adapter->stats.scc += er32(SCC);
3692         adapter->stats.ecol += er32(ECOL);
3693         adapter->stats.mcc += er32(MCC);
3694         adapter->stats.latecol += er32(LATECOL);
3695         adapter->stats.dc += er32(DC);
3696         adapter->stats.sec += er32(SEC);
3697         adapter->stats.rlec += er32(RLEC);
3698         adapter->stats.xonrxc += er32(XONRXC);
3699         adapter->stats.xontxc += er32(XONTXC);
3700         adapter->stats.xoffrxc += er32(XOFFRXC);
3701         adapter->stats.xofftxc += er32(XOFFTXC);
3702         adapter->stats.fcruc += er32(FCRUC);
3703         adapter->stats.gptc += er32(GPTC);
3704         adapter->stats.gotcl += er32(GOTCL);
3705         adapter->stats.gotch += er32(GOTCH);
3706         adapter->stats.rnbc += er32(RNBC);
3707         adapter->stats.ruc += er32(RUC);
3708         adapter->stats.rfc += er32(RFC);
3709         adapter->stats.rjc += er32(RJC);
3710         adapter->stats.torl += er32(TORL);
3711         adapter->stats.torh += er32(TORH);
3712         adapter->stats.totl += er32(TOTL);
3713         adapter->stats.toth += er32(TOTH);
3714         adapter->stats.tpr += er32(TPR);
3715
3716         adapter->stats.ptc64 += er32(PTC64);
3717         adapter->stats.ptc127 += er32(PTC127);
3718         adapter->stats.ptc255 += er32(PTC255);
3719         adapter->stats.ptc511 += er32(PTC511);
3720         adapter->stats.ptc1023 += er32(PTC1023);
3721         adapter->stats.ptc1522 += er32(PTC1522);
3722
3723         adapter->stats.mptc += er32(MPTC);
3724         adapter->stats.bptc += er32(BPTC);
3725
3726         /* used for adaptive IFS */
3727
3728         hw->tx_packet_delta = er32(TPT);
3729         adapter->stats.tpt += hw->tx_packet_delta;
3730         hw->collision_delta = er32(COLC);
3731         adapter->stats.colc += hw->collision_delta;
3732
3733         if (hw->mac_type >= e1000_82543) {
3734                 adapter->stats.algnerrc += er32(ALGNERRC);
3735                 adapter->stats.rxerrc += er32(RXERRC);
3736                 adapter->stats.tncrs += er32(TNCRS);
3737                 adapter->stats.cexterr += er32(CEXTERR);
3738                 adapter->stats.tsctc += er32(TSCTC);
3739                 adapter->stats.tsctfc += er32(TSCTFC);
3740         }
3741
3742         /* Fill out the OS statistics structure */
3743         netdev->stats.multicast = adapter->stats.mprc;
3744         netdev->stats.collisions = adapter->stats.colc;
3745
3746         /* Rx Errors */
3747
3748         /* RLEC on some newer hardware can be incorrect so build
3749          * our own version based on RUC and ROC
3750          */
3751         netdev->stats.rx_errors = adapter->stats.rxerrc +
3752                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3753                 adapter->stats.ruc + adapter->stats.roc +
3754                 adapter->stats.cexterr;
3755         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3756         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3757         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3758         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3759         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3760
3761         /* Tx Errors */
3762         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3763         netdev->stats.tx_errors = adapter->stats.txerrc;
3764         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3765         netdev->stats.tx_window_errors = adapter->stats.latecol;
3766         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3767         if (hw->bad_tx_carr_stats_fd &&
3768             adapter->link_duplex == FULL_DUPLEX) {
3769                 netdev->stats.tx_carrier_errors = 0;
3770                 adapter->stats.tncrs = 0;
3771         }
3772
3773         /* Tx Dropped needs to be maintained elsewhere */
3774
3775         /* Phy Stats */
3776         if (hw->media_type == e1000_media_type_copper) {
3777                 if ((adapter->link_speed == SPEED_1000) &&
3778                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3779                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3780                         adapter->phy_stats.idle_errors += phy_tmp;
3781                 }
3782
3783                 if ((hw->mac_type <= e1000_82546) &&
3784                    (hw->phy_type == e1000_phy_m88) &&
3785                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3786                         adapter->phy_stats.receive_errors += phy_tmp;
3787         }
3788
3789         /* Management Stats */
3790         if (hw->has_smbus) {
3791                 adapter->stats.mgptc += er32(MGTPTC);
3792                 adapter->stats.mgprc += er32(MGTPRC);
3793                 adapter->stats.mgpdc += er32(MGTPDC);
3794         }
3795
3796         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3797 }
3798
3799 /**
3800  * e1000_intr - Interrupt Handler
3801  * @irq: interrupt number
3802  * @data: pointer to a network interface device structure
3803  **/
3804 static irqreturn_t e1000_intr(int irq, void *data)
3805 {
3806         struct net_device *netdev = data;
3807         struct e1000_adapter *adapter = netdev_priv(netdev);
3808         struct e1000_hw *hw = &adapter->hw;
3809         u32 icr = er32(ICR);
3810
3811         if (unlikely((!icr)))
3812                 return IRQ_NONE;  /* Not our interrupt */
3813
3814         /* We might have caused the interrupt, but the above
3815          * read cleared it; and just in case the driver is
3816          * down, there is nothing to do, so return handled.
3817          */
3818         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3819                 return IRQ_HANDLED;
3820
3821         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3822                 hw->get_link_status = 1;
3823                 /* guard against interrupt when we're going down */
3824                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3825                         schedule_delayed_work(&adapter->watchdog_task, 1);
3826         }
3827
3828         /* disable interrupts, without the synchronize_irq bit */
3829         ew32(IMC, ~0);
3830         E1000_WRITE_FLUSH();
3831
3832         if (likely(napi_schedule_prep(&adapter->napi))) {
3833                 adapter->total_tx_bytes = 0;
3834                 adapter->total_tx_packets = 0;
3835                 adapter->total_rx_bytes = 0;
3836                 adapter->total_rx_packets = 0;
3837                 __napi_schedule(&adapter->napi);
3838         } else {
3839                 /* this really should not happen! if it does it is basically a
3840                  * bug, but not a hard error, so enable ints and continue
3841                  */
3842                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3843                         e1000_irq_enable(adapter);
3844         }
3845
3846         return IRQ_HANDLED;
3847 }
3848
3849 /**
3850  * e1000_clean - NAPI Rx polling callback
3851  * @napi: napi struct containing our adapter
3852  **/
3853 static int e1000_clean(struct napi_struct *napi, int budget)
3854 {
3855         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3856                                                      napi);
3857         int tx_clean_complete = 0, work_done = 0;
3858
3859         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3860
3861         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3862
3863         if (!tx_clean_complete)
3864                 work_done = budget;
3865
3866         /* If budget not fully consumed, exit the polling mode */
3867         if (work_done < budget) {
3868                 if (likely(adapter->itr_setting & 3))
3869                         e1000_set_itr(adapter);
3870                 napi_complete_done(napi, work_done);
3871                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3872                         e1000_irq_enable(adapter);
3873         }
3874
3875         return work_done;
3876 }
3877
3878 /**
3879  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3880  * @adapter: board private structure
3881  **/
3882 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3883                                struct e1000_tx_ring *tx_ring)
3884 {
3885         struct e1000_hw *hw = &adapter->hw;
3886         struct net_device *netdev = adapter->netdev;
3887         struct e1000_tx_desc *tx_desc, *eop_desc;
3888         struct e1000_tx_buffer *buffer_info;
3889         unsigned int i, eop;
3890         unsigned int count = 0;
3891         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3892         unsigned int bytes_compl = 0, pkts_compl = 0;
3893
3894         i = tx_ring->next_to_clean;
3895         eop = tx_ring->buffer_info[i].next_to_watch;
3896         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3897
3898         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3899                (count < tx_ring->count)) {
3900                 bool cleaned = false;
3901                 dma_rmb();      /* read buffer_info after eop_desc */
3902                 for ( ; !cleaned; count++) {
3903                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3904                         buffer_info = &tx_ring->buffer_info[i];
3905                         cleaned = (i == eop);
3906
3907                         if (cleaned) {
3908                                 total_tx_packets += buffer_info->segs;
3909                                 total_tx_bytes += buffer_info->bytecount;
3910                                 if (buffer_info->skb) {
3911                                         bytes_compl += buffer_info->skb->len;
3912                                         pkts_compl++;
3913                                 }
3914
3915                         }
3916                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3917                         tx_desc->upper.data = 0;
3918
3919                         if (unlikely(++i == tx_ring->count))
3920                                 i = 0;
3921                 }
3922
3923                 eop = tx_ring->buffer_info[i].next_to_watch;
3924                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3925         }
3926
3927         /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3928          * which will reuse the cleaned buffers.
3929          */
3930         smp_store_release(&tx_ring->next_to_clean, i);
3931
3932         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3933
3934 #define TX_WAKE_THRESHOLD 32
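        /* Only wake the queue once at least TX_WAKE_THRESHOLD (32) descriptors
         * are free again, so a nearly full ring does not bounce between
         * stopped and started on every reclaimed packet.
         */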
3935         if (unlikely(count && netif_carrier_ok(netdev) &&
3936                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3937                 /* Make sure that anybody stopping the queue after this
3938                  * sees the new next_to_clean.
3939                  */
3940                 smp_mb();
3941
3942                 if (netif_queue_stopped(netdev) &&
3943                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3944                         netif_wake_queue(netdev);
3945                         ++adapter->restart_queue;
3946                 }
3947         }
3948
3949         if (adapter->detect_tx_hung) {
3950                 /* Detect a transmit hang in hardware, this serializes the
3951                  * check with the clearing of time_stamp and movement of i
3952                  */
3953                 adapter->detect_tx_hung = false;
3954                 if (tx_ring->buffer_info[eop].time_stamp &&
3955                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3956                                (adapter->tx_timeout_factor * HZ)) &&
3957                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3958
3959                         /* detected Tx unit hang */
3960                         e_err(drv, "Detected Tx Unit Hang\n"
3961                               "  Tx Queue             <%lu>\n"
3962                               "  TDH                  <%x>\n"
3963                               "  TDT                  <%x>\n"
3964                               "  next_to_use          <%x>\n"
3965                               "  next_to_clean        <%x>\n"
3966                               "buffer_info[next_to_clean]\n"
3967                               "  time_stamp           <%lx>\n"
3968                               "  next_to_watch        <%x>\n"
3969                               "  jiffies              <%lx>\n"
3970                               "  next_to_watch.status <%x>\n",
3971                                 (unsigned long)(tx_ring - adapter->tx_ring),
3972                                 readl(hw->hw_addr + tx_ring->tdh),
3973                                 readl(hw->hw_addr + tx_ring->tdt),
3974                                 tx_ring->next_to_use,
3975                                 tx_ring->next_to_clean,
3976                                 tx_ring->buffer_info[eop].time_stamp,
3977                                 eop,
3978                                 jiffies,
3979                                 eop_desc->upper.fields.status);
3980                         e1000_dump(adapter);
3981                         netif_stop_queue(netdev);
3982                 }
3983         }
3984         adapter->total_tx_bytes += total_tx_bytes;
3985         adapter->total_tx_packets += total_tx_packets;
3986         netdev->stats.tx_bytes += total_tx_bytes;
3987         netdev->stats.tx_packets += total_tx_packets;
3988         return count < tx_ring->count;
3989 }
3990
3991 /**
3992  * e1000_rx_checksum - Receive Checksum Offload for 82543
3993  * @adapter:     board private structure
3994  * @status_err:  receive descriptor status and error fields
3995  * @csum:        receive descriptor csum field
3996  * @skb:         socket buffer with received data
3997  **/
3998 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3999                               u32 csum, struct sk_buff *skb)
4000 {
4001         struct e1000_hw *hw = &adapter->hw;
4002         u16 status = (u16)status_err;
4003         u8 errors = (u8)(status_err >> 24);
4004
4005         skb_checksum_none_assert(skb);
4006
4007         /* 82543 or newer only */
4008         if (unlikely(hw->mac_type < e1000_82543))
4009                 return;
4010         /* Ignore Checksum bit is set */
4011         if (unlikely(status & E1000_RXD_STAT_IXSM))
4012                 return;
4013         /* TCP/UDP checksum error bit is set */
4014         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
4015                 /* let the stack verify checksum errors */
4016                 adapter->hw_csum_err++;
4017                 return;
4018         }
4019         /* TCP/UDP Checksum has not been calculated */
4020         if (!(status & E1000_RXD_STAT_TCPCS))
4021                 return;
4022
4023         /* It must be a TCP or UDP packet with a valid checksum */
4024         if (likely(status & E1000_RXD_STAT_TCPCS)) {
4025                 /* TCP checksum is good */
4026                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4027         }
4028         adapter->hw_csum_good++;
4029 }
4030
4031 /**
4032  * e1000_consume_page - helper function for jumbo Rx path
4033  **/
4034 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4035                                u16 length)
4036 {
4037         bi->rxbuf.page = NULL;
4038         skb->len += length;
4039         skb->data_len += length;
4040         skb->truesize += PAGE_SIZE;
4041 }
4042
4043 /**
4044  * e1000_receive_skb - helper function to handle rx indications
4045  * @adapter: board private structure
4046  * @status: descriptor status field as written by hardware
4047  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4048  * @skb: pointer to sk_buff to be indicated to stack
4049  */
4050 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4051                               __le16 vlan, struct sk_buff *skb)
4052 {
4053         skb->protocol = eth_type_trans(skb, adapter->netdev);
4054
4055         if (status & E1000_RXD_STAT_VP) {
4056                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4057
4058                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4059         }
4060         napi_gro_receive(&adapter->napi, skb);
4061 }
4062
4063 /**
4064  * e1000_tbi_adjust_stats
4065  * @hw: Struct containing variables accessed by shared code
4066  * @frame_len: The length of the frame in question
4067  * @mac_addr: The Ethernet destination address of the frame in question
4068  *
4069  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4070  */
4071 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4072                                    struct e1000_hw_stats *stats,
4073                                    u32 frame_len, const u8 *mac_addr)
4074 {
4075         u64 carry_bit;
4076
4077         /* First adjust the frame length. */
4078         frame_len--;
4079         /* We need to adjust the statistics counters, since the hardware
4080          * counters overcount this packet as a CRC error and undercount
4081          * the packet as a good packet
4082          */
4083         /* This packet should not be counted as a CRC error. */
4084         stats->crcerrs--;
4085         /* This packet does count as a Good Packet Received. */
4086         stats->gprc++;
4087
4088         /* Adjust the Good Octets received counters */
4089         carry_bit = 0x80000000 & stats->gorcl;
4090         stats->gorcl += frame_len;
4091         /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4092          * Received Count) was one before the addition,
4093          * AND it is zero after, then we lost the carry out,
4094          * need to add one to Gorch (Good Octets Received Count High).
4095          * This could be simplified if all environments supported
4096          * 64-bit integers.
4097          */
4098         if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4099                 stats->gorch++;
4100         /* Is this a broadcast or multicast?  Check broadcast first,
4101          * since the test for a multicast frame will test positive on
4102          * a broadcast frame.
4103          */
4104         if (is_broadcast_ether_addr(mac_addr))
4105                 stats->bprc++;
4106         else if (is_multicast_ether_addr(mac_addr))
4107                 stats->mprc++;
4108
4109         if (frame_len == hw->max_frame_size) {
4110                 /* In this case, the hardware has overcounted the number of
4111                  * oversize frames.
4112                  */
4113                 if (stats->roc > 0)
4114                         stats->roc--;
4115         }
4116
4117         /* Adjust the bin counters when the extra byte put the frame in the
4118          * wrong bin. Remember that the frame_len was adjusted above.
4119          */
4120         if (frame_len == 64) {
4121                 stats->prc64++;
4122                 stats->prc127--;
4123         } else if (frame_len == 127) {
4124                 stats->prc127++;
4125                 stats->prc255--;
4126         } else if (frame_len == 255) {
4127                 stats->prc255++;
4128                 stats->prc511--;
4129         } else if (frame_len == 511) {
4130                 stats->prc511++;
4131                 stats->prc1023--;
4132         } else if (frame_len == 1023) {
4133                 stats->prc1023++;
4134                 stats->prc1522--;
4135         } else if (frame_len == 1522) {
4136                 stats->prc1522++;
4137         }
4138 }
4139
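/* When TBI compatibility is enabled, the hardware may report a CRC error for
 * an otherwise valid frame that ends in a carrier-extend symbol.  TBI_ACCEPT()
 * checks whether the frame is acceptable once that last byte is ignored; if
 * so, e1000_tbi_adjust_stats() above corrects the hardware statistics for the
 * frame that is being accepted after all.
 */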
4140 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4141                                     u8 status, u8 errors,
4142                                     u32 length, const u8 *data)
4143 {
4144         struct e1000_hw *hw = &adapter->hw;
4145         u8 last_byte = *(data + length - 1);
4146
4147         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4148                 unsigned long irq_flags;
4149
4150                 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4151                 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4152                 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4153
4154                 return true;
4155         }
4156
4157         return false;
4158 }
4159
4160 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4161                                           unsigned int bufsz)
4162 {
4163         struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4164
4165         if (unlikely(!skb))
4166                 adapter->alloc_rx_buff_failed++;
4167         return skb;
4168 }
4169
4170 /**
4171  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4172  * @adapter: board private structure
4173  * @rx_ring: ring to clean
4174  * @work_done: amount of napi work completed this call
4175  * @work_to_do: max amount of work allowed for this call to do
4176  *
4177  * The return value indicates whether actual cleaning was done; there
4178  * is no guarantee that everything was cleaned.
4179  */
4180 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4181                                      struct e1000_rx_ring *rx_ring,
4182                                      int *work_done, int work_to_do)
4183 {
4184         struct net_device *netdev = adapter->netdev;
4185         struct pci_dev *pdev = adapter->pdev;
4186         struct e1000_rx_desc *rx_desc, *next_rxd;
4187         struct e1000_rx_buffer *buffer_info, *next_buffer;
4188         u32 length;
4189         unsigned int i;
4190         int cleaned_count = 0;
4191         bool cleaned = false;
4192         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4193
4194         i = rx_ring->next_to_clean;
4195         rx_desc = E1000_RX_DESC(*rx_ring, i);
4196         buffer_info = &rx_ring->buffer_info[i];
4197
4198         while (rx_desc->status & E1000_RXD_STAT_DD) {
4199                 struct sk_buff *skb;
4200                 u8 status;
4201
4202                 if (*work_done >= work_to_do)
4203                         break;
4204                 (*work_done)++;
4205                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4206
4207                 status = rx_desc->status;
4208
4209                 if (++i == rx_ring->count)
4210                         i = 0;
4211
4212                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4213                 prefetch(next_rxd);
4214
4215                 next_buffer = &rx_ring->buffer_info[i];
4216
4217                 cleaned = true;
4218                 cleaned_count++;
4219                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4220                                adapter->rx_buffer_len, DMA_FROM_DEVICE);
4221                 buffer_info->dma = 0;
4222
4223                 length = le16_to_cpu(rx_desc->length);
4224
4225                 /* errors is only valid for DD + EOP descriptors */
4226                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4227                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4228                         u8 *mapped = page_address(buffer_info->rxbuf.page);
4229
4230                         if (e1000_tbi_should_accept(adapter, status,
4231                                                     rx_desc->errors,
4232                                                     length, mapped)) {
4233                                 length--;
4234                         } else if (netdev->features & NETIF_F_RXALL) {
4235                                 goto process_skb;
4236                         } else {
4237                                 /* an error means any chain goes out the window
4238                                  * too
4239                                  */
4240                                 if (rx_ring->rx_skb_top)
4241                                         dev_kfree_skb(rx_ring->rx_skb_top);
4242                                 rx_ring->rx_skb_top = NULL;
4243                                 goto next_desc;
4244                         }
4245                 }
4246
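/* rxtop is shorthand for the ring's rx_skb_top pointer, which carries a
 * partially assembled multi-descriptor (chained) jumbo frame between
 * iterations of this loop.
 */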
4247 #define rxtop rx_ring->rx_skb_top
4248 process_skb:
4249                 if (!(status & E1000_RXD_STAT_EOP)) {
4250                         /* this descriptor is only the beginning (or middle) */
4251                         if (!rxtop) {
4252                                 /* this is the beginning of a chain */
4253                                 rxtop = napi_get_frags(&adapter->napi);
4254                                 if (!rxtop)
4255                                         break;
4256
4257                                 skb_fill_page_desc(rxtop, 0,
4258                                                    buffer_info->rxbuf.page,
4259                                                    0, length);
4260                         } else {
4261                                 /* this is the middle of a chain */
4262                                 skb_fill_page_desc(rxtop,
4263                                     skb_shinfo(rxtop)->nr_frags,
4264                                     buffer_info->rxbuf.page, 0, length);
4265                         }
4266                         e1000_consume_page(buffer_info, rxtop, length);
4267                         goto next_desc;
4268                 } else {
4269                         if (rxtop) {
4270                                 /* end of the chain */
4271                                 skb_fill_page_desc(rxtop,
4272                                     skb_shinfo(rxtop)->nr_frags,
4273                                     buffer_info->rxbuf.page, 0, length);
4274                                 skb = rxtop;
4275                                 rxtop = NULL;
4276                                 e1000_consume_page(buffer_info, skb, length);
4277                         } else {
4278                                 struct page *p;
4279                                 /* no chain, got EOP, this buf holds the whole
4280                                  * packet; copybreak to save the put_page/alloc_page
4281                                  */
4282                                 p = buffer_info->rxbuf.page;
4283                                 if (length <= copybreak) {
4284                                         u8 *vaddr;
4285
4286                                         if (likely(!(netdev->features & NETIF_F_RXFCS)))
4287                                                 length -= 4;
4288                                         skb = e1000_alloc_rx_skb(adapter,
4289                                                                  length);
4290                                         if (!skb)
4291                                                 break;
4292
4293                                         vaddr = kmap_atomic(p);
4294                                         memcpy(skb_tail_pointer(skb), vaddr,
4295                                                length);
4296                                         kunmap_atomic(vaddr);
4297                                         /* re-use the page, so don't erase
4298                                          * buffer_info->rxbuf.page
4299                                          */
4300                                         skb_put(skb, length);
4301                                         e1000_rx_checksum(adapter,
4302                                                           status | rx_desc->errors << 24,
4303                                                           le16_to_cpu(rx_desc->csum), skb);
4304
4305                                         total_rx_bytes += skb->len;
4306                                         total_rx_packets++;
4307
4308                                         e1000_receive_skb(adapter, status,
4309                                                           rx_desc->special, skb);
4310                                         goto next_desc;
4311                                 } else {
4312                                         skb = napi_get_frags(&adapter->napi);
4313                                         if (!skb) {
4314                                                 adapter->alloc_rx_buff_failed++;
4315                                                 break;
4316                                         }
4317                                         skb_fill_page_desc(skb, 0, p, 0,
4318                                                            length);
4319                                         e1000_consume_page(buffer_info, skb,
4320                                                            length);
4321                                 }
4322                         }
4323                 }
4324
4325                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4326                 e1000_rx_checksum(adapter,
4327                                   (u32)(status) |
4328                                   ((u32)(rx_desc->errors) << 24),
4329                                   le16_to_cpu(rx_desc->csum), skb);
4330
4331                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4332                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4333                         pskb_trim(skb, skb->len - 4);
4334                 total_rx_packets++;
4335
4336                 if (status & E1000_RXD_STAT_VP) {
4337                         __le16 vlan = rx_desc->special;
4338                         u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4339
4340                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4341                 }
4342
4343                 napi_gro_frags(&adapter->napi);
4344
4345 next_desc:
4346                 rx_desc->status = 0;
4347
4348                 /* return some buffers to hardware, one at a time is too slow */
4349                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4350                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4351                         cleaned_count = 0;
4352                 }
4353
4354                 /* use prefetched values */
4355                 rx_desc = next_rxd;
4356                 buffer_info = next_buffer;
4357         }
4358         rx_ring->next_to_clean = i;
4359
4360         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4361         if (cleaned_count)
4362                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4363
4364         adapter->total_rx_packets += total_rx_packets;
4365         adapter->total_rx_bytes += total_rx_bytes;
4366         netdev->stats.rx_bytes += total_rx_bytes;
4367         netdev->stats.rx_packets += total_rx_packets;
4368         return cleaned;
4369 }
4370
4371 /* this should improve performance for small packets with large amounts
4372  * of reassembly being done in the stack
4373  */
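/* Packets no larger than the copybreak threshold (tunable via the copybreak
 * module parameter) are copied into a small freshly allocated skb so that the
 * original receive buffer can be handed straight back to the hardware.
 */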
4374 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4375                                        struct e1000_rx_buffer *buffer_info,
4376                                        u32 length, const void *data)
4377 {
4378         struct sk_buff *skb;
4379
4380         if (length > copybreak)
4381                 return NULL;
4382
4383         skb = e1000_alloc_rx_skb(adapter, length);
4384         if (!skb)
4385                 return NULL;
4386
4387         dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4388                                 length, DMA_FROM_DEVICE);
4389
4390         memcpy(skb_put(skb, length), data, length);
4391
4392         return skb;
4393 }
4394
4395 /**
4396  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4397  * @adapter: board private structure
4398  * @rx_ring: ring to clean
4399  * @work_done: amount of napi work completed this call
4400  * @work_to_do: max amount of work allowed for this call to do
4401  */
4402 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4403                                struct e1000_rx_ring *rx_ring,
4404                                int *work_done, int work_to_do)
4405 {
4406         struct net_device *netdev = adapter->netdev;
4407         struct pci_dev *pdev = adapter->pdev;
4408         struct e1000_rx_desc *rx_desc, *next_rxd;
4409         struct e1000_rx_buffer *buffer_info, *next_buffer;
4410         u32 length;
4411         unsigned int i;
4412         int cleaned_count = 0;
4413         bool cleaned = false;
4414         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4415
4416         i = rx_ring->next_to_clean;
4417         rx_desc = E1000_RX_DESC(*rx_ring, i);
4418         buffer_info = &rx_ring->buffer_info[i];
4419
4420         while (rx_desc->status & E1000_RXD_STAT_DD) {
4421                 struct sk_buff *skb;
4422                 u8 *data;
4423                 u8 status;
4424
4425                 if (*work_done >= work_to_do)
4426                         break;
4427                 (*work_done)++;
4428                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4429
4430                 status = rx_desc->status;
4431                 length = le16_to_cpu(rx_desc->length);
4432
4433                 data = buffer_info->rxbuf.data;
4434                 prefetch(data);
4435                 skb = e1000_copybreak(adapter, buffer_info, length, data);
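                /* If the packet was too large for copybreak, wrap the receive
                 * buffer itself in an skb with build_skb(); the frag was
                 * allocated with E1000_HEADROOM in front of the data, which is
                 * reserved again below.  The buffer is then unmapped and
                 * forgotten so a replacement is allocated later.
                 */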
4436                 if (!skb) {
4437                         unsigned int frag_len = e1000_frag_len(adapter);
4438
4439                         skb = build_skb(data - E1000_HEADROOM, frag_len);
4440                         if (!skb) {
4441                                 adapter->alloc_rx_buff_failed++;
4442                                 break;
4443                         }
4444
4445                         skb_reserve(skb, E1000_HEADROOM);
4446                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4447                                          adapter->rx_buffer_len,
4448                                          DMA_FROM_DEVICE);
4449                         buffer_info->dma = 0;
4450                         buffer_info->rxbuf.data = NULL;
4451                 }
4452
4453                 if (++i == rx_ring->count)
4454                         i = 0;
4455
4456                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4457                 prefetch(next_rxd);
4458
4459                 next_buffer = &rx_ring->buffer_info[i];
4460
4461                 cleaned = true;
4462                 cleaned_count++;
4463
4464                 /* !EOP means multiple descriptors were used to store a single
4465                  * packet; if that's the case we need to toss it.  In fact, we
4466                  * need to toss every packet with the EOP bit clear and the next
4467                  * frame that _does_ have the EOP bit set, as it is by
4468                  * definition only a frame fragment
4469                  */
4470                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4471                         adapter->discarding = true;
4472
4473                 if (adapter->discarding) {
4474                         /* All receives must fit into a single buffer */
4475                         netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4476                         dev_kfree_skb(skb);
4477                         if (status & E1000_RXD_STAT_EOP)
4478                                 adapter->discarding = false;
4479                         goto next_desc;
4480                 }
4481
4482                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4483                         if (e1000_tbi_should_accept(adapter, status,
4484                                                     rx_desc->errors,
4485                                                     length, data)) {
4486                                 length--;
4487                         } else if (netdev->features & NETIF_F_RXALL) {
4488                                 goto process_skb;
4489                         } else {
4490                                 dev_kfree_skb(skb);
4491                                 goto next_desc;
4492                         }
4493                 }
4494
4495 process_skb:
4496                 total_rx_bytes += (length - 4); /* don't count FCS */
4497                 total_rx_packets++;
4498
4499                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4500                         /* adjust length to remove Ethernet CRC, this must be
4501                          * done after the TBI_ACCEPT workaround above
4502                          */
4503                         length -= 4;
4504
4505                 if (buffer_info->rxbuf.data == NULL)
4506                         skb_put(skb, length);
4507                 else /* copybreak skb */
4508                         skb_trim(skb, length);
4509
4510                 /* Receive Checksum Offload */
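                /* The legacy descriptor keeps status and errors in separate
                 * bytes; fold them into one word with the error byte in bits
                 * 31:24, which is the layout e1000_rx_checksum() expects.
                 */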
4511                 e1000_rx_checksum(adapter,
4512                                   (u32)(status) |
4513                                   ((u32)(rx_desc->errors) << 24),
4514                                   le16_to_cpu(rx_desc->csum), skb);
4515
4516                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4517
4518 next_desc:
4519                 rx_desc->status = 0;
4520
4521                 /* return some buffers to hardware, one at a time is too slow */
4522                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4523                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4524                         cleaned_count = 0;
4525                 }
4526
4527                 /* use prefetched values */
4528                 rx_desc = next_rxd;
4529                 buffer_info = next_buffer;
4530         }
4531         rx_ring->next_to_clean = i;
4532
4533         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4534         if (cleaned_count)
4535                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4536
4537         adapter->total_rx_packets += total_rx_packets;
4538         adapter->total_rx_bytes += total_rx_bytes;
4539         netdev->stats.rx_bytes += total_rx_bytes;
4540         netdev->stats.rx_packets += total_rx_packets;
4541         return cleaned;
4542 }
4543
4544 /**
4545  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4546  * @adapter: address of board private structure
4547  * @rx_ring: pointer to receive ring structure
4548  * @cleaned_count: number of buffers to allocate this pass
4549  **/
4550 static void
4551 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4552                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4553 {
4554         struct pci_dev *pdev = adapter->pdev;
4555         struct e1000_rx_desc *rx_desc;
4556         struct e1000_rx_buffer *buffer_info;
4557         unsigned int i;
4558
4559         i = rx_ring->next_to_use;
4560         buffer_info = &rx_ring->buffer_info[i];
4561
4562         while (cleaned_count--) {
4563                 /* allocate a new page if necessary */
4564                 if (!buffer_info->rxbuf.page) {
4565                         buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4566                         if (unlikely(!buffer_info->rxbuf.page)) {
4567                                 adapter->alloc_rx_buff_failed++;
4568                                 break;
4569                         }
4570                 }
4571
4572                 if (!buffer_info->dma) {
4573                         buffer_info->dma = dma_map_page(&pdev->dev,
4574                                                         buffer_info->rxbuf.page, 0,
4575                                                         adapter->rx_buffer_len,
4576                                                         DMA_FROM_DEVICE);
4577                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4578                                 put_page(buffer_info->rxbuf.page);
4579                                 buffer_info->rxbuf.page = NULL;
4580                                 buffer_info->dma = 0;
4581                                 adapter->alloc_rx_buff_failed++;
4582                                 break;
4583                         }
4584                 }
4585
4586                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4587                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4588
4589                 if (unlikely(++i == rx_ring->count))
4590                         i = 0;
4591                 buffer_info = &rx_ring->buffer_info[i];
4592         }
4593
4594         if (likely(rx_ring->next_to_use != i)) {
4595                 rx_ring->next_to_use = i;
4596                 if (unlikely(i-- == 0))
4597                         i = (rx_ring->count - 1);
4598
4599                 /* Force memory writes to complete before letting h/w
4600                  * know there are new descriptors to fetch.  (Only
4601                  * applicable for weak-ordered memory model archs,
4602                  * such as IA-64).
4603                  */
4604                 wmb();
4605                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4606         }
4607 }
4608
4609 /**
4610  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4611  * @adapter: address of board private structure
 * @rx_ring: pointer to the receive ring to refill
 * @cleaned_count: number of buffers to allocate this pass
4612  **/
4613 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4614                                    struct e1000_rx_ring *rx_ring,
4615                                    int cleaned_count)
4616 {
4617         struct e1000_hw *hw = &adapter->hw;
4618         struct pci_dev *pdev = adapter->pdev;
4619         struct e1000_rx_desc *rx_desc;
4620         struct e1000_rx_buffer *buffer_info;
4621         unsigned int i;
4622         unsigned int bufsz = adapter->rx_buffer_len;
4623
4624         i = rx_ring->next_to_use;
4625         buffer_info = &rx_ring->buffer_info[i];
4626
4627         while (cleaned_count--) {
4628                 void *data;
4629
4630                 if (buffer_info->rxbuf.data)
4631                         goto skip;
4632
4633                 data = e1000_alloc_frag(adapter);
4634                 if (!data) {
4635                         /* Better luck next round */
4636                         adapter->alloc_rx_buff_failed++;
4637                         break;
4638                 }
4639
4640                 /* Fix for errata 23, can't cross 64kB boundary */
4641                 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4642                         void *olddata = data;
4643                         e_err(rx_err, "skb align check failed: %u bytes at "
4644                               "%p\n", bufsz, data);
4645                         /* Try again, without freeing the previous */
4646                         data = e1000_alloc_frag(adapter);
4647                         /* Failed allocation, critical failure */
4648                         if (!data) {
4649                                 skb_free_frag(olddata);
4650                                 adapter->alloc_rx_buff_failed++;
4651                                 break;
4652                         }
4653
4654                         if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4655                                 /* give up */
4656                                 skb_free_frag(data);
4657                                 skb_free_frag(olddata);
4658                                 adapter->alloc_rx_buff_failed++;
4659                                 break;
4660                         }
4661
4662                         /* Use new allocation */
4663                         skb_free_frag(olddata);
4664                 }
4665                 buffer_info->dma = dma_map_single(&pdev->dev,
4666                                                   data,
4667                                                   adapter->rx_buffer_len,
4668                                                   DMA_FROM_DEVICE);
4669                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4670                         skb_free_frag(data);
4671                         buffer_info->dma = 0;
4672                         adapter->alloc_rx_buff_failed++;
4673                         break;
4674                 }
4675
4676                 /* XXX if it was allocated cleanly it will never map to a
4677                  * boundary crossing
4678                  */
4679
4680                 /* Fix for errata 23, can't cross 64kB boundary */
4681                 if (!e1000_check_64k_bound(adapter,
4682                                         (void *)(unsigned long)buffer_info->dma,
4683                                         adapter->rx_buffer_len)) {
4684                         e_err(rx_err, "dma align check failed: %u bytes at "
4685                               "%p\n", adapter->rx_buffer_len,
4686                               (void *)(unsigned long)buffer_info->dma);
4687
4688                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4689                                          adapter->rx_buffer_len,
4690                                          DMA_FROM_DEVICE);
4691
4692                         skb_free_frag(data);
4693                         buffer_info->rxbuf.data = NULL;
4694                         buffer_info->dma = 0;
4695
4696                         adapter->alloc_rx_buff_failed++;
4697                         break;
4698                 }
4699                 buffer_info->rxbuf.data = data;
4700  skip:
4701                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4702                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4703
4704                 if (unlikely(++i == rx_ring->count))
4705                         i = 0;
4706                 buffer_info = &rx_ring->buffer_info[i];
4707         }
4708
4709         if (likely(rx_ring->next_to_use != i)) {
4710                 rx_ring->next_to_use = i;
4711                 if (unlikely(i-- == 0))
4712                         i = (rx_ring->count - 1);
4713
4714                 /* Force memory writes to complete before letting h/w
4715                  * know there are new descriptors to fetch.  (Only
4716                  * applicable for weak-ordered memory model archs,
4717                  * such as IA-64).
4718                  */
4719                 wmb();
4720                 writel(i, hw->hw_addr + rx_ring->rdt);
4721         }
4722 }
4723
4724 /**
4725  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4726  * @adapter: board private structure
4727  **/
4728 static void e1000_smartspeed(struct e1000_adapter *adapter)
4729 {
4730         struct e1000_hw *hw = &adapter->hw;
4731         u16 phy_status;
4732         u16 phy_ctrl;
4733
4734         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4735            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4736                 return;
4737
4738         if (adapter->smartspeed == 0) {
4739                 /* If Master/Slave config fault is asserted twice,
4740                  * we assume back-to-back
4741                  */
4742                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4743                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4744                         return;
4745                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4746                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4747                         return;
4748                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4749                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4750                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4751                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4752                                             phy_ctrl);
4753                         adapter->smartspeed++;
4754                         if (!e1000_phy_setup_autoneg(hw) &&
4755                            !e1000_read_phy_reg(hw, PHY_CTRL,
4756                                                &phy_ctrl)) {
4757                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4758                                              MII_CR_RESTART_AUTO_NEG);
4759                                 e1000_write_phy_reg(hw, PHY_CTRL,
4760                                                     phy_ctrl);
4761                         }
4762                 }
4763                 return;
4764         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4765                 /* If still no link, perhaps using 2/3 pair cable */
4766                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4767                 phy_ctrl |= CR_1000T_MS_ENABLE;
4768                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4769                 if (!e1000_phy_setup_autoneg(hw) &&
4770                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4771                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4772                                      MII_CR_RESTART_AUTO_NEG);
4773                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4774                 }
4775         }
4776         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4777         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4778                 adapter->smartspeed = 0;
4779 }
4780
4781 /**
4782  * e1000_ioctl - handle device-specific ioctl requests
4783  * @netdev: network interface device structure
4784  * @ifr: pointer to the interface request structure
4785  * @cmd: ioctl command to execute
4786  **/
4787 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4788 {
4789         switch (cmd) {
4790         case SIOCGMIIPHY:
4791         case SIOCGMIIREG:
4792         case SIOCSMIIREG:
4793                 return e1000_mii_ioctl(netdev, ifr, cmd);
4794         default:
4795                 return -EOPNOTSUPP;
4796         }
4797 }
4798
4799 /**
4800  * e1000_mii_ioctl - read or write MII PHY registers
4801  * @netdev: network interface device structure
4802  * @ifr: pointer to the interface request structure
4803  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4804  **/
4805 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4806                            int cmd)
4807 {
4808         struct e1000_adapter *adapter = netdev_priv(netdev);
4809         struct e1000_hw *hw = &adapter->hw;
4810         struct mii_ioctl_data *data = if_mii(ifr);
4811         int retval;
4812         u16 mii_reg;
4813         unsigned long flags;
4814
4815         if (hw->media_type != e1000_media_type_copper)
4816                 return -EOPNOTSUPP;
4817
4818         switch (cmd) {
4819         case SIOCGMIIPHY:
4820                 data->phy_id = hw->phy_addr;
4821                 break;
4822         case SIOCGMIIREG:
4823                 spin_lock_irqsave(&adapter->stats_lock, flags);
4824                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4825                                    &data->val_out)) {
4826                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4827                         return -EIO;
4828                 }
4829                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4830                 break;
4831         case SIOCSMIIREG:
4832                 if (data->reg_num & ~(0x1F))
4833                         return -EFAULT;
4834                 mii_reg = data->val_in;
4835                 spin_lock_irqsave(&adapter->stats_lock, flags);
4836                 if (e1000_write_phy_reg(hw, data->reg_num,
4837                                         mii_reg)) {
4838                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4839                         return -EIO;
4840                 }
4841                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
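                /* Mirror side effects of the register write into the driver:
                 * writes to PHY_CTRL update the cached autoneg/speed/duplex
                 * settings and trigger a reset or reinit so the MAC matches
                 * the PHY, and writes to the M88 specific-control registers
                 * require a PHY reset to take effect.
                 */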
4842                 if (hw->media_type == e1000_media_type_copper) {
4843                         switch (data->reg_num) {
4844                         case PHY_CTRL:
4845                                 if (mii_reg & MII_CR_POWER_DOWN)
4846                                         break;
4847                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4848                                         hw->autoneg = 1;
4849                                         hw->autoneg_advertised = 0x2F;
4850                                 } else {
4851                                         u32 speed;
4852                                         if (mii_reg & 0x40)
4853                                                 speed = SPEED_1000;
4854                                         else if (mii_reg & 0x2000)
4855                                                 speed = SPEED_100;
4856                                         else
4857                                                 speed = SPEED_10;
4858                                         retval = e1000_set_spd_dplx(
4859                                                 adapter, speed,
4860                                                 ((mii_reg & 0x100)
4861                                                  ? DUPLEX_FULL :
4862                                                  DUPLEX_HALF));
4863                                         if (retval)
4864                                                 return retval;
4865                                 }
4866                                 if (netif_running(adapter->netdev))
4867                                         e1000_reinit_locked(adapter);
4868                                 else
4869                                         e1000_reset(adapter);
4870                                 break;
4871                         case M88E1000_PHY_SPEC_CTRL:
4872                         case M88E1000_EXT_PHY_SPEC_CTRL:
4873                                 if (e1000_phy_reset(hw))
4874                                         return -EIO;
4875                                 break;
4876                         }
4877                 } else {
4878                         switch (data->reg_num) {
4879                         case PHY_CTRL:
4880                                 if (mii_reg & MII_CR_POWER_DOWN)
4881                                         break;
4882                                 if (netif_running(adapter->netdev))
4883                                         e1000_reinit_locked(adapter);
4884                                 else
4885                                         e1000_reset(adapter);
4886                                 break;
4887                         }
4888                 }
4889                 break;
4890         default:
4891                 return -EOPNOTSUPP;
4892         }
4893         return E1000_SUCCESS;
4894 }
4895
4896 void e1000_pci_set_mwi(struct e1000_hw *hw)
4897 {
4898         struct e1000_adapter *adapter = hw->back;
4899         int ret_val = pci_set_mwi(adapter->pdev);
4900
4901         if (ret_val)
4902                 e_err(probe, "Error in setting MWI\n");
4903 }
4904
4905 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4906 {
4907         struct e1000_adapter *adapter = hw->back;
4908
4909         pci_clear_mwi(adapter->pdev);
4910 }
4911
4912 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4913 {
4914         struct e1000_adapter *adapter = hw->back;
4915         return pcix_get_mmrbc(adapter->pdev);
4916 }
4917
4918 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4919 {
4920         struct e1000_adapter *adapter = hw->back;
4921         pcix_set_mmrbc(adapter->pdev, mmrbc);
4922 }
4923
4924 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4925 {
4926         outl(value, port);
4927 }
4928
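/* Returns true if at least one VLAN ID is currently registered; the
 * for_each_set_bit() loop exits after the first set bit it finds.
 */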
4929 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4930 {
4931         u16 vid;
4932
4933         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4934                 return true;
4935         return false;
4936 }
4937
4938 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4939                               netdev_features_t features)
4940 {
4941         struct e1000_hw *hw = &adapter->hw;
4942         u32 ctrl;
4943
4944         ctrl = er32(CTRL);
4945         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4946                 /* enable VLAN tag insert/strip */
4947                 ctrl |= E1000_CTRL_VME;
4948         } else {
4949                 /* disable VLAN tag insert/strip */
4950                 ctrl &= ~E1000_CTRL_VME;
4951         }
4952         ew32(CTRL, ctrl);
4953 }
4954 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4955                                      bool filter_on)
4956 {
4957         struct e1000_hw *hw = &adapter->hw;
4958         u32 rctl;
4959
4960         if (!test_bit(__E1000_DOWN, &adapter->flags))
4961                 e1000_irq_disable(adapter);
4962
4963         __e1000_vlan_mode(adapter, adapter->netdev->features);
4964         if (filter_on) {
4965                 /* enable VLAN receive filtering */
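                /* Clearing CFIEN disables filtering on the CFI bit of the
                 * VLAN tag; VFE turns on the VLAN filter table, but it is
                 * left off in promiscuous mode so every VLAN stays visible.
                 */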
4966                 rctl = er32(RCTL);
4967                 rctl &= ~E1000_RCTL_CFIEN;
4968                 if (!(adapter->netdev->flags & IFF_PROMISC))
4969                         rctl |= E1000_RCTL_VFE;
4970                 ew32(RCTL, rctl);
4971                 e1000_update_mng_vlan(adapter);
4972         } else {
4973                 /* disable VLAN receive filtering */
4974                 rctl = er32(RCTL);
4975                 rctl &= ~E1000_RCTL_VFE;
4976                 ew32(RCTL, rctl);
4977         }
4978
4979         if (!test_bit(__E1000_DOWN, &adapter->flags))
4980                 e1000_irq_enable(adapter);
4981 }
4982
4983 static void e1000_vlan_mode(struct net_device *netdev,
4984                             netdev_features_t features)
4985 {
4986         struct e1000_adapter *adapter = netdev_priv(netdev);
4987
4988         if (!test_bit(__E1000_DOWN, &adapter->flags))
4989                 e1000_irq_disable(adapter);
4990
4991         __e1000_vlan_mode(adapter, features);
4992
4993         if (!test_bit(__E1000_DOWN, &adapter->flags))
4994                 e1000_irq_enable(adapter);
4995 }
4996
4997 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4998                                  __be16 proto, u16 vid)
4999 {
5000         struct e1000_adapter *adapter = netdev_priv(netdev);
5001         struct e1000_hw *hw = &adapter->hw;
5002         u32 vfta, index;
5003
5004         if ((hw->mng_cookie.status &
5005              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
5006             (vid == adapter->mng_vlan_id))
5007                 return 0;
5008
5009         if (!e1000_vlan_used(adapter))
5010                 e1000_vlan_filter_on_off(adapter, true);
5011
5012         /* add VID to filter table */
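        /* The VLAN filter table is 128 32-bit registers covering all 4096
         * possible VIDs: bits 11:5 of the VID select the register and bits
         * 4:0 select the bit within it (e.g. VID 100 -> register 3, bit 4).
         */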
5013         index = (vid >> 5) & 0x7F;
5014         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5015         vfta |= (1 << (vid & 0x1F));
5016         e1000_write_vfta(hw, index, vfta);
5017
5018         set_bit(vid, adapter->active_vlans);
5019
5020         return 0;
5021 }
5022
5023 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
5024                                   __be16 proto, u16 vid)
5025 {
5026         struct e1000_adapter *adapter = netdev_priv(netdev);
5027         struct e1000_hw *hw = &adapter->hw;
5028         u32 vfta, index;
5029
5030         if (!test_bit(__E1000_DOWN, &adapter->flags))
5031                 e1000_irq_disable(adapter);
5032         if (!test_bit(__E1000_DOWN, &adapter->flags))
5033                 e1000_irq_enable(adapter);
5034
5035         /* remove VID from filter table */
5036         index = (vid >> 5) & 0x7F;
5037         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5038         vfta &= ~(1 << (vid & 0x1F));
5039         e1000_write_vfta(hw, index, vfta);
5040
5041         clear_bit(vid, adapter->active_vlans);
5042
5043         if (!e1000_vlan_used(adapter))
5044                 e1000_vlan_filter_on_off(adapter, false);
5045
5046         return 0;
5047 }
5048
5049 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5050 {
5051         u16 vid;
5052
5053         if (!e1000_vlan_used(adapter))
5054                 return;
5055
5056         e1000_vlan_filter_on_off(adapter, true);
5057         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5058                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5059 }
5060
5061 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5062 {
5063         struct e1000_hw *hw = &adapter->hw;
5064
5065         hw->autoneg = 0;
5066
5067         /* Make sure dplx is at most 1 bit and lsb of speed is not set
5068          * for the switch() below to work
5069          */
5070         if ((spd & 1) || (dplx & ~1))
5071                 goto err_inval;
5072
5073         /* Fiber NICs only allow 1000 Mbps Full duplex */
5074         if ((hw->media_type == e1000_media_type_fiber) &&
5075             spd != SPEED_1000 &&
5076             dplx != DUPLEX_FULL)
5077                 goto err_inval;
5078
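        /* Given the checks above, spd + dplx is unique per combination, e.g.
         * SPEED_100 (100) + DUPLEX_FULL (1) == 101, so a single switch can
         * select the forced speed/duplex setting.
         */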
5079         switch (spd + dplx) {
5080         case SPEED_10 + DUPLEX_HALF:
5081                 hw->forced_speed_duplex = e1000_10_half;
5082                 break;
5083         case SPEED_10 + DUPLEX_FULL:
5084                 hw->forced_speed_duplex = e1000_10_full;
5085                 break;
5086         case SPEED_100 + DUPLEX_HALF:
5087                 hw->forced_speed_duplex = e1000_100_half;
5088                 break;
5089         case SPEED_100 + DUPLEX_FULL:
5090                 hw->forced_speed_duplex = e1000_100_full;
5091                 break;
5092         case SPEED_1000 + DUPLEX_FULL:
5093                 hw->autoneg = 1;
5094                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5095                 break;
5096         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5097         default:
5098                 goto err_inval;
5099         }
5100
5101         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5102         hw->mdix = AUTO_ALL_MODES;
5103
5104         return 0;
5105
5106 err_inval:
5107         e_err(probe, "Unsupported Speed/Duplex configuration\n");
5108         return -EINVAL;
5109 }
5110
5111 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5112 {
5113         struct net_device *netdev = pci_get_drvdata(pdev);
5114         struct e1000_adapter *adapter = netdev_priv(netdev);
5115         struct e1000_hw *hw = &adapter->hw;
5116         u32 ctrl, ctrl_ext, rctl, status;
5117         u32 wufc = adapter->wol;
5118 #ifdef CONFIG_PM
5119         int retval = 0;
5120 #endif
5121
5122         netif_device_detach(netdev);
5123
5124         if (netif_running(netdev)) {
5125                 int count = E1000_CHECK_RESET_COUNT;
5126
5127                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5128                         usleep_range(10000, 20000);
5129
5130                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5131                 e1000_down(adapter);
5132         }
5133
5134 #ifdef CONFIG_PM
5135         retval = pci_save_state(pdev);
5136         if (retval)
5137                 return retval;
5138 #endif
5139
5140         status = er32(STATUS);
5141         if (status & E1000_STATUS_LU)
5142                 wufc &= ~E1000_WUFC_LNKC;
5143
5144         if (wufc) {
5145                 e1000_setup_rctl(adapter);
5146                 e1000_set_rx_mode(netdev);
5147
5148                 rctl = er32(RCTL);
5149
5150                 /* turn on all-multi mode if wake on multicast is enabled */
5151                 if (wufc & E1000_WUFC_MC)
5152                         rctl |= E1000_RCTL_MPE;
5153
5154                 /* enable receives in the hardware */
5155                 ew32(RCTL, rctl | E1000_RCTL_EN);
5156
5157                 if (hw->mac_type >= e1000_82540) {
5158                         ctrl = er32(CTRL);
5159                         /* advertise wake from D3Cold */
5160                         #define E1000_CTRL_ADVD3WUC 0x00100000
5161                         /* phy power management enable */
5162                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5163                         ctrl |= E1000_CTRL_ADVD3WUC |
5164                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5165                         ew32(CTRL, ctrl);
5166                 }
5167
5168                 if (hw->media_type == e1000_media_type_fiber ||
5169                     hw->media_type == e1000_media_type_internal_serdes) {
5170                         /* keep the laser running in D3 */
5171                         ctrl_ext = er32(CTRL_EXT);
5172                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5173                         ew32(CTRL_EXT, ctrl_ext);
5174                 }
5175
5176                 ew32(WUC, E1000_WUC_PME_EN);
5177                 ew32(WUFC, wufc);
5178         } else {
5179                 ew32(WUC, 0);
5180                 ew32(WUFC, 0);
5181         }
5182
5183         e1000_release_manageability(adapter);
5184
5185         *enable_wake = !!wufc;
5186
5187         /* make sure adapter isn't asleep if manageability is enabled */
5188         if (adapter->en_mng_pt)
5189                 *enable_wake = true;
5190
5191         if (netif_running(netdev))
5192                 e1000_free_irq(adapter);
5193
5194         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5195                 pci_disable_device(pdev);
5196
5197         return 0;
5198 }
5199
5200 #ifdef CONFIG_PM
5201 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5202 {
5203         int retval;
5204         bool wake;
5205
5206         retval = __e1000_shutdown(pdev, &wake);
5207         if (retval)
5208                 return retval;
5209
5210         if (wake) {
5211                 pci_prepare_to_sleep(pdev);
5212         } else {
5213                 pci_wake_from_d3(pdev, false);
5214                 pci_set_power_state(pdev, PCI_D3hot);
5215         }
5216
5217         return 0;
5218 }
5219
5220 static int e1000_resume(struct pci_dev *pdev)
5221 {
5222         struct net_device *netdev = pci_get_drvdata(pdev);
5223         struct e1000_adapter *adapter = netdev_priv(netdev);
5224         struct e1000_hw *hw = &adapter->hw;
5225         u32 err;
5226
5227         pci_set_power_state(pdev, PCI_D0);
5228         pci_restore_state(pdev);
5229         pci_save_state(pdev);
5230
5231         if (adapter->need_ioport)
5232                 err = pci_enable_device(pdev);
5233         else
5234                 err = pci_enable_device_mem(pdev);
5235         if (err) {
5236                 pr_err("Cannot enable PCI device from suspend\n");
5237                 return err;
5238         }
5239
5240         /* flush memory to make sure state is correct */
5241         smp_mb__before_atomic();
5242         clear_bit(__E1000_DISABLED, &adapter->flags);
5243         pci_set_master(pdev);
5244
5245         pci_enable_wake(pdev, PCI_D3hot, 0);
5246         pci_enable_wake(pdev, PCI_D3cold, 0);
5247
5248         if (netif_running(netdev)) {
5249                 err = e1000_request_irq(adapter);
5250                 if (err)
5251                         return err;
5252         }
5253
5254         e1000_power_up_phy(adapter);
5255         e1000_reset(adapter);
5256         ew32(WUS, ~0);
5257
5258         e1000_init_manageability(adapter);
5259
5260         if (netif_running(netdev))
5261                 e1000_up(adapter);
5262
5263         netif_device_attach(netdev);
5264
5265         return 0;
5266 }
5267 #endif
5268
5269 static void e1000_shutdown(struct pci_dev *pdev)
5270 {
5271         bool wake;
5272
5273         __e1000_shutdown(pdev, &wake);
5274
5275         if (system_state == SYSTEM_POWER_OFF) {
5276                 pci_wake_from_d3(pdev, wake);
5277                 pci_set_power_state(pdev, PCI_D3hot);
5278         }
5279 }
5280
5281 #ifdef CONFIG_NET_POLL_CONTROLLER
5282 /* Polling 'interrupt' - used by things like netconsole to send skbs
5283  * without having to re-enable interrupts. It's not called while
5284  * the interrupt routine is executing.
5285  */
5286 static void e1000_netpoll(struct net_device *netdev)
5287 {
5288         struct e1000_adapter *adapter = netdev_priv(netdev);
5289
5290         disable_irq(adapter->pdev->irq);
5291         e1000_intr(adapter->pdev->irq, netdev);
5292         enable_irq(adapter->pdev->irq);
5293 }
5294 #endif
5295
5296 /**
5297  * e1000_io_error_detected - called when PCI error is detected
5298  * @pdev: Pointer to PCI device
5299  * @state: The current pci connection state
5300  *
5301  * This function is called after a PCI bus error affecting
5302  * this device has been detected.
5303  */
5304 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5305                                                 pci_channel_state_t state)
5306 {
5307         struct net_device *netdev = pci_get_drvdata(pdev);
5308         struct e1000_adapter *adapter = netdev_priv(netdev);
5309
5310         netif_device_detach(netdev);
5311
5312         if (state == pci_channel_io_perm_failure)
5313                 return PCI_ERS_RESULT_DISCONNECT;
5314
5315         if (netif_running(netdev))
5316                 e1000_down(adapter);
5317
5318         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5319                 pci_disable_device(pdev);
5320
5321         /* Request a slot reset. */
5322         return PCI_ERS_RESULT_NEED_RESET;
5323 }
5324
5325 /**
5326  * e1000_io_slot_reset - called after the pci bus has been reset.
5327  * @pdev: Pointer to PCI device
5328  *
5329  * Restart the card from scratch, as if from a cold-boot. Implementation
5330  * resembles the first-half of the e1000_resume routine.
5331  */
5332 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5333 {
5334         struct net_device *netdev = pci_get_drvdata(pdev);
5335         struct e1000_adapter *adapter = netdev_priv(netdev);
5336         struct e1000_hw *hw = &adapter->hw;
5337         int err;
5338
5339         if (adapter->need_ioport)
5340                 err = pci_enable_device(pdev);
5341         else
5342                 err = pci_enable_device_mem(pdev);
5343         if (err) {
5344                 pr_err("Cannot re-enable PCI device after reset.\n");
5345                 return PCI_ERS_RESULT_DISCONNECT;
5346         }
5347
5348         /* flush memory to make sure state is correct */
5349         smp_mb__before_atomic();
5350         clear_bit(__E1000_DISABLED, &adapter->flags);
5351         pci_set_master(pdev);
5352
5353         pci_enable_wake(pdev, PCI_D3hot, 0);
5354         pci_enable_wake(pdev, PCI_D3cold, 0);
5355
5356         e1000_reset(adapter);
5357         ew32(WUS, ~0);
5358
5359         return PCI_ERS_RESULT_RECOVERED;
5360 }
5361
5362 /**
5363  * e1000_io_resume - called when traffic can start flowing again.
5364  * @pdev: Pointer to PCI device
5365  *
5366  * This callback is called when the error recovery driver tells us that
5367  * its OK to resume normal operation. Implementation resembles the
5368  * second-half of the e1000_resume routine.
5369  */
5370 static void e1000_io_resume(struct pci_dev *pdev)
5371 {
5372         struct net_device *netdev = pci_get_drvdata(pdev);
5373         struct e1000_adapter *adapter = netdev_priv(netdev);
5374
5375         e1000_init_manageability(adapter);
5376
5377         if (netif_running(netdev)) {
5378                 if (e1000_up(adapter)) {
5379                         pr_info("can't bring device back up after reset\n");
5380                         return;
5381                 }
5382         }
5383
5384         netif_device_attach(netdev);
5385 }
5386
5387 /* e1000_main.c */