/*
 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define DRV_VERSION "1.0.0.7-NAPI"

char atl1e_driver_name[] = "ATL1E";
char atl1e_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L1E	0x1026

/*
 * atl1e_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id atl1e_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
	/* required last entry */
	{0}
};
MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);

MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);

static const u16
atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
	{REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
	{REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
	{REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
};

static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
{
	REG_RXF0_BASE_ADDR_HI,
	REG_RXF1_BASE_ADDR_HI,
	REG_RXF2_BASE_ADDR_HI,
	REG_RXF3_BASE_ADDR_HI
};

static const u16
atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
	{REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
	{REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
	{REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
};

static const u16
atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO},
	{REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO},
	{REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO},
	{REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO}
};

static const u16 atl1e_pay_load_size[] = {
	128, 256, 512, 1024, 2048, 4096,
};

/**
 * atl1e_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
{
	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
		AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
		AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
		AT_WRITE_FLUSH(&adapter->hw);
	}
}

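/*
 * Note: irq_sem makes disable/enable pairs nestable.  atl1e_irq_disable()
 * increments the counter and masks IMR; atl1e_irq_enable() re-arms
 * IMR_NORMAL_MASK only when the matching decrement brings the counter
 * back to zero.  atl1e_sw_init() starts irq_sem at 1, so interrupts stay
 * masked until the first enable from atl1e_up().
 */
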
/**
 * atl1e_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * atl1e_irq_reset - reset interrupt configuration on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
{
	atomic_set(&adapter->irq_sem, 0);
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
}

/**
 * atl1e_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1e_phy_config(unsigned long data)
{
	struct atl1e_adapter *adapter = (struct atl1e_adapter *)data;
	struct atl1e_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_restart_autoneg(hw);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);
	atl1e_down(adapter);
	atl1e_up(adapter);
	clear_bit(__AT_RESETTING, &adapter->flags);
}

static void atl1e_reset_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	adapter = container_of(work, struct atl1e_adapter, reset_task);

	atl1e_reinit_locked(adapter);
}

static int atl1e_check_link(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u16 speed, duplex, phy_data;

	/* MII_BMSR must be read twice */
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	if ((phy_data & BMSR_LSTATUS) == 0) {
		/* link down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = AT_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else {
		/* Link Up */
		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
		if (unlikely(err))
			return err;

		/* link result is our setting */
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed  = speed;
			adapter->link_duplex = duplex;
			atl1e_setup_mac_ctrl(adapter);
			netdev_info(netdev,
				    "NIC Link is Up <%d Mbps %s Duplex>\n",
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half");
		}

		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
	return 0;
}

/**
 * atl1e_link_chg_task - deal with link change event out of interrupt context
 * @work: work queue entry containing the adapter
 */
static void atl1e_link_chg_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl1e_adapter, link_chg_task);
	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_check_link(adapter);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;
	u16 link_up = 0;

	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->mdio_lock);
	link_up = phy_data & BMSR_LSTATUS;
	/* notify upper layer link down ASAP */
	if (!link_up) {
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			netdev_info(netdev, "NIC Link is Down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}

static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
	del_timer_sync(&adapter->phy_config_timer);
}

static void atl1e_cancel_work(struct atl1e_adapter *adapter)
{
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->link_chg_task);
}

/**
 * atl1e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl1e_tx_timeout(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/**
 * atl1e_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1e_set_multi(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 mac_ctrl_data = 0;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);

	if (netdev->flags & IFF_PROMISC) {
		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
	} else if (netdev->flags & IFF_ALLMULTI) {
		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
	} else {
		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
	}

	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

	/* clear the old settings from the multicast hash table */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* compute mc addresses' hash value, and put it into hash table */
	netdev_for_each_mc_addr(ha, netdev) {
		hash_value = atl1e_hash_mc_addr(hw, ha->addr);
		atl1e_hash_set(hw, hash_value);
	}
}

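/*
 * The filter above is the usual atlx multicast hash scheme:
 * atl1e_hash_mc_addr() (in atl1e_hw.c) derives a CRC-based hash of the
 * address, and atl1e_hash_set() uses its top bits to set one of the 64
 * bits spread across the two 32-bit REG_RX_HASH_TABLE words cleared
 * above.  A received multicast frame is accepted when its hash bit is set.
 */
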
static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
	if (features & NETIF_F_RXALL) {
		/* enable RX of ALL frames */
		*mac_ctrl_data |= MAC_CTRL_DBG;
	} else {
		/* disable RX of ALL frames */
		*mac_ctrl_data &= ~MAC_CTRL_DBG;
	}
}

static void atl1e_rx_mode(struct net_device *netdev,
			  netdev_features_t features)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 mac_ctrl_data = 0;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	atl1e_irq_disable(adapter);
	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
	__atl1e_rx_mode(features, &mac_ctrl_data);
	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
	atl1e_irq_enable(adapter);
}

static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* enable VLAN tag insert/strip */
		*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
	} else {
		/* disable VLAN tag insert/strip */
		*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
	}
}

static void atl1e_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 mac_ctrl_data = 0;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	atl1e_irq_disable(adapter);
	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
	__atl1e_vlan_mode(features, &mac_ctrl_data);
	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
	atl1e_irq_enable(adapter);
}

static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
	netdev_dbg(adapter->netdev, "%s\n", __func__);
	atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}

/**
 * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl1e_hw_set_mac_addr(&adapter->hw);

	return 0;
}

static netdev_features_t atl1e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int atl1e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		atl1e_vlan_mode(netdev, features);

	if (changed & NETIF_F_RXALL)
		atl1e_rx_mode(netdev, features);

	return 0;
}

/**
 * atl1e_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* set MTU */
	if (netif_running(netdev)) {
		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
			msleep(1);
		netdev->mtu = new_mtu;
		adapter->hw.max_frame_size = new_mtu;
		adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
		atl1e_down(adapter);
		atl1e_up(adapter);
		clear_bit(__AT_RESETTING, &adapter->flags);
	}
	return 0;
}

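/*
 * rx_jumbo_th is kept in 8-byte (QWORD) units, hence the
 * (max_frame + 7) >> 3 rounding above; atl1e_sw_init() applies the same
 * conversion when it derives the initial threshold from the MTU.
 */
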
/*
 * caller should hold mdio_lock
 */
static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
	return result;
}

static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
			     int reg_num, int val)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (atl1e_write_phy_reg(&adapter->hw,
				reg_num & MDIO_REG_ADDR_MASK, val))
		netdev_err(netdev, "write phy register failed\n");
}

static int atl1e_mii_ioctl(struct net_device *netdev,
			   struct ifreq *ifr, int cmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int retval = 0;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 0;
		break;

	case SIOCGMIIREG:
		if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			retval = -EIO;
			goto out;
		}
		break;

	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F)) {
			retval = -EFAULT;
			goto out;
		}

		netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
			   data->reg_num, data->val_in);
		if (atl1e_write_phy_reg(&adapter->hw,
					data->reg_num, data->val_in)) {
			retval = -EIO;
			goto out;
		}
		break;

	default:
		retval = -EOPNOTSUPP;
		break;
	}
out:
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
	return retval;
}

static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl1e_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static void atl1e_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
	cmd |=  (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * Some motherboard BIOS (PXE/EFI) drivers may leave PME set
	 * while they transfer control to the OS (Windows/Linux),
	 * so clear this bit before the NIC starts working normally.
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
	msleep(1);
}

/**
 * atl1e_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 */
static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
	return 0;
}

/**
 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int atl1e_sw_init(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw   = &adapter->hw;
	struct pci_dev	*pdev = adapter->pdev;
	u32 phy_status_data = 0;

	adapter->wol = 0;
	adapter->link_speed = SPEED_0;   /* hardware init */
	adapter->link_duplex = FULL_DUPLEX;
	adapter->num_rx_queues = 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id  = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
	/* nic type */
	if (hw->revision_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (phy_status_data & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

	if (phy_status_data & PHY_STATUS_EMI_CA)
		hw->emi_ca = true;
	else
		hw->emi_ca = false;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->max_frame_size = adapter->netdev->mtu;
	hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
				VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;

	hw->rrs_type = atl1e_rrs_disable;
	hw->indirect_tab = 0;
	hw->base_cpu = 0;

	hw->ict = 50000;		/* 100ms */
	hw->smb_timer = 200000;		/* 200ms */
	hw->tpd_burst = 5;
	hw->rrd_thresh = 1;
	hw->tpd_thresh = adapter->tx_ring.count / 2;
	hw->rx_count_down = 4;	/* 2us resolution */
	hw->tx_count_down = hw->imt * 4 / 3;
	hw->dmar_block = atl1e_dma_req_1024;
	hw->dmaw_block = atl1e_dma_req_1024;
	hw->dmar_dly_cnt = 15;
	hw->dmaw_dly_cnt = 4;

	if (atl1e_alloc_queues(adapter)) {
		netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->mdio_lock);

	set_bit(__AT_DOWN, &adapter->flags);

	return 0;
}

/**
 * atl1e_clean_tx_ring - Free Tx-skb
 * @adapter: board private structure
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	struct pci_dev *pdev = adapter->pdev;
	u16 index, ring_count;

	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
		return;

	ring_count = tx_ring->count;
	/* first unmap dma */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}
	}
	/* second free skb */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->skb) {
			dev_kfree_skb_any(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}
	}
	/* Zero out Tx-buffers */
	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
		ring_count);
	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
		ring_count);
}

/**
 * atl1e_clean_rx_ring - Free rx-reservation skbs
 * @adapter: board private structure
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring =
		&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
	u16 i, j;

	if (adapter->ring_vir_addr == NULL)
		return;
	/* Zero out the descriptor ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			if (rx_page_desc[i].rx_page[j].addr != NULL) {
				memset(rx_page_desc[i].rx_page[j].addr, 0,
					rx_ring->real_page_size);
			}
		}
	}
}

static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
{
	*ring_size = ((u32)(adapter->tx_ring.count *
		     sizeof(struct atl1e_tpd_desc) + 7
		     /* tx ring, qword align */
		     + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
		       adapter->num_rx_queues + 31
		     /* rx ring, 32 bytes align */
		     + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
		       sizeof(u32) + 3));
		     /* tx, rx cmd, dword align */
}

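/*
 * The single coherent allocation sized here is carved up by
 * atl1e_setup_ring_resources() in the same order as the terms above:
 * the TPD ring (8-byte aligned), the RX pages (32-byte aligned), one
 * u32 TX CMB, then one u32 write-offset slot per RX page.  The
 * +7/+31/+3 slack pays for the alignment round-ups.
 */
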
static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring = NULL;

	rx_ring = &adapter->rx_ring;

	rx_ring->real_page_size = adapter->rx_ring.page_size
				 + adapter->hw.max_frame_size
				 + ETH_HLEN + VLAN_HLEN
				 + ETH_FCS_LEN;
	rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
	atl1e_cal_ring_size(adapter, &adapter->ring_size);

	adapter->ring_vir_addr = NULL;
	adapter->rx_ring.desc = NULL;
	rwlock_init(&adapter->tx_ring.tx_lock);
}

/*
 * Read / Write Ptr Initialize:
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	u16 i, j;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;
	rx_page_desc = rx_ring->rx_page_desc;

	tx_ring->next_to_use = 0;
	atomic_set(&tx_ring->next_to_clean, 0);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_page_desc[i].rx_using  = 0;
		rx_page_desc[i].rx_nxseq = 0;
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			*rx_page_desc[i].rx_page[j].write_offset_addr = 0;
			rx_page_desc[i].rx_page[j].read_offset = 0;
		}
	}
}

/**
 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);

	if (adapter->ring_vir_addr) {
		pci_free_consistent(pdev, adapter->ring_size,
				adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}

	if (adapter->tx_ring.tx_buffer) {
		kfree(adapter->tx_ring.tx_buffer);
		adapter->tx_ring.tx_buffer = NULL;
	}
}

/**
 * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1e_tx_ring *tx_ring;
	struct atl1e_rx_ring *rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc;
	int size, i, j;
	u32 offset = 0;
	int err = 0;

	if (adapter->ring_vir_addr != NULL)
		return 0; /* alloced already */

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;

	/* real ring DMA buffer */

	size = adapter->ring_size;
	adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
						       &adapter->ring_dma);
	if (adapter->ring_vir_addr == NULL) {
		netdev_err(adapter->netdev,
			   "pci_alloc_consistent failed, size = D%d\n", size);
		return -ENOMEM;
	}

	rx_page_desc = rx_ring->rx_page_desc;

	/* Init TPD Ring */
	tx_ring->dma = roundup(adapter->ring_dma, 8);
	offset = tx_ring->dma - adapter->ring_dma;
	tx_ring->desc = adapter->ring_vir_addr + offset;
	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
	tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
	if (tx_ring->tx_buffer == NULL) {
		err = -ENOMEM;
		goto failed;
	}

	/* Init RXF-Pages */
	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
	offset = roundup(offset, 32);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].addr =
				adapter->ring_vir_addr + offset;
			offset += rx_ring->real_page_size;
		}
	}

	/* Init CMB dma address */
	tx_ring->cmb_dma = adapter->ring_dma + offset;
	tx_ring->cmb = adapter->ring_vir_addr + offset;
	offset += sizeof(u32);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].write_offset_dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].write_offset_addr =
				adapter->ring_vir_addr + offset;
			offset += sizeof(u32);
		}
	}

	if (unlikely(offset > adapter->ring_size)) {
		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
			   offset, adapter->ring_size);
		err = -1;
		goto failed;
	}

	return 0;
failed:
	if (adapter->ring_vir_addr != NULL) {
		pci_free_consistent(pdev, adapter->ring_size,
				adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}
	return err;
}

static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	u16 i, j;

	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
			(u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));

	rx_page_desc = rx_ring->rx_page_desc;
	/* RXF Page Physical address / Page Length */
	for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
		AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
				(u32)((adapter->ring_dma &
				AT_DMA_HI_ADDR_MASK) >> 32));
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			u32 page_phy_addr;
			u32 offset_phy_addr;

			page_phy_addr = rx_page_desc[i].rx_page[j].dma;
			offset_phy_addr =
				rx_page_desc[i].rx_page[j].write_offset_dma;

			AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
					page_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
					offset_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
		}
	}
	/* Page Length */
	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
	/* Load all of the base addresses above */
	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
}

static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dev_ctrl_data = 0;
	u32 max_pay_load = 0;
	u32 jumbo_thresh = 0;
	u32 extra_size = 0;	/* Jumbo frame threshold in QWORD unit */

	/* configure TXQ param */
	if (hw->nic_type != athr_l2e_revB) {
		extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		if (hw->max_frame_size <= 1500) {
			jumbo_thresh = hw->max_frame_size + extra_size;
		} else if (hw->max_frame_size < 6*1024) {
			jumbo_thresh =
				(hw->max_frame_size + extra_size) * 2 / 3;
		} else {
			jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
		}
		AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
	}

	dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);

	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
			DEVICE_CTRL_MAX_PAYLOAD_MASK;

	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);

	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);

	if (hw->nic_type != athr_l2e_revB)
		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
			      atl1e_pay_load_size[hw->dmar_block]);
	/* enable TXQ */
	AT_WRITE_REGW(hw, REG_TXQ_CTRL,
			(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
			 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
			| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
}

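/*
 * dmar_block/dmaw_block hold the encoded PCIe max-read-request /
 * max-payload exponents (0..5), clamped above to what the device's
 * DEVICE_CTRL register advertises; atl1e_pay_load_size[] maps an
 * encoding back to bytes (128..4096), which is what gets programmed
 * into REG_TXQ_CTRL + 2.
 */
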
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 rxf_len  = 0;
	u32 rxf_low  = 0;
	u32 rxf_high = 0;
	u32 rxf_thresh_data = 0;
	u32 rxq_ctrl_data = 0;

	if (hw->nic_type != athr_l2e_revB) {
		AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
			      (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
			      RXQ_JMBOSZ_TH_SHIFT |
			      (1 & RXQ_JMBO_LKAH_MASK) <<
			      RXQ_JMBO_LKAH_SHIFT));

		rxf_len  = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
		rxf_high = rxf_len * 4 / 5;
		rxf_low  = rxf_len / 5;
		rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
				  << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
				  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
				  << RXQ_RXF_PAUSE_TH_LO_SHIFT);

		AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
	}

	/* RRS */
	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);

	if (hw->rrs_type & atl1e_rrs_ipv4)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;

	if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;

	if (hw->rrs_type & atl1e_rrs_ipv6)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;

	if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;

	if (hw->rrs_type != atl1e_rrs_disable)
		rxq_ctrl_data |=
			(RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);

	rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
			 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;

	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
}

static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dma_ctrl_data = 0;

	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;

	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
}

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
	u32 value;
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN |
		MAC_CTRL_RX_EN;

	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
			MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
			MAC_CTRL_SPEED_SHIFT);
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	value |= (((u32)adapter->hw.preamble_len &
		  MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

	__atl1e_vlan_mode(netdev->features, &value);

	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	if (netdev->features & NETIF_F_RXALL)
		value |= MAC_CTRL_DBG;
	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}

/**
 * atl1e_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 */
static int atl1e_configure(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;

	u32 intr_status_data = 0;

	/* clear interrupt status */
	AT_WRITE_REG(hw, REG_ISR, ~0);

	/* 1. set MAC Address */
	atl1e_hw_set_mac_addr(hw);

	/* 2. Init the Multicast HASH table (done by set_multi) */

	/* 3. Clear any WOL status */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
	 *    High 32bits memory */
	atl1e_configure_des_ring(adapter);

	/* 5. set Interrupt Moderator Timer */
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
			MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);

	/* 6. rx/tx threshold to trig interrupt */
	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);

	/* 7. set Interrupt Clear Timer */
	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);

	/* 8. set MTU */
	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
			VLAN_HLEN + ETH_FCS_LEN);

	/* 9. config TXQ early tx threshold */
	atl1e_configure_tx(adapter);

	/* 10. config RXQ */
	atl1e_configure_rx(adapter);

	/* 11. config DMA Engine */
	atl1e_configure_dma(adapter);

	/* 12. smb timer to trig interrupt */
	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);

	intr_status_data = AT_READ_REG(hw, REG_ISR);
	if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
		netdev_err(adapter->netdev,
			   "atl1e_configure failed, PCIE phy link down\n");
		return -1;
	}

	AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
	return 0;
}

/**
 * atl1e_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
	struct net_device_stats *net_stats = &netdev->stats;

	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_1_col +
				hw_stats->tx_2_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_sz_ov +
			       hw_stats->rx_rrd_ov +
			       hw_stats->rx_align_err +
			       hw_stats->rx_rxf_ov;

	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;

	return net_stats;
}

static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
{
	u16 hw_reg_addr = 0;
	unsigned long *stats_item = NULL;

	/* update rx status */
	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.rx_ok;
	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
	/* update tx status */
	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.tx_ok;
	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
}

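/*
 * The two loops rely on struct atl1e_hw_stats laying out its
 * unsigned long counters in exactly the order of the hardware
 * statistics register bank: the register address advances 4 bytes per
 * 32-bit counter while stats_item walks the struct member by member.
 */
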
static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
{
	u16 phy_data;

	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
	spin_unlock(&adapter->mdio_lock);
}

static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);

	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}

		if (tx_buffer->skb) {
			dev_kfree_skb_irq(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}

		if (++next_to_clean == tx_ring->count)
			next_to_clean = 0;
	}

	atomic_set(&tx_ring->next_to_clean, next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev)) {
		netif_wake_queue(adapter->netdev);
	}

	return true;
}

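/*
 * TX reclaim is lock-free: hardware publishes its consumer index in
 * REG_TPD_CONS_IDX, and everything between the software next_to_clean
 * and that index is unmapped and freed.  next_to_clean is atomic so
 * atl1e_tpd_avail() can read it safely from the xmit path.
 */
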
/**
 * atl1e_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1e_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	int max_ints = AT_MAX_INT_WORK;
	int handled = IRQ_NONE;
	u32 status;

	do {
		status = AT_READ_REG(hw, REG_ISR);
		if ((status & IMR_NORMAL_MASK) == 0 ||
				(status & ISR_DIS_INT) != 0) {
			if (max_ints != AT_MAX_INT_WORK)
				handled = IRQ_HANDLED;
			break;
		}
		/* link event */
		if (status & ISR_GPHY)
			atl1e_clear_phy_int(adapter);
		/* Ack ISR */
		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

		handled = IRQ_HANDLED;
		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			netdev_err(adapter->netdev,
				   "pcie phy linkdown %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				atl1e_irq_reset(adapter);
				schedule_work(&adapter->reset_task);
				break;
			}
		}

		/* check if DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			netdev_err(adapter->netdev,
				   "PCIE DMA RW error (status = 0x%x)\n",
				   status);
			atl1e_irq_reset(adapter);
			schedule_work(&adapter->reset_task);
			break;
		}

		if (status & ISR_SMB)
			atl1e_update_hw_stats(adapter);

		/* link event */
		if (status & (ISR_GPHY | ISR_MANUAL)) {
			netdev->stats.tx_carrier_errors++;
			atl1e_link_chg_event(adapter);
			break;
		}

		/* transmit event */
		if (status & ISR_TX_EVENT)
			atl1e_clean_tx_irq(adapter);

		if (status & ISR_RX_EVENT) {
			/*
			 * disable rx interrupts, without
			 * the synchronize_irq bit
			 */
			AT_WRITE_REG(hw, REG_IMR,
				     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
			AT_WRITE_FLUSH(hw);
			if (likely(napi_schedule_prep(
				   &adapter->napi)))
				__napi_schedule(&adapter->napi);
		}
	} while (--max_ints > 0);
	/* re-enable Interrupt */
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);

	return handled;
}

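/*
 * RX work is deferred to NAPI: the handler masks only ISR_RX_EVENT in
 * IMR and schedules the poll, and atl1e_clean() re-enables that bit
 * once the budget is not exhausted.  The do/while loop bounds interrupt
 * work to AT_MAX_INT_WORK iterations per invocation.
 */
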
static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
{
	u8 *packet = (u8 *)(prrs + 1);
	struct iphdr *iph;
	u16 head_len = ETH_HLEN;
	u16 pkt_flags;
	u16 err_flags;

	skb_checksum_none_assert(skb);
	pkt_flags = prrs->pkt_flag;
	err_flags = prrs->err_flag;
	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
	    ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
		if (pkt_flags & RRS_IS_IPV4) {
			if (pkt_flags & RRS_IS_802_3)
				head_len += 8;
			iph = (struct iphdr *) (packet + head_len);
			if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
				goto hw_xsum;
		}
		if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}
	}

hw_xsum:
	return;
}

static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
					       u8 que)
{
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
	u8 rx_using = rx_page_desc[que].rx_using;

	return &(rx_page_desc[que].rx_page[rx_using]);
}

static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
		   int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
	struct sk_buff *skb = NULL;
	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
	u32 packet_size, write_offset;
	struct atl1e_recv_ret_status *prrs;

	write_offset = *(rx_page->write_offset_addr);
	if (likely(rx_page->read_offset < write_offset)) {
		do {
			if (*work_done >= work_to_do)
				break;
			(*work_done)++;
			/* get new packet's rrs */
			prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
						 rx_page->read_offset);
			/* check sequence number */
			if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
				netdev_err(netdev,
					   "rx sequence number error (rx=%d) (expect=%d)\n",
					   prrs->seq_num,
					   rx_page_desc[que].rx_nxseq);
				rx_page_desc[que].rx_nxseq++;
				/* just for debug use */
				AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
					     (((u32)prrs->seq_num) << 16) |
					     rx_page_desc[que].rx_nxseq);
				goto fatal_err;
			}
			rx_page_desc[que].rx_nxseq++;

			/* error packet */
			if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
			    !(netdev->features & NETIF_F_RXALL)) {
				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
					RRS_ERR_TRUNC)) {
					/* hardware error, discard this packet */
					netdev_err(netdev,
						   "rx packet desc error %x\n",
						   *((u32 *)prrs + 1));
					goto skip_pkt;
				}
			}

			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
					RRS_PKT_SIZE_MASK);
			if (likely(!(netdev->features & NETIF_F_RXFCS)))
				packet_size -= 4; /* CRC */

			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
			if (skb == NULL)
				goto skip_pkt;

			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
			skb_put(skb, packet_size);
			skb->protocol = eth_type_trans(skb, netdev);
			atl1e_rx_checksum(adapter, skb, prrs);

			if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
				u16 vlan_tag = (prrs->vtag >> 4) |
					       ((prrs->vtag & 7) << 13) |
					       ((prrs->vtag & 8) << 9);
				netdev_dbg(netdev,
					   "RXD VLAN TAG<RRD>=0x%04x\n",
					   prrs->vtag);
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       vlan_tag);
			}
			napi_gro_receive(&adapter->napi, skb);

skip_pkt:
			/* skip current packet whether it's ok or not. */
			rx_page->read_offset +=
				(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
				RRS_PKT_SIZE_MASK) +
				sizeof(struct atl1e_recv_ret_status) + 31) &
					0xFFFFFFE0);

			if (rx_page->read_offset >= rx_ring->page_size) {
				/* mark this page clean */
				u16 reg_addr;
				u8  rx_using;

				rx_page->read_offset =
					*(rx_page->write_offset_addr) = 0;
				rx_using = rx_page_desc[que].rx_using;
				reg_addr =
					atl1e_rx_page_vld_regs[que][rx_using];
				AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
				rx_page_desc[que].rx_using ^= 1;
				rx_page = atl1e_get_rx_page(adapter, que);
			}
			write_offset = *(rx_page->write_offset_addr);
		} while (rx_page->read_offset < write_offset);
	}

	return;

fatal_err:
	if (!test_bit(__AT_DOWN, &adapter->flags))
		schedule_work(&adapter->reset_task);
}

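/*
 * RX here uses a two-page flip scheme per queue rather than per-buffer
 * descriptors: hardware appends RRS-prefixed frames to the current page
 * and advances *write_offset_addr in coherent memory by DMA.  Once
 * software has consumed a whole page it zeroes both offsets, hands the
 * page back through the page-valid register, and flips rx_using to the
 * other page.
 */
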
/**
 * atl1e_clean - NAPI Rx polling callback
 */
static int atl1e_clean(struct napi_struct *napi, int budget)
{
	struct atl1e_adapter *adapter =
			container_of(napi, struct atl1e_adapter, napi);
	u32 imr_data;
	int work_done = 0;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(adapter->netdev))
		goto quit_polling;

	atl1e_clean_rx_irq(adapter, 0, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
quit_polling:
		napi_complete_done(napi, work_done);
		imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
		AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
		/* test debug */
		if (test_bit(__AT_DOWN, &adapter->flags)) {
			atomic_dec(&adapter->irq_sem);
			netdev_err(adapter->netdev,
				   "atl1e_clean is called when AT_DOWN\n");
		}
		/* reenable RX intr */
		/*atl1e_irq_enable(adapter); */
	}
	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void atl1e_netpoll(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	atl1e_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;
	u16 next_to_clean = 0;

	next_to_clean = atomic_read(&tx_ring->next_to_clean);
	next_to_use   = tx_ring->next_to_use;

	return (u16)(next_to_clean > next_to_use) ?
		(next_to_clean - next_to_use - 1) :
		(tx_ring->count + next_to_clean - next_to_use - 1);
}

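/*
 * Standard ring arithmetic: one slot is deliberately kept unused so
 * that next_to_use == next_to_clean always means "empty" rather than
 * "full"; with a 64-entry ring, for example, at most 63 TPDs are ever
 * reported available.
 */
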
/*
 * get next usable tpd
 * Note: should call atl1e_tpd_avail to make sure
 * there is enough tpd to use
 */
static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;

	next_to_use = tx_ring->next_to_use;
	if (++tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
	return &tx_ring->desc[next_to_use];
}

static struct atl1e_tx_buffer *
atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;

	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
}

/* Calculate the number of transmit packet descriptors needed */
static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
{
	int i = 0;
	u16 tpd_req = 1;
	u16 fg_size = 0;
	u16 proto_hdr_len = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
	}

	if (skb_is_gso(skb)) {
		if (skb->protocol == htons(ETH_P_IP) ||
		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
			proto_hdr_len = skb_transport_offset(skb) +
					tcp_hdrlen(skb);
			if (proto_hdr_len < skb_headlen(skb)) {
				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
					   MAX_TX_BUF_LEN - 1) >>
					   MAX_TX_BUF_SHIFT);
			}
		}
	}
	return tpd_req;
}

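/*
 * The estimate starts at one TPD for the linear area and adds one per
 * MAX_TX_BUF_LEN-sized chunk of every fragment; for TSO it also counts
 * the chunks needed for the linear data that follows the protocol
 * headers, since atl1e_tx_map() splits the header into its own buffer
 * in that case.
 */
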
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
			  struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
	unsigned short offload_type;
	u8 hdr_len;
	u32 real_len;

	if (skb_is_gso(skb)) {
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		offload_type = skb_shinfo(skb)->gso_type;

		if (offload_type & SKB_GSO_TCPV4) {
			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
					+ ntohs(ip_hdr(skb)->tot_len));

			if (real_len < skb->len) {
				err = pskb_trim(skb, real_len);
				if (err)
					return err;
			}

			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (unlikely(skb->len == hdr_len)) {
				/* only checksum is needed */
				netdev_warn(adapter->netdev,
					    "IPV4 tso with zero data??\n");
				goto check_sum;
			} else {
				ip_hdr(skb)->check = 0;
				ip_hdr(skb)->tot_len = 0;
				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
							ip_hdr(skb)->saddr,
							ip_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0);
				tpd->word3 |= (ip_hdr(skb)->ihl &
					TDP_V4_IPHL_MASK) <<
					TPD_V4_IPHL_SHIFT;
				tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
					TPD_MSS_MASK) << TPD_MSS_SHIFT;
				tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			}
			return 0;
		}
	}

check_sum:
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		u8 css, cso;

		cso = skb_checksum_start_offset(skb);
		if (unlikely(cso & 0x1)) {
			netdev_err(adapter->netdev,
				   "payload offset should be an even number\n");
			return -1;
		}
		css = cso + skb->csum_offset;
		tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
	}

	return 0;
}

static int atl1e_tx_map(struct atl1e_adapter *adapter,
			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tpd_desc *use_tpd = NULL;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 buf_len = skb_headlen(skb);
	u16 map_len = 0;
	u16 mapped_len = 0;
	u16 hdr_len = 0;
	u16 nr_frags;
	u16 f;
	int segment;
	int ring_start = adapter->tx_ring.next_to_use;
	int ring_end;

	nr_frags = skb_shinfo(skb)->nr_frags;
	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (segment) {
		/* TSO */
		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		use_tpd = tpd;

		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
		tx_buffer->length = map_len;
		tx_buffer->dma = pci_map_single(adapter->pdev,
					skb->data, hdr_len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
			return -ENOSPC;

		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
		mapped_len += map_len;
		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
			((cpu_to_le32(tx_buffer->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
	}

	while (mapped_len < buf_len) {
		/* mapped_len == 0, means we should use the first tpd,
		   which is given by caller */
		if (mapped_len == 0) {
			use_tpd = tpd;
		} else {
			use_tpd = atl1e_get_tpd(adapter);
			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
		}
		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
		tx_buffer->skb = NULL;

		tx_buffer->length = map_len =
			((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
			MAX_TX_BUF_LEN : (buf_len - mapped_len);
		tx_buffer->dma =
			pci_map_single(adapter->pdev, skb->data + mapped_len,
					map_len, PCI_DMA_TODEVICE);

		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
			/* We need to unwind the mappings we've done */
			ring_end = adapter->tx_ring.next_to_use;
			adapter->tx_ring.next_to_use = ring_start;
			while (adapter->tx_ring.next_to_use != ring_end) {
				tpd = atl1e_get_tpd(adapter);
				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
				pci_unmap_single(adapter->pdev, tx_buffer->dma,
						 tx_buffer->length, PCI_DMA_TODEVICE);
			}
			/* Reset the tx rings next pointer */
			adapter->tx_ring.next_to_use = ring_start;
			return -ENOSPC;
		}

		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
		mapped_len += map_len;
		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
			((cpu_to_le32(tx_buffer->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;
		u16 i;
		u16 seg_num;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = skb_frag_size(frag);

		seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
		for (i = 0; i < seg_num; i++) {
			use_tpd = atl1e_get_tpd(adapter);
			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));

			tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
			BUG_ON(tx_buffer->skb);

			tx_buffer->skb = NULL;
			tx_buffer->length =
				(buf_len > MAX_TX_BUF_LEN) ?
				MAX_TX_BUF_LEN : buf_len;
			buf_len -= tx_buffer->length;

			tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
							  frag,
							  (i * MAX_TX_BUF_LEN),
							  tx_buffer->length,
							  DMA_TO_DEVICE);

			if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
				/* We need to unwind the mappings we've done */
				ring_end = adapter->tx_ring.next_to_use;
				adapter->tx_ring.next_to_use = ring_start;
				while (adapter->tx_ring.next_to_use != ring_end) {
					tpd = atl1e_get_tpd(adapter);
					tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
					dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
						       tx_buffer->length, DMA_TO_DEVICE);
				}

				/* Reset the ring next to use pointer */
				adapter->tx_ring.next_to_use = ring_start;
				return -ENOSPC;
			}

			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
				((cpu_to_le32(tx_buffer->length) &
				TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
		}
	}

	if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
		/* note this one is a tcp header */
		tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
	/* The last tpd */
	use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
	/* The last buffer info contains the skb address,
	   so it will be freed after unmap */
	tx_buffer->skb = skb;
	return 0;
}

static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
			   struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();
	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
}

static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 tpd_req = 1;
	struct atl1e_tpd_desc *tpd;

	if (test_bit(__AT_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tpd_req = atl1e_cal_tdp_req(skb);

	if (atl1e_tpd_avail(adapter) < tpd_req) {
		/* not enough descriptors, just stop queue */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	tpd = atl1e_get_tpd(adapter);

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag = skb_vlan_tag_get(skb);
		u16 atl1e_vlan_tag;

		tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
		tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
				TPD_VLAN_SHIFT;
	}

	if (skb->protocol == htons(ETH_P_8021Q))
		tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;

	if (skb_network_offset(skb) != ETH_HLEN)
		tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */

	/* do TSO and checksum */
	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (atl1e_tx_map(adapter, skb, tpd)) {
		dev_kfree_skb_any(skb);
		goto out;
	}

	atl1e_tx_queue(adapter, tpd_req, tpd);
out:
	return NETDEV_TX_OK;
}

static void atl1e_free_irq(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
	struct pci_dev    *pdev   = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
			  netdev);
	if (err) {
		netdev_dbg(adapter->netdev,
			   "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}
	netdev_dbg(netdev, "atl1e_request_irq OK\n");
	return err;
}

int atl1e_up(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */
	err = atl1e_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		return err;
	}

	atl1e_init_ring_ptrs(adapter);
	atl1e_set_multi(netdev);
	atl1e_restore_vlan(adapter);

	if (atl1e_configure(adapter)) {
		err = -EIO;
		goto err_up;
	}

	clear_bit(__AT_DOWN, &adapter->flags);
	napi_enable(&adapter->napi);
	atl1e_irq_enable(adapter);
	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
		     val | MASTER_CTRL_MANUAL_INT);

err_up:
	return err;
}

void atl1e_down(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__AT_DOWN, &adapter->flags);

	netif_stop_queue(netdev);

	/* reset MAC to disable all RX/TX */
	atl1e_reset_hw(&adapter->hw);
	msleep(1);

	napi_disable(&adapter->napi);
	atl1e_del_timer(adapter);
	atl1e_irq_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);
}

/**
 * atl1e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1e_open(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__AT_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate rx/tx dma buffer & descriptors */
	atl1e_init_ring_resources(adapter);
	err = atl1e_setup_ring_resources(adapter);
	if (unlikely(err))
		return err;

	err = atl1e_request_irq(adapter);
	if (unlikely(err))
		goto err_req_irq;

	err = atl1e_up(adapter);
	if (unlikely(err))
		goto err_up;

	return 0;

err_up:
	atl1e_free_irq(adapter);
err_req_irq:
	atl1e_free_ring_resources(adapter);
	atl1e_reset_hw(&adapter->hw);

	return err;
}

/**
 * atl1e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1e_close(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
	atl1e_down(adapter);
	atl1e_free_irq(adapter);
	atl1e_free_ring_resources(adapter);

	return 0;
}

2071 static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2073 struct net_device *netdev = pci_get_drvdata(pdev);
2074 struct atl1e_adapter *adapter = netdev_priv(netdev);
2075 struct atl1e_hw *hw = &adapter->hw;
2077 u32 mac_ctrl_data = 0;
2078 u32 wol_ctrl_data = 0;
2079 u16 mii_advertise_data = 0;
2080 u16 mii_bmsr_data = 0;
2081 u16 mii_intr_status_data = 0;
2082 u32 wufc = adapter->wol;
2088 if (netif_running(netdev)) {
2089 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2090 atl1e_down(adapter);
2092 netif_device_detach(netdev);
2095 retval = pci_save_state(pdev);
2101 /* get link status */
2102 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2103 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2105 mii_advertise_data = ADVERTISE_10HALF;
2107 if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
2108 (atl1e_write_phy_reg(hw,
2109 MII_ADVERTISE, mii_advertise_data) != 0) ||
2110 (atl1e_phy_commit(hw)) != 0) {
2111 netdev_dbg(adapter->netdev, "set phy register failed\n");
2115 hw->phy_configured = false; /* re-init PHY when resume */
2117 /* turn on magic packet wol */
2118 if (wufc & AT_WUFC_MAG)
2119 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2121 if (wufc & AT_WUFC_LNKC) {
2122 /* if orignal link status is link, just wait for retrive link */
2123 if (mii_bmsr_data & BMSR_LSTATUS) {
2124 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2126 atl1e_read_phy_reg(hw, MII_BMSR,
2128 if (mii_bmsr_data & BMSR_LSTATUS)
2132 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2133 netdev_dbg(adapter->netdev,
2134 "Link may change when suspend\n");
2136 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2137 /* only link up can wake up */
2138 if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
2139 netdev_dbg(adapter->netdev,
2140 "read write phy register failed\n");
2144 /* clear phy interrupt */
2145 atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
2146 /* Config MAC Ctrl register */
2147 mac_ctrl_data = MAC_CTRL_RX_EN;
2148 /* set to 10/100M halt duplex */
2149 mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
2150 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2151 MAC_CTRL_PRMLEN_MASK) <<
2152 MAC_CTRL_PRMLEN_SHIFT);
2154 __atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
2156 /* magic packet maybe Broadcast&multicast&Unicast frame */
2157 if (wufc & AT_WUFC_MAG)
2158 mac_ctrl_data |= MAC_CTRL_BC_EN;
		netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
			   mac_ctrl_data);

		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

		/* pcie patch */
		ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto suspend_exit;
	}
wol_dis:
	/* WOL disabled */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* pcie patch */
	ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);

	atl1e_force_ps(hw);
	hw->phy_configured = false; /* re-init PHY when resume */

	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
suspend_exit:
	if (netif_running(netdev))
		atl1e_free_irq(adapter);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int atl1e_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(adapter->netdev,
			   "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
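	/* The read above clears the latched wake status (per the inline
	 * comment); writing 0 then disarms every wake-up trigger before
	 * normal operation resumes.
	 */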
	if (netif_running(netdev)) {
		err = atl1e_request_irq(adapter);
		if (err)
			return err;
	}

	atl1e_reset_hw(&adapter->hw);

	if (netif_running(netdev))
		atl1e_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
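/* atl1e_shutdown below funnels into the suspend path, so Wake-on-LAN,
 * when the user has configured it, is armed before the system powers off.
 */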
static void atl1e_shutdown(struct pci_dev *pdev)
{
	atl1e_suspend(pdev, PMSG_SUSPEND);
}
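/* The net_device_ops table wires the stack's entry points to the driver:
 * .ndo_open/.ndo_stop run on ifup/ifdown, and .ndo_start_xmit is called
 * for every outgoing frame.
 */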
static const struct net_device_ops atl1e_netdev_ops = {
	.ndo_open = atl1e_open,
	.ndo_stop = atl1e_close,
	.ndo_start_xmit = atl1e_xmit_frame,
	.ndo_get_stats = atl1e_get_stats,
	.ndo_set_rx_mode = atl1e_set_multi,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = atl1e_set_mac_addr,
	.ndo_fix_features = atl1e_fix_features,
	.ndo_set_features = atl1e_set_features,
	.ndo_change_mtu = atl1e_change_mtu,
	.ndo_do_ioctl = atl1e_ioctl,
	.ndo_tx_timeout = atl1e_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = atl1e_netpoll,
#endif
};
static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
{
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	netdev->netdev_ops = &atl1e_netdev_ops;

	netdev->watchdog_timeo = AT_TX_WATCHDOG;
	/* MTU range: 42 - 8170 */
	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
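	/* With ETH_ZLEN = 60 and an ETH_HLEN + VLAN_HLEN overhead of
	 * 14 + 4 = 18 bytes, min_mtu works out to 42; max_mtu is the
	 * 8192-byte jumbo frame limit minus 14 + 4 + 4 = 22, i.e. 8170.
	 */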
	atl1e_set_ethtool_ops(netdev);

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
			      NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
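	/* hw_features is the set the user may toggle with ethtool -K;
	 * features is what is active now. VLAN tag insertion appears only
	 * in features, so it stays permanently enabled.
	 */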
	/* not enabled by default */
	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;

	return 0;
}
/**
 * atl1e_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1e_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1e_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1e_adapter *adapter = NULL;
	static int cards_found;
	int err = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}
	/*
	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth. It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto err_dma;
	}
	err = pci_request_regions(pdev, atl1e_driver_name);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
	if (netdev == NULL) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	err = atl1e_init_netdev(netdev, pdev);
	if (err) {
		netdev_err(netdev, "init netdevice failed\n");
		goto err_init_netdev;
	}
	adapter = netdev_priv(netdev);
	adapter->bd_number = cards_found;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.adapter = adapter;
	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		netdev_err(netdev, "cannot map device registers\n");
		goto err_ioremap;
	}
	/* init mii data */
	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = atl1e_mdio_read;
	adapter->mii.mdio_write = atl1e_mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);

	setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
		    (unsigned long)adapter);
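	/* The timer is bound here but not yet armed: whenever
	 * phy_config_timer is later scheduled with mod_timer(), it runs
	 * atl1e_phy_config() with the adapter as its argument.
	 */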
	/* get user settings */
	atl1e_check_options(adapter);
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1e_driver_name
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	atl1e_setup_pcicmd(pdev);
	/* setup the private structure */
	err = atl1e_sw_init(adapter);
	if (err) {
		netdev_err(netdev, "net device private data init failed\n");
		goto err_sw_init;
	}
	/* Init GPHY as early as possible due to power saving issue */
	atl1e_phy_init(&adapter->hw);
	/* reset the controller to
	 * put the device in a known good starting state
	 */
	err = atl1e_reset_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
		err = -EIO;
		netdev_err(netdev, "get mac address failed\n");
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
	err = register_netdev(netdev);
	if (err) {
		netdev_err(netdev, "register netdevice failed\n");
		goto err_register;
	}

	/* assume we have no link for now */
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	cards_found++;

	return 0;

err_reset:
err_register:
err_sw_init:
err_eeprom:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * atl1e_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void atl1e_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable the watchdog tasks from being rescheduled
	 */
	set_bit(__AT_DOWN, &adapter->flags);

	atl1e_del_timer(adapter);
	atl1e_cancel_work(adapter);

	unregister_netdev(netdev);
	atl1e_free_ring_resources(adapter);
	atl1e_force_ps(&adapter->hw);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
/**
 * atl1e_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		atl1e_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
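/* The AER recovery sequence from here: the PCI core resets the slot,
 * calls atl1e_io_slot_reset() to re-enable the device, and finally
 * atl1e_io_resume() once traffic may flow again.
 */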
/**
 * atl1e_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the atl1e_resume routine.
 */
static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(adapter->netdev,
			   "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	atl1e_reset_hw(&adapter->hw);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * atl1e_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second half of the atl1e_resume routine.
 */
static void atl1e_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (atl1e_up(adapter)) {
			netdev_err(adapter->netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static const struct pci_error_handlers atl1e_err_handler = {
	.error_detected = atl1e_io_error_detected,
	.slot_reset = atl1e_io_slot_reset,
	.resume = atl1e_io_resume,
};
static struct pci_driver atl1e_driver = {
	.name = atl1e_driver_name,
	.id_table = atl1e_pci_tbl,
	.probe = atl1e_probe,
	.remove = atl1e_remove,
	/* Power Management Hooks */
#ifdef CONFIG_PM
	.suspend = atl1e_suspend,
	.resume = atl1e_resume,
#endif
	.shutdown = atl1e_shutdown,
	.err_handler = &atl1e_err_handler
};

module_pci_driver(atl1e_driver);
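/* module_pci_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters atl1e_driver with the
 * PCI core at module load and unload.
 */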