/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
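
/* Illustrative life-cycle sketch (not part of the original file; "gadget",
 * "port", and the address strings are placeholders for the caller's state):
 *
 *	dev = gether_setup_name(gadget, dev_addr_str, host_addr_str,
 *				host_mac, qmult, "usb");	/- bind time
 *	net = gether_connect(&port);	/- host activates the data interface
 *	gether_disconnect(&port);	/- host deactivates it
 *	gether_cleanup(dev);		/- unbind time
 */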

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
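
/* For illustration: 15k+52 = 15 * 1024 + 52 = 15412 bytes of MTU, so
 * GETHER_MAX_ETH_FRAME_LEN works out to 15412 + ETH_HLEN (14) = 15426 bytes.
 * Together with the rx padding added in rx_submit() below, that keeps the
 * usual rx buffer allocation within a 16 KiB block rather than a 32 KiB one.
 */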

struct eth_dev {
	/* lock is held while accessing port_usb */
	spinlock_t		lock;
	struct gether		*port_usb;
	struct net_device	*net;
	struct usb_gadget	*gadget;
	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;
	struct sk_buff_head	rx_frames;
	unsigned		qmult;
	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
					  struct sk_buff *skb,
					  struct sk_buff_head *list);
	struct work_struct	work;
	unsigned long		todo;
#define	WORK_RX_MEMORY		0
	bool			zlp;
	bool			no_skb_reserve;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
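
/* For example (illustrative numbers): with a qmult of 5, a dual-speed UDC
 * running at high or super speed keeps 5 * DEFAULT_QLEN = 10 requests per
 * direction queued, while at full speed it falls back to DEFAULT_QLEN (2).
 */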

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */
#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)
#ifdef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif
#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif
#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}
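
/* A sketch of the deferral path as used below: when rx_submit() cannot
 * allocate an rx skb or queue a request, it calls
 * defer_kevent(dev, WORK_RX_MEMORY); eth_work() later runs in process
 * context and calls rx_fill(dev, GFP_KERNEL) to retry filling the rx queue
 * once allocations may sleep.
 */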

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget	*g = dev->gadget;
	int			retval = -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	out = dev->port_usb->out_ep;

		spin_unlock_irqrestore(&dev->lock, flags);

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);
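
	/* Rough example (assuming a CDC ECM link with no extra header, a
	 * 1500 byte MTU, and 512 byte bulk maxpacket at high speed):
	 * size = 14 + 1500 + 20 = 1534; with quirk_ep_out_aligned_size the
	 * buffer is rounded up to 3 * 512 = 1536 bytes.
	 */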

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
		DBG(dev, "no rx skb\n");

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->complete = rx_complete;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
		defer_kevent(dev, WORK_RX_MEMORY);

		DBG(dev, "rx submit --> %d\n", retval);
		dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

			spin_lock_irqsave(&dev->lock, flags);
			status = dev->unwrap(dev->port_usb,
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&dev->lock, flags);

			skb_queue_tail(&dev->rx_frames, skb);

		skb2 = skb_dequeue(&dev->rx_frames);
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
			skb2 = skb_dequeue(&dev->rx_frames);

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);

		dev_kfree_skb_any(skb);

		dev->net->stats.rx_over_errors++;

		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);

	dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);

	rx_submit(dev, req, GFP_ATOMIC);

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	struct usb_request	*req;

	/* queue/recycle up to N requests */
	list_for_each_entry(req, list, list) {

		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);

		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		req = container_of(next, struct usb_request, list);

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);

	status = prealloc(&dev->rx_reqs, link->out_ep, n);

		DBG(dev, "can't alloc requests\n");

	spin_unlock(&dev->req_lock);

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	DBG(dev, "work done, flags = 0x%lx\n", dev->todo);

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);

		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);

	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
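
/* For orientation (derived from the surrounding code): eth_start_xmit()
 * below pulls a request from dev->tx_reqs, stops the netif queue when that
 * list empties, and queues the request to the IN endpoint; tx_complete()
 * then returns the request to dev->tx_reqs, decrements dev->tx_qlen, and
 * wakes the netif queue again while the carrier is up.
 */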

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	struct usb_request	*req = NULL;

	spin_lock_irqsave(&dev->lock, flags);
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	spin_unlock_irqrestore(&dev->lock, flags);

		dev_kfree_skb_any(skb);

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);

		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for extra headers we need
	 */
		spin_lock_irqsave(&dev->lock, flags);
		skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);

		/* Multi frame CDC protocols may store the frame for
		 * later which is not a dropped frame.
		 */
				dev->port_usb->supports_multi_frame)

	req->buf = skb->data;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)

	req->length = length;
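
	/* Example of the framing decision (illustrative numbers): with a
	 * 512 byte bulk IN maxpacket, a 1024 byte transfer ends exactly on a
	 * packet boundary, so the host cannot tell where the transfer ends
	 * unless it is terminated explicitly; req->zero requests that
	 * termination, except for the fixed-size NCM case handled above.
	 */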

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
		DBG(dev, "tx queue err %d\n", retval);
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);

		dev_kfree_skb_any(skb);
		dev->net->stats.tx_dropped++;

		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)

	spin_unlock_irq(&dev->lock);

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	for (i = 0; i < 6; i++) {
		if ((*str == '.') || (*str == ':'))
			str++;
		num = hex_to_bin(*str++) << 4;
		num |= hex_to_bin(*str++);
	}

	if (is_valid_ether_addr(dev_addr))
		return 0;

	eth_random_addr(dev_addr);
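
/* Accepted input looks like "01:23:45:67:89:ab" (or with '.' separators, or
 * no separators at all); anything that does not parse to a valid unicast
 * address falls back to a random, locally administered address via
 * eth_random_addr().
 */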

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	snprintf(str, len, "%pM", dev_addr);

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct net_device	*net;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct usb_gadget *g;

	if (!net->dev.parent)
		return -EINVAL;

	dev = netdev_priv(net);

	status = register_netdev(net);
	if (status < 0)
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);

	INFO(dev, "HOST MAC %pM\n", dev->host_mac);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_carrier_off(net);

	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);

	status = dev_set_mac_address(net, &sa);
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);
EXPORT_SYMBOL_GPL(gether_register_netdev);
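
/* Illustrative sketch (identifiers are placeholders) of the two-step
 * bring-up used when the link is configured before a gadget is bound:
 * allocate the netdev first, attach the gadget, then register it:
 *
 *	net = gether_setup_name_default("usb");
 *	...
 *	gether_set_gadget(net, gadget);
 *	status = gether_register_netdev(net);
 */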

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	dev_addr[ret++] = '\n';
	dev_addr[ret] = '\0';

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	host_addr[ret++] = '\n';
	host_addr[ret] = '\0';

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;
	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	ret = snprintf(name, len, "%s\n", netdev_name(net));
	return ret < len ? ret : len;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev	*dev = link->ioport;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);

	result = alloc_requests(dev, link, qlen(dev->gadget,
				dev->qmult));

	dev->zlp = link->is_zlp_ok;
	dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
	DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

	dev->header_len = link->header_len;
	dev->unwrap = link->unwrap;
	dev->wrap = link->wrap;

	spin_lock(&dev->lock);
	dev->port_usb = link;
	if (netif_running(dev->net)) {

	spin_unlock(&dev->lock);

	netif_carrier_on(dev->net);
	if (netif_running(dev->net))
		eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints */
		(void) usb_ep_disable(link->out_ep);
		(void) usb_ep_disable(link->in_ep);

	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
EXPORT_SYMBOL_GPL(gether_connect);
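
/* Illustrative call site (not from this file): a function driver's set_alt()
 * handler typically activates the link by calling gether_connect() on its
 * own struct gether, e.g.
 *
 *	net = gether_connect(&port);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *
 * and calls gether_disconnect(&port) when the altsetting is torn down.
 */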

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");