// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	Authors:	Jamal Hadi Salim (2005)
*/
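
/* Example usage (illustrative, not part of this file): redirect eth0's
 * ingress traffic to ifb0 so it can be shaped by a qdisc attached to
 * ifb0. The device names and the sfq root qdisc are just one possible
 * setup.
 *
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: matchall \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root sfq
 */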
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#define TX_Q_LIMIT	32

struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;	/* filled by ifb_xmit() */
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;	/* drained by the tasklet */
} ____cacheline_aligned_in_smp;
struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
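
/* Per-queue bottom half. ifb_xmit() enqueues redirected skbs on rq and
 * schedules this tasklet; under the tx queue lock the tasklet splices
 * rq into tq, then drains tq without the lock, re-injecting each skb
 * via its original device: dev_queue_xmit() for packets grabbed on
 * egress, netif_receive_skb() for packets grabbed on ingress.
 */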
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->redirected = 0;
#ifdef CONFIG_NET_CLS_ACT
		skb->tc_skip_classify = 1;
#endif
		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}
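
/* Sum the per-queue counters into one rtnl_link_stats64. The
 * u64_stats_fetch_begin_irq()/retry_irq() pair rereads a queue's
 * counters until it observes a consistent snapshot (needed on 32-bit
 * SMP, a no-op on 64-bit).
 */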
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};
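
/* Advertise the full offload feature set so that GSO skbs redirected
 * here are not segmented or checksummed merely to pass through ifb.
 */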
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
		      NETIF_F_GSO_ENCAP_ALL | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
		      NETIF_F_HW_VLAN_STAG_TX)
static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
}
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}
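
/* ndo_start_xmit: account the packet, drop anything that was not
 * redirected to us by tc (or that lost its original ifindex), then
 * queue it for the per-queue tasklet, pausing the tx queue once rq
 * reaches tx_queue_len.
 */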
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!skb->redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
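
/* Illustrative only: either load the module with a different device
 * count, or create a multiqueue device at runtime as suggested above:
 *
 *	modprobe ifb numifbs=4
 *	ip link add ifb10 numtxqueues 8 type ifb
 */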
static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);
	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}
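
/* Registration is open-coded here: __rtnl_link_register() is called
 * under pernet_ops_rwsem and the rtnl lock (the locks that
 * rtnl_link_register() would take itself) so that the initial devices
 * can be created in the same critical section.
 */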
static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");