1 // SPDX-License-Identifier: GPL-2.0
3 * Microchip switch driver main logic
5 * Copyright (C) 2017-2019 Microchip Technology Inc.
8 #include <linux/delay.h>
9 #include <linux/export.h>
10 #include <linux/gpio/consumer.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_data/microchip-ksz.h>
14 #include <linux/phy.h>
15 #include <linux/etherdevice.h>
16 #include <linux/if_bridge.h>
17 #include <linux/of_net.h>
19 #include <net/switchdev.h>
21 #include "ksz_common.h"
/*
 * ksz_update_port_member - push the device-wide bridge membership mask
 * (dev->member) into the hardware per-port forwarding registers.
 *
 * Skips the triggering @port and the CPU port, skips ports that are not in
 * dev->member, and reprograms only ports that are in STP FORWARDING state
 * whose cached mask (p->member) differs from dev->member.
 *
 * NOTE(review): this excerpt is elided - the loop's opening brace, the
 * declaration of `i` and `p`, and the continue/brace lines are not visible
 * here; confirm against the full source.
 */
23 void ksz_update_port_member(struct ksz_device *dev, int port)
28 for (i = 0; i < dev->port_cnt; i++) {
/* Never touch the port that changed or the CPU port in this loop. */
29 if (i == port || i == dev->cpu_port)
/* Port not in the bridge member mask - nothing to program. */
32 if (!(dev->member & (1 << i)))
35 /* Port is a member of the bridge and is forwarding. */
36 if (p->stp_state == BR_STATE_FORWARDING &&
37 p->member != dev->member)
38 dev->dev_ops->cfg_port_member(dev, i, dev->member);
41 EXPORT_SYMBOL_GPL(ksz_update_port_member);
/*
 * port_r_cnt - read one port's hardware MIB counters into the software
 * mirror (mib->counters), resuming from mib->cnt_ptr.
 *
 * Two passes: plain counters below dev->reg_mib_cnt are read with
 * r_mib_cnt(); packet counters up to dev->mib_cnt are read with r_mib_pkt(),
 * which also folds dropped-frame state into the last storage slot.
 * Caller is expected to hold mib->cnt_mutex (see callers in this file).
 *
 * NOTE(review): loop-increment and closing-brace lines are elided in this
 * excerpt - confirm cnt_ptr advancement against the full source.
 */
43 static void port_r_cnt(struct ksz_device *dev, int port)
45 struct ksz_port_mib *mib = &dev->ports[port].mib;
48 /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
49 while (mib->cnt_ptr < dev->reg_mib_cnt) {
50 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
51 &mib->counters[mib->cnt_ptr]);
55 /* last one in storage */
56 dropped = &mib->counters[dev->mib_cnt];
58 /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
59 while (mib->cnt_ptr < dev->mib_cnt) {
60 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
61 dropped, &mib->counters[mib->cnt_ptr]);
/*
 * ksz_mib_read_work - periodic delayed-work handler that refreshes MIB
 * counters for every used port, then re-arms itself.
 *
 * For each port (skipping DSA-unused ones) it takes the port's cnt_mutex,
 * reads counters, and drops the mutex. When the port's carrier is down,
 * cnt_ptr is advanced to dev->reg_mib_cnt so only the dropped counters are
 * read. Finally the work is rescheduled after dev->mib_read_interval.
 *
 * NOTE(review): the container_of() second line, the port_r_cnt() call and
 * several brace lines are elided in this excerpt.
 */
67 static void ksz_mib_read_work(struct work_struct *work)
69 struct ksz_device *dev = container_of(work, struct ksz_device,
71 struct ksz_port_mib *mib;
75 for (i = 0; i < dev->mib_port_cnt; i++) {
76 if (dsa_is_unused_port(dev->ds, i))
/* Serialize against ethtool stats readers on this port. */
81 mutex_lock(&mib->cnt_mutex);
83 /* Only read MIB counters when the port is told to do.
84 * If not, read only dropped counters when link is not up.
87 const struct dsa_port *dp = dsa_to_port(dev->ds, i);
/* Link down: skip straight to the dropped counters. */
89 if (!netif_carrier_ok(dp->slave))
90 mib->cnt_ptr = dev->reg_mib_cnt;
94 mutex_unlock(&mib->cnt_mutex);
/* Re-arm for the next periodic read. */
97 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
/*
 * ksz_init_mib_timer - set up the periodic MIB read machinery.
 *
 * Initializes the delayed work with ksz_mib_read_work() and resets the
 * hardware counters on every MIB-capable port. The work is not scheduled
 * here; ksz_switch_register() kicks it off.
 */
100 void ksz_init_mib_timer(struct ksz_device *dev)
104 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
106 for (i = 0; i < dev->mib_port_cnt; i++)
107 dev->dev_ops->port_init_cnt(dev, i);
109 EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
/*
 * ksz_phy_read16 - DSA .phy_read callback; delegates the register read to
 * the chip-specific r_phy() op.
 *
 * NOTE(review): the declaration of `val` and the return statement are
 * elided in this excerpt; presumably `val` is returned to the caller.
 */
111 int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
113 struct ksz_device *dev = ds->priv;
116 dev->dev_ops->r_phy(dev, addr, reg, &val);
120 EXPORT_SYMBOL_GPL(ksz_phy_read16);
/*
 * ksz_phy_write16 - DSA .phy_write callback; delegates the register write
 * to the chip-specific w_phy() op.
 *
 * NOTE(review): the return statement is elided in this excerpt.
 */
122 int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
124 struct ksz_device *dev = ds->priv;
126 dev->dev_ops->w_phy(dev, addr, reg, val);
130 EXPORT_SYMBOL_GPL(ksz_phy_write16);
/*
 * ksz_mac_link_down - phylink .mac_link_down callback.
 *
 * When the periodic MIB timer is active (mib_read_interval != 0), schedule
 * an immediate MIB read so the counters accumulated while the link was up
 * are captured before they can be lost.
 *
 * NOTE(review): lines between the comment and the if() are elided here
 * (presumably some per-port state update using `p`); confirm against the
 * full source.
 */
132 void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
133 phy_interface_t interface)
135 struct ksz_device *dev = ds->priv;
136 struct ksz_port *p = &dev->ports[port];
138 /* Read all MIB counters when the link is going down. */
141 if (dev->mib_read_interval)
142 schedule_delayed_work(&dev->mib_read, 0);
144 EXPORT_SYMBOL_GPL(ksz_mac_link_down);
/*
 * ksz_sset_count - DSA .get_sset_count callback.
 *
 * Only the ETH_SS_STATS string set is supported; the success-path return
 * (presumably dev->mib_cnt) and the rejection return are elided in this
 * excerpt.
 */
146 int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
148 struct ksz_device *dev = ds->priv;
150 if (sset != ETH_SS_STATS)
155 EXPORT_SYMBOL_GPL(ksz_sset_count);
/*
 * ksz_get_ethtool_stats - DSA .get_ethtool_stats callback.
 *
 * Under the port's cnt_mutex (which also excludes the periodic MIB work),
 * refresh the counters via port_r_cnt() and copy the mirrored values into
 * the caller's buffer. If the carrier is down, cnt_ptr is advanced to
 * dev->reg_mib_cnt first so only the dropped counters are (re)read.
 */
157 void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
159 const struct dsa_port *dp = dsa_to_port(ds, port);
160 struct ksz_device *dev = ds->priv;
161 struct ksz_port_mib *mib;
163 mib = &dev->ports[port].mib;
164 mutex_lock(&mib->cnt_mutex);
166 /* Only read dropped counters if no link. */
167 if (!netif_carrier_ok(dp->slave))
168 mib->cnt_ptr = dev->reg_mib_cnt;
169 port_r_cnt(dev, port);
/* Hand the software mirror back to ethtool. */
170 memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
171 mutex_unlock(&mib->cnt_mutex);
173 EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
/*
 * ksz_port_bridge_join - DSA .port_bridge_join callback.
 *
 * Records the port in the device's bridge member bitmap (br_member) under
 * dev_mutex. The actual hardware reprogramming happens later via
 * port_stp_state_set(), as the comment below notes.
 */
175 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
176 struct net_device *br)
178 struct ksz_device *dev = ds->priv;
180 mutex_lock(&dev->dev_mutex);
181 dev->br_member |= (1 << port);
182 mutex_unlock(&dev->dev_mutex);
184 /* port_stp_state_set() will be called after to put the port in
185 * appropriate state so there is no need to do anything.
190 EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
/*
 * ksz_port_bridge_leave - DSA .port_bridge_leave callback.
 *
 * Clears the port from both the bridge bitmap (br_member) and the active
 * forwarding membership (member) under dev_mutex; hardware is reprogrammed
 * later through port_stp_state_set().
 */
192 void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
193 struct net_device *br)
195 struct ksz_device *dev = ds->priv;
197 mutex_lock(&dev->dev_mutex);
198 dev->br_member &= ~(1 << port);
199 dev->member &= ~(1 << port);
200 mutex_unlock(&dev->dev_mutex);
202 /* port_stp_state_set() will be called after to put the port in
203 * forwarding state so there is no need to do anything.
206 EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
/*
 * ksz_port_fast_age - DSA .port_fast_age callback: flush all dynamically
 * learned MAC addresses on @port via the chip-specific op.
 */
208 void ksz_port_fast_age(struct dsa_switch *ds, int port)
210 struct ksz_device *dev = ds->priv;
212 dev->dev_ops->flush_dyn_mac_table(dev, port);
214 EXPORT_SYMBOL_GPL(ksz_port_fast_age);
/*
 * ksz_port_vlan_prepare - DSA .port_vlan_prepare callback.
 *
 * Body elided in this excerpt; presumably a trivial "nothing to prepare"
 * success return - confirm against the full source.
 */
216 int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
217 const struct switchdev_obj_port_vlan *vlan)
223 EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
/*
 * ksz_port_fdb_dump - DSA .port_fdb_dump callback.
 *
 * Walks the dynamic MAC table via r_dyn_mac_table() and invokes @cb for
 * each entry whose member mask includes @port. Entries reported from the
 * dynamic table are always flagged non-static.
 *
 * NOTE(review): loop setup, the extra r_dyn_mac_table() out-arguments, the
 * error handling inside the loop and the final return are elided in this
 * excerpt.
 */
225 int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
228 struct ksz_device *dev = ds->priv;
235 struct alu_struct alu;
/* Dynamic-table entries are by definition not static. */
238 alu.is_static = false;
239 ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
/* Report only entries that include this port in their member mask. */
242 if (!ret && (member & BIT(port))) {
243 ret = cb(alu.mac, alu.fid, alu.is_static, data);
248 } while (i < entries);
254 EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
/*
 * ksz_port_mdb_prepare - DSA .port_mdb_prepare callback.
 *
 * Body elided in this excerpt; presumably a trivial success return since
 * ksz_port_mdb_add() does the real work - confirm against the full source.
 */
256 int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
257 const struct switchdev_obj_port_mdb *mdb)
262 EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
/*
 * ksz_port_mdb_add - DSA .port_mdb_add callback.
 *
 * Scans the static MAC table for an entry matching the MDB address
 * (the VID comparison lives on an elided line). If found, @port is OR-ed
 * into its forward mask; otherwise the first empty slot (tracked in
 * `empty`, declaration elided) is populated with a fresh static entry.
 * The entry is then written back with w_sta_mac_table().
 *
 * NOTE(review): several lines are elided here - the `index`/`empty`
 * declarations, the VID match condition, the empty-slot bookkeeping, the
 * bail-out when the table is full, and the FID assignment hinted at by the
 * "map VID to FID" comment. Confirm against the full source.
 */
264 void ksz_port_mdb_add(struct dsa_switch *ds, int port,
265 const struct switchdev_obj_port_mdb *mdb)
267 struct ksz_device *dev = ds->priv;
268 struct alu_struct alu;
272 alu.port_forward = 0;
273 for (index = 0; index < dev->num_statics; index++) {
274 if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
275 /* Found one already in static MAC table. */
276 if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
279 /* Remember the first empty entry. */
285 /* no available entry */
286 if (index == dev->num_statics && !empty)
/* No match found: build a new entry in the remembered empty slot. */
290 if (index == dev->num_statics) {
292 memset(&alu, 0, sizeof(alu));
293 memcpy(alu.mac, mdb->addr, ETH_ALEN);
294 alu.is_static = true;
296 alu.port_forward |= BIT(port);
298 alu.is_use_fid = true;
300 /* Need a way to map VID to FID. */
303 dev->dev_ops->w_sta_mac_table(dev, index, &alu);
305 EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
/*
 * ksz_port_mdb_del - DSA .port_mdb_del callback.
 *
 * Finds the static MAC table entry matching the MDB address (VID match on
 * an elided line), removes @port from its forward mask, demotes the entry
 * to non-static when no ports remain, and writes it back.
 *
 * NOTE(review): the `index` declaration, the VID condition, the
 * not-found return value and the final return are elided in this excerpt.
 */
307 int ksz_port_mdb_del(struct dsa_switch *ds, int port,
308 const struct switchdev_obj_port_mdb *mdb)
310 struct ksz_device *dev = ds->priv;
311 struct alu_struct alu;
315 for (index = 0; index < dev->num_statics; index++) {
316 if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
317 /* Found one already in static MAC table. */
318 if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
324 /* no available entry */
325 if (index == dev->num_statics)
329 alu.port_forward &= ~BIT(port);
/* Last member gone: the entry no longer needs to be static. */
330 if (!alu.port_forward)
331 alu.is_static = false;
332 dev->dev_ops->w_sta_mac_table(dev, index, &alu);
337 EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
/*
 * ksz_enable_port - DSA .port_enable callback.
 *
 * Runs the chip-specific port_setup() (cpu_port=false) for user ports only;
 * non-user ports are rejected early (return value on an elided line).
 * Actual enabling is deferred to port_stp_state_set(), per the comment.
 */
339 int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
341 struct ksz_device *dev = ds->priv;
343 if (!dsa_is_user_port(ds, port))
346 /* setup slave port */
347 dev->dev_ops->port_setup(dev, port, false);
349 /* port_stp_state_set() will be called after to enable the port so
350 * there is no need to do anything.
355 EXPORT_SYMBOL_GPL(ksz_enable_port);
/*
 * ksz_switch_alloc - allocate the dsa_switch and ksz_device structures.
 * @base: underlying bus device used as the devm owner
 * @priv: bus-private data (stored on elided lines, presumably in swdev)
 *
 * Both allocations are device-managed, so no explicit free path is needed.
 * NOTE(review): the NULL checks, field wiring between ds/swdev/priv, and
 * the return statement are elided in this excerpt.
 */
357 struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
359 struct dsa_switch *ds;
360 struct ksz_device *swdev;
362 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
367 ds->num_ports = DSA_MAX_PORTS;
369 swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
381 EXPORT_SYMBOL(ksz_switch_alloc);
/*
 * ksz_switch_register - bring up and register a KSZ switch.
 * @dev: device allocated by ksz_switch_alloc()
 * @ops: chip-specific operation table
 *
 * Sequence visible in this excerpt: take chip_id from platform data,
 * optionally pulse the reset GPIO (10-12 ms asserted), initialize the
 * driver mutexes, detect and init the chip via dev_ops, default every
 * port's PHY interface to PHY_INTERFACE_MODE_NA, then parse the device
 * tree (top-level phy-mode as compat_interface, per-port phy-mode from the
 * "ports" node, "microchip,synclko-125"), register with the DSA core, and
 * finally start the 30-second periodic MIB read.
 *
 * NOTE(review): error-return lines, the ops assignment, and several brace
 * lines are elided in this excerpt - e.g. the failure path after
 * dsa_register_switch() shows only the exit() call, with its return elided.
 */
383 int ksz_switch_register(struct ksz_device *dev,
384 const struct ksz_dev_ops *ops)
386 struct device_node *port, *ports;
387 phy_interface_t interface;
388 unsigned int port_num;
392 dev->chip_id = dev->pdata->chip_id;
394 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
396 if (IS_ERR(dev->reset_gpio))
397 return PTR_ERR(dev->reset_gpio);
/* Hardware reset pulse: assert, hold 10-12 ms, release. */
399 if (dev->reset_gpio) {
400 gpiod_set_value_cansleep(dev->reset_gpio, 1);
401 usleep_range(10000, 12000);
402 gpiod_set_value_cansleep(dev->reset_gpio, 0);
406 mutex_init(&dev->dev_mutex);
407 mutex_init(&dev->regmap_mutex);
408 mutex_init(&dev->alu_mutex);
409 mutex_init(&dev->vlan_mutex);
413 if (dev->dev_ops->detect(dev))
416 ret = dev->dev_ops->init(dev);
420 /* Host port interface will be self detected, or specifically set in
423 for (port_num = 0; port_num < dev->port_cnt; ++port_num)
424 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
425 if (dev->dev->of_node) {
426 ret = of_get_phy_mode(dev->dev->of_node, &interface);
428 dev->compat_interface = interface;
429 ports = of_get_child_by_name(dev->dev->of_node, "ports");
431 for_each_available_child_of_node(ports, port) {
432 if (of_property_read_u32(port, "reg",
/* Reject DT port indices beyond what the chip supports. */
435 if (port_num >= dev->mib_port_cnt)
437 of_get_phy_mode(port,
438 &dev->ports[port_num].interface);
440 dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
441 "microchip,synclko-125");
444 ret = dsa_register_switch(dev->ds);
446 dev->dev_ops->exit(dev);
450 /* Read MIB counters every 30 seconds to avoid overflow. */
451 dev->mib_read_interval = msecs_to_jiffies(30000);
453 /* Start the MIB timer. */
454 schedule_delayed_work(&dev->mib_read, 0);
458 EXPORT_SYMBOL(ksz_switch_register);
/*
 * ksz_switch_remove - tear down a registered switch: stop the periodic MIB
 * work (clearing mib_read_interval first so link-down handlers stop
 * rescheduling it), run the chip exit op, unregister from DSA, and assert
 * the reset GPIO (the guard on dev->reset_gpio is elided in this excerpt).
 */
460 void ksz_switch_remove(struct ksz_device *dev)
463 if (dev->mib_read_interval) {
464 dev->mib_read_interval = 0;
465 cancel_delayed_work_sync(&dev->mib_read);
468 dev->dev_ops->exit(dev);
469 dsa_unregister_switch(dev->ds);
/* Leave the chip held in reset on the way out. */
472 gpiod_set_value_cansleep(dev->reset_gpio, 1);
475 EXPORT_SYMBOL(ksz_switch_remove);
/* Standard kernel module metadata. */
477 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
478 MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
479 MODULE_LICENSE("GPL");