/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>

#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;
	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);
	if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
		kfree(newtbl);
		return NULL;
	}
	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

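#if 0
/*
 * Illustrative sketch, not part of the original file: how a path-resolution
 * handler might use mesh_path_assign_nexthop() under the locking rule
 * documented above. The function name example_resolve_path() and its calling
 * context are hypothetical.
 */
static void example_resolve_path(struct mesh_path *mpath,
				 struct sta_info *new_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, new_hop);
	mpath->flags |= MESH_PATH_ACTIVE;
	spin_unlock_bh(&mpath->state_lock);
	/* the queued frames already carry the new next hop address */
	mesh_path_tx_pending(mpath);
}
#endif
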
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	u8 *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}
		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;
		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);
		if (copy)
			continue;
		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);
	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

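#if 0
/*
 * Illustrative sketch, not part of the original file: the lookup helpers
 * above return RCU-protected pointers, so a caller holds rcu_read_lock()
 * across the lookup and any use of the result. The function name
 * example_path_is_active() is hypothetical.
 */
static bool example_path_is_active(struct ieee80211_sub_if_data *sdata,
				   const u8 *dst)
{
	struct mesh_path *mpath;
	bool active;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, dst);
	active = mpath && (mpath->flags & MESH_PATH_ACTIVE);
	rcu_read_unlock();
	return active;
}
#endif
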
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}
	if (!mpath)
		return NULL;
	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

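#if 0
/*
 * Illustrative sketch, not part of the original file: when a root
 * announcement advertises gate capability, the corresponding path can be
 * recorded as a gate. example_record_gate() and the rann_is_gate flag are
 * hypothetical names used only in this sketch.
 */
static void example_record_gate(struct ieee80211_sub_if_data *sdata,
				const u8 *root_addr, bool rann_is_gate)
{
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, root_addr);
	if (mpath && rann_is_gate && !mpath->is_gate)
		mesh_path_add_gate(mpath);
	rcu_read_unlock();
}
#endif
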
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new mesh path on success, or an ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	do {
		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
						    &new_mpath->rhash,
						    mesh_rht_params);
		if (ret == -EEXIST)
			mpath = rhashtable_lookup_fast(&tbl->rhead,
						       dst,
						       mesh_rht_params);
		else if (!ret)
			hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	} while (unlikely(ret == -EEXIST && !mpath));
	spin_unlock_bh(&tbl->walk_lock);

	if (ret) {
		kfree(new_mpath);
		if (ret != -EEXIST)
			return ERR_PTR(ret);
		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

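#if 0
/*
 * Illustrative sketch, not part of the original file: a "find or create"
 * pattern on top of mesh_path_lookup()/mesh_path_add(), roughly what a
 * path-resolution caller does before queueing a frame on the path.
 * example_resolve() is a hypothetical name; a real caller keeps using
 * mpath inside the same RCU read section.
 */
static int example_resolve(struct ieee80211_sub_if_data *sdata,
			   const u8 *dst, struct sk_buff *skb)
{
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, dst);
	if (!mpath) {
		mpath = mesh_path_add(sdata, dst);
		if (IS_ERR(mpath)) {
			mesh_path_discard_frame(sdata, skb);
			err = PTR_ERR(mpath);
			goto out;
		}
	}
	/* ... queue skb on mpath->frame_queue and start path discovery ... */
out:
	rcu_read_unlock();
	return err;
}
#endif
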
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

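#if 0
/*
 * Illustrative sketch, not part of the original file: the comment above says
 * mesh_plink_broken() is meant to be driven by delivery-failure statistics.
 * A caller could look roughly like this; the threshold value and the
 * example_tx_status_update() name are hypothetical, not mac80211 API.
 */
static void example_tx_status_update(struct sta_info *sta,
				     unsigned int consecutive_failures)
{
	/* arbitrary example threshold, not a value from mac80211 */
	if (consecutive_failures > 30)
		mesh_plink_broken(sta);
}
#endif
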
static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

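#if 0
/*
 * Illustrative sketch, not part of the original file: the RCU note above
 * says this flush runs when a peer link drops out of PLINK_ESTAB. A plink
 * state-change handler could therefore call it roughly like this;
 * example_plink_state_change() is a hypothetical name.
 */
static void example_plink_state_change(struct sta_info *sta,
				       enum nl80211_plink_state old_state,
				       enum nl80211_plink_state new_state)
{
	if (old_state == NL80211_PLINK_ESTAB &&
	    new_state != NL80211_PLINK_ESTAB)
		mesh_path_flush_by_nexthop(sta);
}
#endif
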
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}
	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

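#if 0
/*
 * Illustrative sketch, not part of the original file: when path discovery
 * for an mpath finally fails, a caller can try to hand the queued frames to
 * the known gates and only drop them if no gate is reachable. The name
 * example_path_discovery_failed() is hypothetical; mesh_path_flush_pending()
 * below is the drop path.
 */
static void example_path_discovery_failed(struct mesh_path *mpath)
{
	/* hand queued frames to a gate if we know of any, else drop them */
	if (mesh_gate_num(mpath->sdata) > 0 &&
	    mesh_path_send_to_gates(mpath) == 0)
		return;

	mesh_path_flush_pending(mpath);
}
#endif
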
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself; do not call it with
 * that lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

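#if 0
/*
 * Illustrative sketch, not part of the original file: forcing a static
 * route, roughly what a configuration request (e.g. a cfg80211 mpath set)
 * would do. example_set_static_route() is a hypothetical name; station
 * lookup and validation are elided.
 */
static int example_set_static_route(struct ieee80211_sub_if_data *sdata,
				    const u8 *dst, struct sta_info *next_hop)
{
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, dst);
	if (!mpath)
		mpath = mesh_path_add(sdata, dst);
	if (IS_ERR(mpath)) {
		rcu_read_unlock();
		return PTR_ERR(mpath);
	}
	mesh_path_fix_nexthop(mpath, next_hop);
	rcu_read_unlock();
	return 0;
}
#endif
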
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;
	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;
	return 0;

free_path:
	mesh_table_free(tbl_path);
	return ret;
}

void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}