1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2018 - 2021 Intel Corporation
7 #include <net/cfg80211.h>
/*
 * pmsr_parse_ftm - parse one FTM measurement request for a peer
 *
 * Validates the nested FTM request attributes in @ftmreq against the
 * device capabilities (rdev->wiphy.pmsr_capa) and fills @out->ftm.
 * Every validation failure sets an extack message for userspace.
 *
 * NOTE(review): this excerpt elides the "return -EINVAL;" statements and
 * closing braces that follow each NL_SET_ERR_MSG*() call, as well as the
 * final "return 0;" — confirm against the full file.
 */
12 static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
13 struct nlattr *ftmreq,
14 struct cfg80211_pmsr_request_peer *out,
15 struct genl_info *info)
17 const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
18 struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
19 u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */
21 /* validate existing data */
/* use the capa local for consistency with all the checks below */
22 if (!(capa->ftm.bandwidths & BIT(out->chandef.width))) {
23 NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
27 /* no validation needed - was already done via nested policy */
28 nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
31 if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
32 preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
34 /* set up values - struct is 0-initialized */
35 out->ftm.requested = true;
/* outside the 60 GHz (DMG) band the preamble attribute is mandatory */
37 switch (out->chandef.chan->band) {
38 case NL80211_BAND_60GHZ:
42 if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
43 NL_SET_ERR_MSG(info->extack,
44 "FTM: must specify preamble");
49 if (!(capa->ftm.preambles & BIT(preamble))) {
50 NL_SET_ERR_MSG_ATTR(info->extack,
51 tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
52 "FTM: invalid preamble");
56 out->ftm.preamble = preamble;
58 out->ftm.burst_period = 0;
59 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
60 out->ftm.burst_period =
61 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
63 out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
64 if (out->ftm.asap && !capa->ftm.asap) {
65 NL_SET_ERR_MSG_ATTR(info->extack,
66 tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
67 "FTM: ASAP mode not supported");
71 if (!out->ftm.asap && !capa->ftm.non_asap) {
72 NL_SET_ERR_MSG(info->extack,
73 "FTM: non-ASAP mode not supported");
77 out->ftm.num_bursts_exp = 0;
78 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
79 out->ftm.num_bursts_exp =
80 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
/* a negative max_bursts_exponent means the device has no limit */
82 if (capa->ftm.max_bursts_exponent >= 0 &&
83 out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
84 NL_SET_ERR_MSG_ATTR(info->extack,
85 tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
86 "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
/* default 15 presumably encodes "no preference" - TODO confirm */
90 out->ftm.burst_duration = 15;
91 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
92 out->ftm.burst_duration =
93 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
95 out->ftm.ftms_per_burst = 0;
96 if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
97 out->ftm.ftms_per_burst =
98 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);
/* zero ("no preference") is only valid when the device has no limit */
100 if (capa->ftm.max_ftms_per_burst &&
101 (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
102 out->ftm.ftms_per_burst == 0)) {
103 NL_SET_ERR_MSG_ATTR(info->extack,
104 tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
105 "FTM: FTMs per burst must be set lower than the device limit but non-zero");
109 out->ftm.ftmr_retries = 3;
110 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
111 out->ftm.ftmr_retries =
112 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
114 out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
115 if (out->ftm.request_lci && !capa->ftm.request_lci) {
116 NL_SET_ERR_MSG_ATTR(info->extack,
117 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
118 "FTM: LCI request not supported");
121 out->ftm.request_civicloc =
122 !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
123 if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
124 NL_SET_ERR_MSG_ATTR(info->extack,
125 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
126 "FTM: civic location request not supported");
129 out->ftm.trigger_based =
130 !!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED];
131 if (out->ftm.trigger_based && !capa->ftm.trigger_based) {
132 NL_SET_ERR_MSG_ATTR(info->extack,
133 tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED],
134 "FTM: trigger based ranging is not supported");
138 out->ftm.non_trigger_based =
139 !!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED];
140 if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) {
141 NL_SET_ERR_MSG_ATTR(info->extack,
142 tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED],
/* fixed copy-pasted message: this branch rejects NON-trigger-based */
143 "FTM: non-trigger-based ranging is not supported");
147 if (out->ftm.trigger_based && out->ftm.non_trigger_based) {
148 NL_SET_ERR_MSG(info->extack,
149 "FTM: can't set both trigger based and non trigger based");
/* TB/NTB (non-EDCA) ranging is only defined with the HE preamble */
153 if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
154 out->ftm.preamble != NL80211_PREAMBLE_HE) {
155 NL_SET_ERR_MSG_ATTR(info->extack,
156 tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
157 "FTM: non EDCA based ranging must use HE preamble");
161 out->ftm.lmr_feedback =
162 !!tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK];
/* LMR feedback is meaningless for plain EDCA (non-TB/NTB) ranging */
163 if (!out->ftm.trigger_based && !out->ftm.non_trigger_based &&
164 out->ftm.lmr_feedback) {
165 NL_SET_ERR_MSG_ATTR(info->extack,
166 tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK],
167 "FTM: LMR feedback set for EDCA based ranging");
/*
 * pmsr_parse_peer - parse one peer entry of a measurement request
 *
 * Extracts the peer MAC address, channel definition and per-type
 * measurement sub-requests from the nested peer attribute into @out.
 * NOTE(review): this excerpt elides error-return statements and closing
 * braces after the NL_SET_ERR_MSG*() calls — confirm against full file.
 */
174 static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
176 struct cfg80211_pmsr_request_peer *out,
177 struct genl_info *info)
179 struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
180 struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
184 /* no validation needed - was already done via nested policy */
185 nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
/* address, channel and request contents are all mandatory per peer */
188 if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
189 !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
190 !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
191 NL_SET_ERR_MSG_ATTR(info->extack, peer,
192 "insufficient peer data");
196 memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
198 /* reuse info->attrs */
/* clear the top-level attrs so chandef parsing only sees attributes
 * re-parsed out of the nested PEER_ATTR_CHAN below
 */
199 memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
200 err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX,
201 tb[NL80211_PMSR_PEER_ATTR_CHAN],
206 err = nl80211_parse_chandef(rdev, info, &out->chandef);
210 /* no validation needed - was already done via nested policy */
211 nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
212 tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
215 if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
216 NL_SET_ERR_MSG_ATTR(info->extack,
217 tb[NL80211_PMSR_PEER_ATTR_REQ],
218 "missing request type/data");
/* AP TSF reporting is a flag attribute; validate against capability */
222 if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
223 out->report_ap_tsf = true;
225 if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
226 NL_SET_ERR_MSG_ATTR(info->extack,
227 req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
228 "reporting AP TSF is not supported");
/* dispatch each requested measurement type to its specific parser */
232 nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
233 switch (nla_type(treq)) {
234 case NL80211_PMSR_TYPE_FTM:
235 err = pmsr_parse_ftm(rdev, treq, out, info);
238 NL_SET_ERR_MSG_ATTR(info->extack, treq,
239 "unsupported measurement type");
/*
 * nl80211_pmsr_start - handle NL80211_CMD_PEER_MEASUREMENT_START
 *
 * Counts the peers in the request, allocates a cfg80211_pmsr_request
 * sized for them, parses global options (timeout, MAC randomization),
 * parses each peer, and hands the request to the driver. On success the
 * request is queued on wdev->pmsr_list and its cookie is reported back.
 * NOTE(review): error-return and cleanup (kfree) lines are elided in
 * this excerpt — confirm ownership on the failure paths in full file.
 */
250 int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
252 struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
253 struct cfg80211_registered_device *rdev = info->user_ptr[0];
254 struct wireless_dev *wdev = info->user_ptr[1];
255 struct cfg80211_pmsr_request *req;
256 struct nlattr *peers, *peer;
257 int count, rem, err, idx;
/* peer measurements must be advertised by the device at all */
259 if (!rdev->wiphy.pmsr_capa)
265 peers = nla_find(nla_data(reqattr), nla_len(reqattr),
266 NL80211_PMSR_ATTR_PEERS);
/* first pass: count peers and enforce the device limit */
271 nla_for_each_nested(peer, peers, rem) {
274 if (count > rdev->wiphy.pmsr_capa->max_peers) {
275 NL_SET_ERR_MSG_ATTR(info->extack,
276 "Too many peers used");
/* trailing flexible array of peers sized by the count above */
281 req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
285 if (info->attrs[NL80211_ATTR_TIMEOUT])
286 req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
/* NL80211_ATTR_MAC requests randomized source MAC; else use our own */
288 if (info->attrs[NL80211_ATTR_MAC]) {
289 if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
290 NL_SET_ERR_MSG_ATTR(info->extack,
291 info->attrs[NL80211_ATTR_MAC],
292 "device cannot randomize MAC address");
297 err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
302 memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
303 eth_broadcast_addr(req->mac_addr_mask);
/* second pass: parse each peer into its slot in req->peers[] */
307 nla_for_each_nested(peer, peers, rem) {
308 /* NB: this reuses info->attrs, but we no longer need it */
309 err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
315 req->n_peers = count;
316 req->cookie = cfg80211_assign_cookie(rdev);
317 req->nl_portid = info->snd_portid;
318 err = rdev_start_pmsr(rdev, wdev, req);
/* driver accepted: track the request and report its cookie */
323 list_add_tail(&req->list, &wdev->pmsr_list);
325 nl_set_extack_cookie_u64(info->extack, req->cookie);
/*
 * cfg80211_pmsr_complete - driver notification that a measurement is done
 *
 * Sends NL80211_CMD_PEER_MEASUREMENT_COMPLETE (with wiphy/wdev/cookie)
 * to the requesting netlink port, then removes the request from
 * wdev->pmsr_list under pmsr_lock and frees it — unless the abort path
 * already claimed it (see comment below).
 * NOTE(review): nlmsg_free()/kfree() error paths are elided here.
 */
332 void cfg80211_pmsr_complete(struct wireless_dev *wdev,
333 struct cfg80211_pmsr_request *req,
336 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
337 struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
341 trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);
343 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
347 hdr = nl80211hdr_put(msg, 0, 0, 0,
348 NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
352 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
353 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
357 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
361 genlmsg_end(msg, hdr);
362 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
367 spin_lock_bh(&wdev->pmsr_lock);
369 * cfg80211_pmsr_process_abort() may have already moved this request
370 * to the free list, and will free it later. In this case, don't free
/* only unlink/free if the request is still on wdev->pmsr_list */
373 list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
375 list_del(&req->list);
380 spin_unlock_bh(&wdev->pmsr_lock);
383 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
/*
 * nl80211_pmsr_send_ftm_res - emit FTM result attributes into @msg
 *
 * On failure status only the failure reason (and optional busy-retry
 * time) are reported; otherwise all valid result fields are emitted via
 * the PUT*/PUTOPT* helper macros below.
 * NOTE(review): the macros' "goto error" / -ENOBUFS paths and the do/
 * while(0) wrappers are elided in this excerpt.
 */
385 static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
386 struct cfg80211_pmsr_result *res)
388 if (res->status == NL80211_PMSR_STATUS_FAILURE) {
389 if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
390 res->ftm.failure_reason))
/* a busy peer may suggest when to retry */
393 if (res->ftm.failure_reason ==
394 NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
395 res->ftm.busy_retry_time &&
396 nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
397 res->ftm.busy_retry_time))
/* PUT: unconditionally emit one typed RESP attribute */
403 #define PUT(tp, attr, val) \
405 if (nla_put_##tp(msg, \
406 NL80211_PMSR_FTM_RESP_ATTR_##attr, \
/* PUTOPT: emit only when the matching <val>_valid flag is set */
411 #define PUTOPT(tp, attr, val) \
413 if (res->ftm.val##_valid) \
414 PUT(tp, attr, val); \
/* PUT_U64: 64-bit attributes need the explicit pad attribute */
417 #define PUT_U64(attr, val) \
419 if (nla_put_u64_64bit(msg, \
420 NL80211_PMSR_FTM_RESP_ATTR_##attr,\
422 NL80211_PMSR_FTM_RESP_ATTR_PAD)) \
426 #define PUTOPT_U64(attr, val) \
428 if (res->ftm.val##_valid) \
429 PUT_U64(attr, val); \
/* negative burst_index means "not reported" */
432 if (res->ftm.burst_index >= 0)
433 PUT(u32, BURST_INDEX, burst_index);
434 PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
435 PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
436 PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
437 PUT(u8, BURST_DURATION, burst_duration);
438 PUT(u8, FTMS_PER_BURST, ftms_per_burst);
439 PUTOPT(s32, RSSI_AVG, rssi_avg);
440 PUTOPT(s32, RSSI_SPREAD, rssi_spread);
441 if (res->ftm.tx_rate_valid &&
442 !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
443 NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
445 if (res->ftm.rx_rate_valid &&
446 !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
447 NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
449 PUTOPT_U64(RTT_AVG, rtt_avg);
450 PUTOPT_U64(RTT_VARIANCE, rtt_variance);
451 PUTOPT_U64(RTT_SPREAD, rtt_spread);
452 PUTOPT_U64(DIST_AVG, dist_avg);
453 PUTOPT_U64(DIST_VARIANCE, dist_variance);
454 PUTOPT_U64(DIST_SPREAD, dist_spread);
/* variable-length LCI / civic location blobs, only when present */
455 if (res->ftm.lci && res->ftm.lci_len &&
456 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
457 res->ftm.lci_len, res->ftm.lci))
459 if (res->ftm.civicloc && res->ftm.civicloc_len &&
460 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
461 res->ftm.civicloc_len, res->ftm.civicloc))
/*
 * nl80211_pmsr_send_result - emit one peer's result into @msg
 *
 * Builds the nested attribute structure
 *   PEER_MEASUREMENTS > PEERS > peer(1) > ADDR/RESP > DATA > <type>
 * and delegates the type-specific payload to the per-type sender.
 * NOTE(review): nest_start NULL checks, error labels and the return
 * statements are elided in this excerpt.
 */
473 static int nl80211_pmsr_send_result(struct sk_buff *msg,
474 struct cfg80211_pmsr_result *res)
476 struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;
478 pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS);
482 peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS);
/* single peer per message; its nest index is simply 1 */
486 peer = nla_nest_start_noflag(msg, 1);
490 if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
493 resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP);
497 if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
498 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
499 res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
/* AP TSF is optional — only emitted when the result marks it valid */
502 if (res->ap_tsf_valid &&
503 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
504 res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
507 if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
510 data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA);
/* type-specific payload nested under the measurement type id */
514 typedata = nla_nest_start_noflag(msg, res->type);
519 case NL80211_PMSR_TYPE_FTM:
520 if (nl80211_pmsr_send_ftm_res(msg, res))
/* close all nests in reverse order of opening */
527 nla_nest_end(msg, typedata);
528 nla_nest_end(msg, data);
529 nla_nest_end(msg, resp);
530 nla_nest_end(msg, peer);
531 nla_nest_end(msg, peers);
532 nla_nest_end(msg, pmsr);
/*
 * cfg80211_pmsr_report - driver API to report one measurement result
 *
 * Builds an NL80211_CMD_PEER_MEASUREMENT_RESULT message carrying the
 * wiphy/wdev identifiers, the request cookie and the nested result,
 * and unicasts it to the requesting netlink port.
 * NOTE(review): nlmsg allocation-failure and free paths are elided in
 * this excerpt.
 */
539 void cfg80211_pmsr_report(struct wireless_dev *wdev,
540 struct cfg80211_pmsr_request *req,
541 struct cfg80211_pmsr_result *result,
544 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
549 trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
553 * Currently, only variable items are LCI and civic location,
554 * both of which are reasonably short so we don't need to
555 * worry about them here for the allocation.
557 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
561 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
565 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
566 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
570 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
574 err = nl80211_pmsr_send_result(msg, result);
/* message construction failure is a kernel-side sizing bug */
576 pr_err_ratelimited("peer measurement result: message didn't fit!");
580 genlmsg_end(msg, hdr);
581 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
586 EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
/*
 * cfg80211_pmsr_process_abort - abort and free flagged pmsr requests
 *
 * Moves the to-be-aborted requests from wdev->pmsr_list to a private
 * list under pmsr_lock, then — outside the spinlock, since the driver
 * abort callback may sleep — aborts and releases each one.
 * NOTE(review): the selection condition inside the first loop and the
 * kfree in the second are elided in this excerpt.
 */
588 static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
590 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
591 struct cfg80211_pmsr_request *req, *tmp;
592 LIST_HEAD(free_list);
/* caller must hold the wdev mutex */
594 lockdep_assert_held(&wdev->mtx);
596 spin_lock_bh(&wdev->pmsr_lock);
597 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
600 list_move_tail(&req->list, &free_list);
602 spin_unlock_bh(&wdev->pmsr_lock);
604 list_for_each_entry_safe(req, tmp, &free_list, list) {
605 rdev_abort_pmsr(rdev, wdev, req);
/*
 * cfg80211_pmsr_free_wk - deferred-work entry point for pmsr cleanup
 *
 * Recovers the wireless_dev from the embedded work_struct and runs the
 * abort/free processing in process context (the abort callback may
 * sleep, so this cannot run from the contexts that schedule the work).
 */
611 void cfg80211_pmsr_free_wk(struct work_struct *work)
613 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
617 cfg80211_pmsr_process_abort(wdev);
/*
 * cfg80211_pmsr_wdev_down - cancel all measurements when a wdev goes down
 *
 * Marks every outstanding request under pmsr_lock (marking statement
 * elided in this excerpt), then processes the aborts synchronously and
 * asserts the list ends up empty.
 */
621 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
623 struct cfg80211_pmsr_request *req;
626 spin_lock_bh(&wdev->pmsr_lock);
627 list_for_each_entry(req, &wdev->pmsr_list, list) {
631 spin_unlock_bh(&wdev->pmsr_lock);
/* run the abort path directly; the interface is going away */
634 cfg80211_pmsr_process_abort(wdev);
636 WARN_ON(!list_empty(&wdev->pmsr_list));
/*
 * cfg80211_release_pmsr - handle the requesting netlink socket closing
 *
 * Flags every request owned by @portid (flagging statement elided in
 * this excerpt) and schedules the free worker to abort them; actual
 * cleanup happens asynchronously in cfg80211_pmsr_free_wk().
 */
639 void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
641 struct cfg80211_pmsr_request *req;
643 spin_lock_bh(&wdev->pmsr_lock);
644 list_for_each_entry(req, &wdev->pmsr_list, list) {
645 if (req->nl_portid == portid) {
/* defer to the work item: we cannot call the driver under the lock */
647 schedule_work(&wdev->pmsr_free_wk);
650 spin_unlock_bh(&wdev->pmsr_lock);
653 #endif /* __PMSR_H */