1 /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Intel Corporation */
7 #include <net/cfg80211.h>
12 static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
13 struct nlattr *ftmreq,
14 struct cfg80211_pmsr_request_peer *out,
15 struct genl_info *info)
17 const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
18 struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
19 u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */
21 /* validate existing data */
22 if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
23 NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
27 /* no validation needed - was already done via nested policy */
28 nla_parse_nested(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq, NULL, NULL);
30 if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
31 preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
33 /* set up values - struct is 0-initialized */
34 out->ftm.requested = true;
36 switch (out->chandef.chan->band) {
37 case NL80211_BAND_60GHZ:
41 if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
42 NL_SET_ERR_MSG(info->extack,
43 "FTM: must specify preamble");
48 if (!(capa->ftm.preambles & BIT(preamble))) {
49 NL_SET_ERR_MSG_ATTR(info->extack,
50 tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
51 "FTM: invalid preamble");
55 out->ftm.preamble = preamble;
57 out->ftm.burst_period = 0;
58 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
59 out->ftm.burst_period =
60 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
62 out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
63 if (out->ftm.asap && !capa->ftm.asap) {
64 NL_SET_ERR_MSG_ATTR(info->extack,
65 tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
66 "FTM: ASAP mode not supported");
70 if (!out->ftm.asap && !capa->ftm.non_asap) {
71 NL_SET_ERR_MSG(info->extack,
72 "FTM: non-ASAP mode not supported");
76 out->ftm.num_bursts_exp = 0;
77 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
78 out->ftm.num_bursts_exp =
79 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
81 if (capa->ftm.max_bursts_exponent >= 0 &&
82 out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
83 NL_SET_ERR_MSG_ATTR(info->extack,
84 tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
85 "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
89 out->ftm.burst_duration = 15;
90 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
91 out->ftm.burst_duration =
92 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
94 out->ftm.ftms_per_burst = 0;
95 if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
96 out->ftm.ftms_per_burst =
97 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);
99 if (capa->ftm.max_ftms_per_burst &&
100 (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
101 out->ftm.ftms_per_burst == 0)) {
102 NL_SET_ERR_MSG_ATTR(info->extack,
103 tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
104 "FTM: FTMs per burst must be set lower than the device limit but non-zero");
108 out->ftm.ftmr_retries = 3;
109 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
110 out->ftm.ftmr_retries =
111 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
113 out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
114 if (out->ftm.request_lci && !capa->ftm.request_lci) {
115 NL_SET_ERR_MSG_ATTR(info->extack,
116 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
117 "FTM: LCI request not supported");
120 out->ftm.request_civicloc =
121 !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
122 if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
123 NL_SET_ERR_MSG_ATTR(info->extack,
124 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
125 "FTM: civic location request not supported");
/*
 * pmsr_parse_peer - parse one NL80211_PMSR_ATTR_PEERS entry into @out
 * @rdev: registered wiphy the request targets
 * @out: the per-peer request slot to fill (addr, chandef, measurements)
 * @info: genl info; its attrs array is reused as scratch space and its
 *        extack carries error messages
 *
 * Requires the ADDR, CHAN and REQ peer attributes; parses the channel
 * definition and then each requested measurement type (only FTM is
 * handled by a visible case).
 *
 * NOTE(review): the `struct nlattr *peer` parameter line and several
 * error-return/brace lines are not visible in this excerpt.
 */
131 static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
133 struct cfg80211_pmsr_request_peer *out,
134 struct genl_info *info)
136 struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
137 struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
141 /* no validation needed - was already done via nested policy */
142 nla_parse_nested(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, NULL, NULL);
/* all three of address, channel and request are mandatory */
144 if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
145 !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
146 !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
147 NL_SET_ERR_MSG_ATTR(info->extack, peer,
148 "insufficient peer data");
152 memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
/*
 * Re-parse the nested CHAN attributes into info->attrs so the
 * generic chandef parser (which reads info->attrs) can be reused;
 * the array is zeroed first to drop the outer command's attributes.
 */
154 /* reuse info->attrs */
155 memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
156 /* need to validate here, we don't want to have validation recursion */
157 err = nla_parse_nested(info->attrs, NL80211_ATTR_MAX,
158 tb[NL80211_PMSR_PEER_ATTR_CHAN],
159 nl80211_policy, info->extack);
163 err = nl80211_parse_chandef(rdev, info, &out->chandef);
167 /* no validation needed - was already done via nested policy */
168 nla_parse_nested(req, NL80211_PMSR_REQ_ATTR_MAX,
169 tb[NL80211_PMSR_PEER_ATTR_REQ],
/* a request without measurement data is meaningless */
172 if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
173 NL_SET_ERR_MSG_ATTR(info->extack,
174 tb[NL80211_PMSR_PEER_ATTR_REQ],
175 "missing request type/data");
/* AP-TSF reporting is opt-in and capability-gated */
179 if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
180 out->report_ap_tsf = true;
182 if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
183 NL_SET_ERR_MSG_ATTR(info->extack,
184 req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
185 "reporting AP TSF is not supported");
/* dispatch each nested measurement type; only FTM is implemented */
189 nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
190 switch (nla_type(treq)) {
191 case NL80211_PMSR_TYPE_FTM:
192 err = pmsr_parse_ftm(rdev, treq, out, info);
195 NL_SET_ERR_MSG_ATTR(info->extack, treq,
196 "unsupported measurement type");
/*
 * nl80211_pmsr_start - NL80211_CMD_PEER_MEASUREMENT_START handler
 * @skb: the netlink message (unused in the visible lines)
 * @info: genl info carrying the parsed top-level attributes
 *
 * Counts the requested peers, allocates a cfg80211_pmsr_request sized
 * for them, applies the optional timeout and MAC-randomization settings,
 * parses every peer entry, assigns a cookie and hands the request to the
 * driver via rdev_start_pmsr().  On success the request is tracked on
 * wdev->pmsr_list and the cookie is returned via extack.
 *
 * NOTE(review): error returns, the kzalloc NULL check and loop-index
 * bookkeeping lines are not visible in this excerpt.
 */
207 int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
209 struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
210 struct cfg80211_registered_device *rdev = info->user_ptr[0];
211 struct wireless_dev *wdev = info->user_ptr[1];
212 struct cfg80211_pmsr_request *req;
213 struct nlattr *peers, *peer;
214 int count, rem, err, idx;
/* device must advertise peer-measurement support at all */
216 if (!rdev->wiphy.pmsr_capa)
222 peers = nla_find(nla_data(reqattr), nla_len(reqattr),
223 NL80211_PMSR_ATTR_PEERS);
/* first pass: count peers and bound-check against the device limit */
228 nla_for_each_nested(peer, peers, rem) {
231 if (count > rdev->wiphy.pmsr_capa->max_peers) {
232 NL_SET_ERR_MSG_ATTR(info->extack, peer,
233 "Too many peers used");
/* flexible-array allocation: request header plus `count` peer slots */
238 req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
242 if (info->attrs[NL80211_ATTR_TIMEOUT])
243 req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
/*
 * Optional MAC randomization: only honored when the device supports
 * it; otherwise (visible else-branch below) the given MAC is copied
 * verbatim with an all-ones mask.
 */
245 if (info->attrs[NL80211_ATTR_MAC]) {
246 if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
247 NL_SET_ERR_MSG_ATTR(info->extack,
248 info->attrs[NL80211_ATTR_MAC],
249 "device cannot randomize MAC address");
254 err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
259 memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]),
261 memset(req->mac_addr_mask, 0xff, ETH_ALEN);
/* second pass: parse each peer into its slot */
265 nla_for_each_nested(peer, peers, rem) {
266 /* NB: this reuses info->attrs, but we no longer need it */
267 err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
273 req->n_peers = count;
274 req->cookie = cfg80211_assign_cookie(rdev);
276 err = rdev_start_pmsr(rdev, wdev, req);
/* track the in-flight request; completion/report look it up here */
280 list_add_tail(&req->list, &wdev->pmsr_list);
/* hand the cookie back to userspace via extack */
282 nl_set_extack_cookie_u64(info->extack, req->cookie);
/*
 * cfg80211_pmsr_complete - driver API: a measurement request finished
 * @wdev: the wireless device the request ran on
 * @req: the completed request
 *
 * Sends an NL80211_CMD_PEER_MEASUREMENT_COMPLETE unicast (carrying
 * wiphy, wdev id and the request cookie) to the requesting portid, then
 * unlinks the request from wdev->pmsr_list under pmsr_lock.
 *
 * NOTE(review): the gfp parameter line, msg/hdr declarations and the
 * error/free paths are not visible in this excerpt.
 */
289 void cfg80211_pmsr_complete(struct wireless_dev *wdev,
290 struct cfg80211_pmsr_request *req,
293 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
297 trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);
299 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
303 hdr = nl80211hdr_put(msg, 0, 0, 0,
304 NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
/* identify the device and the request the event refers to */
308 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
309 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
313 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
317 genlmsg_end(msg, hdr);
/* unicast to the socket that started the measurement */
318 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
/* request is done - remove it from the wdev's tracking list */
323 spin_lock_bh(&wdev->pmsr_lock);
324 list_del(&req->list);
325 spin_unlock_bh(&wdev->pmsr_lock);
328 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
/*
 * nl80211_pmsr_send_ftm_res - emit the FTM-specific result attributes
 * @msg: the message being built
 * @res: the measurement result to serialize
 *
 * On a FAILURE status only the failure reason (and, for PEER_BUSY, the
 * retry time) is emitted.  Otherwise the helper macros below put each
 * result field, the *_valid-gated ones only when present.
 *
 * NOTE(review): returns, closing braces and parts of the macro bodies
 * are not visible in this excerpt.
 */
330 static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
331 struct cfg80211_pmsr_result *res)
/* failed measurement: report why (and when to retry, if peer-busy) */
333 if (res->status == NL80211_PMSR_STATUS_FAILURE) {
334 if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
335 res->ftm.failure_reason))
338 if (res->ftm.failure_reason ==
339 NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
340 res->ftm.busy_retry_time &&
341 nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
342 res->ftm.busy_retry_time))
/* PUT: unconditionally emit res->ftm.<val> with the given nla type */
348 #define PUT(tp, attr, val) \
350 if (nla_put_##tp(msg, \
351 NL80211_PMSR_FTM_RESP_ATTR_##attr, \
/* PUTOPT: emit only when the matching <val>_valid flag is set */
356 #define PUTOPT(tp, attr, val) \
358 if (res->ftm.val##_valid) \
359 PUT(tp, attr, val); \
/* PUT_U64: 64-bit put with the required PAD attribute */
362 #define PUT_U64(attr, val) \
364 if (nla_put_u64_64bit(msg, \
365 NL80211_PMSR_FTM_RESP_ATTR_##attr,\
367 NL80211_PMSR_FTM_RESP_ATTR_PAD)) \
371 #define PUTOPT_U64(attr, val) \
373 if (res->ftm.val##_valid) \
374 PUT_U64(attr, val); \
/* burst_index < 0 presumably means "unknown" - emitted only when >= 0 */
377 if (res->ftm.burst_index >= 0)
378 PUT(u32, BURST_INDEX, burst_index);
379 PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
380 PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
381 PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
382 PUT(u8, BURST_DURATION, burst_duration);
383 PUT(u8, FTMS_PER_BURST, ftms_per_burst);
384 PUTOPT(s32, RSSI_AVG, rssi_avg);
385 PUTOPT(s32, RSSI_SPREAD, rssi_spread);
/* rates use the shared sta-rate serializer rather than the PUT macros */
386 if (res->ftm.tx_rate_valid &&
387 !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
388 NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
390 if (res->ftm.rx_rate_valid &&
391 !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
392 NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
394 PUTOPT_U64(RTT_AVG, rtt_avg);
395 PUTOPT_U64(RTT_VARIANCE, rtt_variance);
396 PUTOPT_U64(RTT_SPREAD, rtt_spread);
397 PUTOPT_U64(DIST_AVG, dist_avg);
398 PUTOPT_U64(DIST_VARIANCE, dist_variance);
399 PUTOPT_U64(DIST_SPREAD, dist_spread);
/* variable-length blobs, emitted only when both pointer and length set */
400 if (res->ftm.lci && res->ftm.lci_len &&
401 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
402 res->ftm.lci_len, res->ftm.lci))
404 if (res->ftm.civicloc && res->ftm.civicloc_len &&
405 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
406 res->ftm.civicloc_len, res->ftm.civicloc))
/*
 * nl80211_pmsr_send_result - serialize one peer's measurement result
 * @msg: the message being built
 * @res: the result to serialize
 *
 * Builds the nested attribute structure
 * PEER_MEASUREMENTS > PEERS > peer > RESP > DATA > <type data>,
 * emitting the peer address, status, host timestamp, optional AP TSF,
 * the FINAL flag and the type-specific payload (FTM only).
 *
 * FIX: the AP_TSF attribute (guarded by res->ap_tsf_valid) was emitted
 * with res->host_time, duplicating the HOST_TIME put above it; it must
 * carry res->ap_tsf.
 *
 * NOTE(review): nest-start NULL checks, the type switch header and the
 * error returns are not visible in this excerpt.
 */
418 static int nl80211_pmsr_send_result(struct sk_buff *msg,
419 struct cfg80211_pmsr_result *res)
421 struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;
423 pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS);
427 peers = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS);
431 peer = nla_nest_start(msg, 1);
435 if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
438 resp = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_RESP);
442 if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
443 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
444 res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
447 if (res->ap_tsf_valid &&
448 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
449 res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
452 if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
455 data = nla_nest_start(msg, NL80211_PMSR_RESP_ATTR_DATA);
459 typedata = nla_nest_start(msg, res->type);
464 case NL80211_PMSR_TYPE_FTM:
465 if (nl80211_pmsr_send_ftm_res(msg, res))
/* close the nests in reverse order of opening */
472 nla_nest_end(msg, typedata);
473 nla_nest_end(msg, data);
474 nla_nest_end(msg, resp);
475 nla_nest_end(msg, peer);
476 nla_nest_end(msg, peers);
477 nla_nest_end(msg, pmsr);
/*
 * cfg80211_pmsr_report - driver API: report one peer's measurement result
 * @wdev: the wireless device the request ran on
 * @req: the request this result belongs to
 * @result: the result to send to userspace
 *
 * Builds and unicasts an NL80211_CMD_PEER_MEASUREMENT_RESULT message
 * (wiphy, wdev id, cookie, serialized result) to the requesting portid.
 *
 * NOTE(review): the gfp parameter line, msg/hdr/err declarations and the
 * failure/free paths are not visible in this excerpt.
 */
484 void cfg80211_pmsr_report(struct wireless_dev *wdev,
485 struct cfg80211_pmsr_request *req,
486 struct cfg80211_pmsr_result *result,
489 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
494 trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
/*
 * Default-size allocation is considered sufficient:
 */
498 * Currently, only variable items are LCI and civic location,
499 * both of which are reasonably short so we don't need to
500 * worry about them here for the allocation.
502 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
506 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
/* identify the device and the request the result belongs to */
510 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
511 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
515 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
519 err = nl80211_pmsr_send_result(msg, result);
/* result didn't fit the default-size skb - log, don't crash */
521 pr_err_ratelimited("peer measurement result: message didn't fit!");
525 genlmsg_end(msg, hdr);
/* unicast to the socket that started the measurement */
526 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
531 EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
/*
 * cfg80211_pmsr_free_wk - deferred-free work for pmsr requests
 * @work: embedded work_struct (inside the wireless_dev)
 *
 * Under pmsr_lock, moves the requests to be freed from wdev->pmsr_list
 * onto a local list, then - with the spinlock dropped, since the driver
 * abort op may sleep - aborts each one via rdev_abort_pmsr().
 *
 * NOTE(review): the container_of field name, the per-entry selection
 * condition inside the first loop and the final kfree are not visible
 * in this excerpt.
 */
533 void cfg80211_pmsr_free_wk(struct work_struct *work)
535 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
538 struct cfg80211_pmsr_request *req, *tmp;
539 LIST_HEAD(free_list);
/* collect victims under the lock ... */
541 spin_lock_bh(&wdev->pmsr_lock);
542 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
545 list_move_tail(&req->list, &free_list);
547 spin_unlock_bh(&wdev->pmsr_lock);
/* ... then abort them outside it */
549 list_for_each_entry_safe(req, tmp, &free_list, list) {
551 rdev_abort_pmsr(rdev, wdev, req);
/*
 * cfg80211_pmsr_wdev_down - tear down all pmsr state when a wdev goes down
 * @wdev: the wireless device being brought down
 *
 * Walks the outstanding requests under pmsr_lock (the per-entry action
 * is not visible here - presumably marking each for removal, confirm
 * against the full source), then schedules the free worker and flushes
 * it synchronously so the list is guaranteed empty on return.
 */
558 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
560 struct cfg80211_pmsr_request *req;
563 spin_lock_bh(&wdev->pmsr_lock);
564 list_for_each_entry(req, &wdev->pmsr_list, list) {
568 spin_unlock_bh(&wdev->pmsr_lock);
/* run the deferred free now and wait for it to finish */
571 schedule_work(&wdev->pmsr_free_wk);
572 flush_work(&wdev->pmsr_free_wk);
/* by now everything must have been cleaned up */
573 WARN_ON(!list_empty(&wdev->pmsr_list));
/*
 * cfg80211_release_pmsr - clean up requests owned by a closed socket
 * @wdev: the wireless device to scan
 * @portid: netlink portid of the socket that went away
 *
 * Under pmsr_lock, finds requests whose nl_portid matches and schedules
 * the free worker to dispose of them asynchronously (the per-entry
 * marking between the match and the schedule is not visible in this
 * excerpt).
 */
576 void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
578 struct cfg80211_pmsr_request *req;
580 spin_lock_bh(&wdev->pmsr_lock);
581 list_for_each_entry(req, &wdev->pmsr_list, list) {
582 if (req->nl_portid == portid) {
584 schedule_work(&wdev->pmsr_free_wk);
587 spin_unlock_bh(&wdev->pmsr_lock);
590 #endif /* __PMSR_H */