/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
20 void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
22 struct mt76_txq *mtxq;
27 mtxq = (struct mt76_txq *) txq->drv_priv;
29 struct mt76x2_sta *sta;
31 sta = (struct mt76x2_sta *) txq->sta->drv_priv;
32 mtxq->wcid = &sta->wcid;
34 struct mt76x2_vif *mvif;
36 mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
37 mtxq->wcid = &mvif->group_wcid;
40 mt76_txq_init(&dev->mt76, txq);
42 EXPORT_SYMBOL_GPL(mt76x2_txq_init);
44 int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
45 struct ieee80211_ampdu_params *params)
47 enum ieee80211_ampdu_mlme_action action = params->action;
48 struct ieee80211_sta *sta = params->sta;
49 struct mt76x2_dev *dev = hw->priv;
50 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
51 struct ieee80211_txq *txq = sta->txq[params->tid];
52 u16 tid = params->tid;
53 u16 *ssn = ¶ms->ssn;
54 struct mt76_txq *mtxq;
59 mtxq = (struct mt76_txq *)txq->drv_priv;
62 case IEEE80211_AMPDU_RX_START:
63 mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
64 mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
66 case IEEE80211_AMPDU_RX_STOP:
67 mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
68 mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
71 case IEEE80211_AMPDU_TX_OPERATIONAL:
73 mtxq->send_bar = false;
74 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
76 case IEEE80211_AMPDU_TX_STOP_FLUSH:
77 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
79 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
81 case IEEE80211_AMPDU_TX_START:
82 mtxq->agg_ssn = *ssn << 4;
83 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
85 case IEEE80211_AMPDU_TX_STOP_CONT:
87 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
93 EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
95 int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
96 struct ieee80211_sta *sta)
98 struct mt76x2_dev *dev = hw->priv;
99 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
100 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
105 mutex_lock(&dev->mutex);
107 idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
115 msta->wcid.idx = idx;
116 msta->wcid.hw_key_idx = -1;
117 mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
118 mt76x2_mac_wcid_set_drop(dev, idx, false);
119 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
120 mt76x2_txq_init(dev, sta->txq[i]);
122 if (vif->type == NL80211_IFTYPE_AP)
123 set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
125 ewma_signal_init(&msta->rssi);
127 rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
130 mutex_unlock(&dev->mutex);
134 EXPORT_SYMBOL_GPL(mt76x2_sta_add);
136 int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
137 struct ieee80211_sta *sta)
139 struct mt76x2_dev *dev = hw->priv;
140 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
141 int idx = msta->wcid.idx;
144 mutex_lock(&dev->mutex);
145 rcu_assign_pointer(dev->wcid[idx], NULL);
146 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
147 mt76_txq_remove(&dev->mt76, sta->txq[i]);
148 mt76x2_mac_wcid_set_drop(dev, idx, true);
149 mt76_wcid_free(dev->wcid_mask, idx);
150 mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
151 mutex_unlock(&dev->mutex);
155 EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
157 void mt76x2_remove_interface(struct ieee80211_hw *hw,
158 struct ieee80211_vif *vif)
160 struct mt76x2_dev *dev = hw->priv;
162 mt76_txq_remove(&dev->mt76, vif->txq);
164 EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
166 int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
167 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
168 struct ieee80211_key_conf *key)
170 struct mt76x2_dev *dev = hw->priv;
171 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
172 struct mt76x2_sta *msta;
173 struct mt76_wcid *wcid;
174 int idx = key->keyidx;
177 /* fall back to sw encryption for unsupported ciphers */
178 switch (key->cipher) {
179 case WLAN_CIPHER_SUITE_WEP40:
180 case WLAN_CIPHER_SUITE_WEP104:
181 case WLAN_CIPHER_SUITE_TKIP:
182 case WLAN_CIPHER_SUITE_CCMP:
189 * The hardware does not support per-STA RX GTK, fall back
190 * to software mode for these.
192 if ((vif->type == NL80211_IFTYPE_ADHOC ||
193 vif->type == NL80211_IFTYPE_MESH_POINT) &&
194 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
195 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
196 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
199 msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
200 wcid = msta ? &msta->wcid : &mvif->group_wcid;
202 if (cmd == SET_KEY) {
203 key->hw_key_idx = wcid->idx;
204 wcid->hw_key_idx = idx;
205 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
206 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
210 if (idx == wcid->hw_key_idx) {
211 wcid->hw_key_idx = -1;
217 mt76_wcid_key_setup(&dev->mt76, wcid, key);
220 if (key || wcid->hw_key_idx == idx) {
221 ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
226 return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
229 return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
231 EXPORT_SYMBOL_GPL(mt76x2_set_key);
233 int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
234 u16 queue, const struct ieee80211_tx_queue_params *params)
236 struct mt76x2_dev *dev = hw->priv;
237 u8 cw_min = 5, cw_max = 10, qid;
240 qid = dev->mt76.q_tx[queue].hw_idx;
243 cw_min = fls(params->cw_min);
245 cw_max = fls(params->cw_max);
247 val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
248 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
249 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
250 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
251 mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
253 val = mt76_rr(dev, MT_WMM_TXOP(qid));
254 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
255 val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
256 mt76_wr(dev, MT_WMM_TXOP(qid), val);
258 val = mt76_rr(dev, MT_WMM_AIFSN);
259 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
260 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
261 mt76_wr(dev, MT_WMM_AIFSN, val);
263 val = mt76_rr(dev, MT_WMM_CWMIN);
264 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
265 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
266 mt76_wr(dev, MT_WMM_CWMIN, val);
268 val = mt76_rr(dev, MT_WMM_CWMAX);
269 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
270 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
271 mt76_wr(dev, MT_WMM_CWMAX, val);
275 EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
277 void mt76x2_configure_filter(struct ieee80211_hw *hw,
278 unsigned int changed_flags,
279 unsigned int *total_flags, u64 multicast)
281 struct mt76x2_dev *dev = hw->priv;
284 #define MT76_FILTER(_flag, _hw) do { \
285 flags |= *total_flags & FIF_##_flag; \
286 dev->rxfilter &= ~(_hw); \
287 dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
290 mutex_lock(&dev->mutex);
292 dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
294 MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
295 MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
296 MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
297 MT_RX_FILTR_CFG_CTS |
298 MT_RX_FILTR_CFG_CFEND |
299 MT_RX_FILTR_CFG_CFACK |
301 MT_RX_FILTR_CFG_CTRL_RSV);
302 MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
304 *total_flags = flags;
305 mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
307 mutex_unlock(&dev->mutex);
309 EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
311 void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
312 struct ieee80211_vif *vif,
313 struct ieee80211_sta *sta)
315 struct mt76x2_dev *dev = hw->priv;
316 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
317 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
318 struct ieee80211_tx_rate rate = {};
323 rate.idx = rates->rate[0].idx;
324 rate.flags = rates->rate[0].flags;
325 mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
326 msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
328 EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
330 void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
333 struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
334 void *rxwi = skb->data;
336 if (q == MT_RXQ_MCU) {
337 skb_queue_tail(&dev->mcu.res_q, skb);
338 wake_up(&dev->mcu.wait);
342 skb_pull(skb, sizeof(struct mt76x2_rxwi));
343 if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
348 mt76_rx(&dev->mt76, q, skb);
350 EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);