// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
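/* Direct buffer (DB) ring buffers are pre-filled with ATH11K_DB_MAGIC_VALUE
 * before they are handed to the target. ath11k_dbring_validate_buffer() then
 * checks a released buffer for leftover magic words, so a buffer the target
 * never actually wrote to can be detected and dropped.
 */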
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	/* Fill every 32-bit word of the payload with the magic pattern */
	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++)
		*temp = ATH11K_DB_MAGIC_VALUE;
}
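/* Map one buffer element for DMA, assign it an idr cookie and post its
 * address to the next free entry of the refill SRNG so the target can fill
 * it. Called with the SRNG lock held.
 */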
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}
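/* Allocate buffer elements and replenish the refill ring until it is full
 * (or an allocation fails). Returns the number of entries that could not be
 * posted, i.e. 0 on complete success.
 */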
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		/* Over-allocate so the payload can be aligned to buf_align */
		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}

		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
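/* Tell the firmware about the DMA ring: base and head/tail index addresses,
 * element count, buffer size and event moderation parameters, via a pdev
 * DMA ring cfg request (ath11k_wmi_pdev_dma_ring_cfg()).
 */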
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	param.module_id = id;
	param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems = ring->bufs_max;
	param.buf_size = ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms = ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}
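/* Record the event moderation parameters and the client callback that will
 * be invoked for each released buffer.
 */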
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}
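/* Derive the buffer count from the SRNG size, take buffer size/alignment
 * from the capability reported by firmware, then pre-fill the refill ring.
 */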
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

	return ret;
}
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}
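/* Typical call order for a direct-buffer client (illustrative sketch only,
 * modelled on the spectral scan module; the local variable and handler names
 * are placeholders and error handling is omitted):
 *
 *	ath11k_dbring_get_cap(ar->ab, ar->pdev_idx, WMI_DIRECT_BUF_SPECTRAL, &db_cap);
 *	ath11k_dbring_srng_setup(ar, ring, 0, db_cap.min_elem);
 *	ath11k_dbring_set_cfg(ar, ring, num_resp_per_event, event_timeout_ms, handler);
 *	ath11k_dbring_buf_setup(ar, ring, &db_cap);
 *	ath11k_dbring_wmi_cfg_setup(ar, ring, WMI_DIRECT_BUF_SPECTRAL);
 */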
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];
			return 0;
		}
	}

	return -ENOENT;
}
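/* Handle a WMI buffer-release event from firmware: look up each released
 * buffer by its cookie, unmap it, pass the (aligned) payload to the module
 * handler and then recycle the buffer back into the refill ring.
 */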
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}
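/* Release the DMA memory backing the refill SRNG. */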
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
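/* Unmap and free every buffer still tracked in the idr, then destroy it. */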
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}