// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "qbman-portal.h"

struct dpaa2_io {
        struct dpaa2_io_desc dpio_desc;
        struct qbman_swp_desc swp_desc;
        struct qbman_swp *swp;
        struct list_head node;
        /* protect against multiple management commands */
        spinlock_t lock_mgmt_cmd;
        /* protect notifications list */
        spinlock_t lock_notifications;
        struct list_head notifications;
};

struct dpaa2_io_store {
        unsigned int max;       /* number of dequeue entries the store holds */
        dma_addr_t paddr;       /* DMA address of the 64-byte-aligned storage */
        struct dpaa2_dq *vaddr;
        void *alloced_addr;     /* unaligned value from kmalloc() */
        unsigned int idx;       /* position of the next-to-be-returned entry */
        struct qbman_swp *swp;  /* portal used to issue VDQCR */
        struct device *dev;     /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
                                                     int cpu)
{
        if (d)
                return d;

        if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
                return NULL;

        /*
         * If cpu == -1, choose the current cpu, with no guarantees about
         * potentially being migrated away.
         */
        if (unlikely(cpu < 0))
                cpu = smp_processor_id();

        /* If a specific cpu was requested, pick it up immediately */
        return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
        if (d)
                return d;

        /* Round-robin: take the head of the list and rotate it to the tail */
        spin_lock(&dpio_list_lock);
        d = list_entry(dpio_list.next, struct dpaa2_io, node);
        list_del(&d->node);
        list_add_tail(&d->node, &dpio_list);
        spin_unlock(&dpio_list_lock);

        return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
        if (cpu == DPAA2_IO_ANY_CPU)
                return service_select(NULL);

        return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);

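/*
 * Illustrative usage (a sketch, not code from this file): a caller typically
 * asks for the portal affine to the current cpu and falls back to any
 * available service:
 *
 *      struct dpaa2_io *io = dpaa2_io_service_select(smp_processor_id());
 *
 *      if (!io)
 *              io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 */
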
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
{
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        /* check if CPU is out of range (-1 means any cpu) */
        if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
                kfree(obj);
                return NULL;
        }

        obj->dpio_desc = *desc;
        obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
        obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
        obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
        obj->swp = qbman_swp_init(&obj->swp_desc);
        if (!obj->swp) {
                kfree(obj);
                return NULL;
        }

        INIT_LIST_HEAD(&obj->node);
        spin_lock_init(&obj->lock_mgmt_cmd);
        spin_lock_init(&obj->lock_notifications);
        INIT_LIST_HEAD(&obj->notifications);

        /* For now only enable DQRR interrupts */
        qbman_swp_interrupt_set_trigger(obj->swp,
                                        QBMAN_SWP_INTERRUPT_DQRI);
        qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
        if (obj->dpio_desc.receives_notifications)
                qbman_swp_push_set(obj->swp, 0, 1);

        spin_lock(&dpio_list_lock);
        list_add_tail(&obj->node, &dpio_list);
        if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
                dpio_by_cpu[desc->cpu] = obj;
        spin_unlock(&dpio_list_lock);

        return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
        kfree(d);
}

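/*
 * A minimal lifecycle sketch, assuming "desc" was already filled in from the
 * MC firmware configuration by the DPIO driver (the caller shown here is
 * hypothetical):
 *
 *      struct dpaa2_io *io = dpaa2_io_create(&desc);
 *
 *      if (!io)
 *              return -ENODEV;
 *      ...
 *      dpaa2_io_down(io);
 */
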
#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
        const struct dpaa2_dq *dq;
        struct qbman_swp *swp = obj->swp;
        int max = 0;
        u32 status;

        status = qbman_swp_interrupt_read_status(swp);
        if (!status)
                return IRQ_NONE;

        /* Drain the DQRR, dispatching state-change notifications */
        dq = qbman_swp_dqrr_next(swp);
        while (dq) {
                if (qbman_result_is_SCN(dq)) {
                        struct dpaa2_io_notification_ctx *ctx;
                        u64 q64;

                        q64 = qbman_result_SCN_ctx(dq);
                        ctx = (void *)(uintptr_t)q64;
                        ctx->cb(ctx);
                } else {
                        pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
                }
                qbman_swp_dqrr_consume(swp, dq);
                ++max;
                if (max > DPAA_POLL_MAX)
                        goto done;
                dq = qbman_swp_dqrr_next(swp);
        }
done:
        qbman_swp_interrupt_clear_status(swp, status);
        qbman_swp_interrupt_set_inhibit(swp, 0);
        return IRQ_HANDLED;
}

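/*
 * Sketch of how a DPIO driver might chain its interrupt handler to
 * dpaa2_io_irq(); the handler name and the use of the dev_id cookie are
 * illustrative assumptions, not code from this file:
 *
 *      static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
 *      {
 *              struct dpaa2_io *io = arg;
 *
 *              return dpaa2_io_irq(io);
 *      }
 */
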
/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller);
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, or -ENODEV for failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
                              struct dpaa2_io_notification_ctx *ctx)
{
        unsigned long irqflags;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        ctx->dpio_id = d->dpio_desc.dpio_id;
        ctx->qman64 = (u64)(uintptr_t)ctx;
        ctx->dpio_private = d;
        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_add(&ctx->node, &d->notifications);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);

        /* Enable the generation of CDAN notifications */
        if (ctx->is_cdan)
                return qbman_swp_CDAN_set_context_enable(d->swp,
                                                         (u16)ctx->id,
                                                         ctx->qman64);
        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

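/*
 * Illustrative registration flow for a CDAN producer; the field values and
 * the MC "attach" step are assumptions used only to show the ordering the
 * kernel-doc above describes:
 *
 *      ctx->is_cdan = 1;
 *      ctx->id = channel_id;
 *      ctx->desired_cpu = DPAA2_IO_ANY_CPU;
 *      ctx->cb = my_cdan_cb;
 *      err = dpaa2_io_service_register(NULL, ctx);
 *      if (err)
 *              return err;
 *
 * Only then issue the MC "attach" command, passing along the ctx->dpio_id
 * and ctx->qman64 values filled in by the call above.
 */
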
/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
                                 struct dpaa2_io_notification_ctx *ctx)
{
        struct dpaa2_io *d = ctx->dpio_private;
        unsigned long irqflags;

        if (ctx->is_cdan)
                qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_del(&ctx->node);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
                           struct dpaa2_io_notification_ctx *ctx)
{
        unsigned long irqflags;
        int err;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        if (ctx->is_cdan)
                err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
        else
                err = qbman_swp_fq_schedule(d->swp, ctx->id);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
                                  struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        /* Remember the portal so dpaa2_io_store_next() can poll it */
        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

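/*
 * Typical pull sequence, sketched under the assumption that "store" was
 * obtained from dpaa2_io_store_create() and "ch_id" names a channel
 * ("store" and "ch_id" are hypothetical). The pull can fail transiently
 * while a previous volatile dequeue is still in flight, so callers
 * commonly retry on -EBUSY:
 *
 *      do {
 *              err = dpaa2_io_service_pull_channel(NULL, ch_id, store);
 *      } while (err == -EBUSY);
 */
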
/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
                                u32 qdid, u8 prio, u16 qdbin,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

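/*
 * Since -EBUSY only means the enqueue ring was momentarily full, a bounded
 * retry loop is the usual caller-side pattern; "ENQUEUE_RETRIES" and the
 * surrounding variables are illustrative assumptions:
 *
 *      for (i = 0; i < ENQUEUE_RETRIES; i++) {
 *              err = dpaa2_io_service_enqueue_qd(NULL, qdid, prio, qdbin, fd);
 *              if (err != -EBUSY)
 *                      break;
 *      }
 */
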
/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
                             u32 bpid,
                             const u64 *buffers,
                             unsigned int num_buffers)
{
        struct qbman_release_desc rd;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
                             u32 bpid,
                             u64 *buffers,
                             unsigned int num_buffers)
{
        unsigned long irqflags;
        int err;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

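/*
 * Buffer pool sketch, assuming "addrs" holds the DMA addresses of buffers
 * already mapped for the device (all names here are illustrative): seed the
 * pool with release, drain it with acquire, and remember that acquire may
 * legitimately return fewer buffers than requested, down to zero for an
 * empty pool:
 *
 *      err = dpaa2_io_service_release(NULL, bpid, addrs, num);
 *      ...
 *      n = dpaa2_io_service_acquire(NULL, bpid, addrs, num);
 *      if (n < 0)
 *              return n;
 */
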
/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeue results for frames, must be <= 16.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
                                             struct device *dev)
{
        struct dpaa2_io_store *ret;
        size_t size;

        if (!max_frames || (max_frames > 16))
                return NULL;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->max = max_frames;
        /* over-allocate so vaddr can be aligned to a 64-byte boundary */
        size = max_frames * sizeof(struct dpaa2_dq) + 64;
        ret->alloced_addr = kzalloc(size, GFP_KERNEL);
        if (!ret->alloced_addr) {
                kfree(ret);
                return NULL;
        }

        ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
        ret->paddr = dma_map_single(dev, ret->vaddr,
                                    sizeof(struct dpaa2_dq) * max_frames,
                                    DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, ret->paddr)) {
                kfree(ret->alloced_addr);
                kfree(ret);
                return NULL;
        }

        ret->idx = 0;
        ret->dev = dev;

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
        dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
                         DMA_FROM_DEVICE);
        kfree(s->alloced_addr);
        kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
        struct dpaa2_dq *ret = &s->vaddr[s->idx];
        int match;

        match = qbman_result_has_new_result(s->swp, ret);
        if (!match) {
                *is_last = 0;
                return NULL;
        }

        s->idx++;

        if (dpaa2_dq_is_pull_complete(ret)) {
                *is_last = 1;
                s->idx = 0;
                /*
                 * If we get an empty dequeue result to terminate a
                 * zero-results vdqcr, return NULL to the caller rather than
                 * expecting the caller to check non-NULL results every time.
                 */
                if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
                        ret = NULL;
        } else {
                *is_last = 0;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

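/*
 * Drain loop sketch for a store that a pull has been issued to; "store",
 * "ctx" and consume_frame() are hypothetical names. A NULL result with
 * is_last still 0 means the hardware has not yet written the next entry,
 * so the loop simply polls again; once the pull is fully consumed, the
 * traffic source can be rearmed for the next notification:
 *
 *      do {
 *              dq = dpaa2_io_store_next(store, &is_last);
 *              if (dq)
 *                      consume_frame(dpaa2_dq_fd(dq));
 *      } while (!is_last);
 *
 *      dpaa2_io_service_rearm(NULL, ctx);
 */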