1 /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "qman_priv.h"
33 #define DQRR_MAXFILL 15
34 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
35 #define IRQNAME "QMan portal %d"
36 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
37 #define QMAN_POLL_LIMIT 32
38 #define QMAN_PIRQ_DQRR_ITHRESH 12
39 #define QMAN_PIRQ_MR_ITHRESH 4
40 #define QMAN_PIRQ_IPERIOD 100
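/*
 * The three QMAN_PIRQ_* values above are the default "static interrupt-gating
 * controls" written to the portal's DQRR_ITR, MR_ITR and ITPR registers in
 * qman_create_portal() below; larger values batch more work per interrupt at
 * the cost of latency.
 */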
42 /* Portal register assists */
44 /* Cache-inhibited register offsets */
45 #define QM_REG_EQCR_PI_CINH 0x0000
46 #define QM_REG_EQCR_CI_CINH 0x0004
47 #define QM_REG_EQCR_ITR 0x0008
48 #define QM_REG_DQRR_PI_CINH 0x0040
49 #define QM_REG_DQRR_CI_CINH 0x0044
50 #define QM_REG_DQRR_ITR 0x0048
51 #define QM_REG_DQRR_DCAP 0x0050
52 #define QM_REG_DQRR_SDQCR 0x0054
53 #define QM_REG_DQRR_VDQCR 0x0058
54 #define QM_REG_DQRR_PDQCR 0x005c
55 #define QM_REG_MR_PI_CINH 0x0080
56 #define QM_REG_MR_CI_CINH 0x0084
57 #define QM_REG_MR_ITR 0x0088
58 #define QM_REG_CFG 0x0100
59 #define QM_REG_ISR 0x0e00
60 #define QM_REG_IER 0x0e04
61 #define QM_REG_ISDR 0x0e08
62 #define QM_REG_IIR 0x0e0c
63 #define QM_REG_ITPR 0x0e14
65 /* Cache-enabled register offsets */
66 #define QM_CL_EQCR 0x0000
67 #define QM_CL_DQRR 0x1000
68 #define QM_CL_MR 0x2000
69 #define QM_CL_EQCR_PI_CENA 0x3000
70 #define QM_CL_EQCR_CI_CENA 0x3100
71 #define QM_CL_DQRR_PI_CENA 0x3200
72 #define QM_CL_DQRR_CI_CENA 0x3300
73 #define QM_CL_MR_PI_CENA 0x3400
74 #define QM_CL_MR_CI_CENA 0x3500
75 #define QM_CL_CR 0x3800
76 #define QM_CL_RR0 0x3900
77 #define QM_CL_RR1 0x3940
/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrades performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler
 * treats the portal registers as volatile.
 */
87 /* Cache-enabled ring access */
88 #define qm_cl(base, idx) ((void *)base + ((idx) << 6))
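/*
 * Each ring entry occupies one 64-byte cacheline, so qm_cl() turns a ring
 * index into the address of that entry by shifting the index left by 6.
 * Illustrative example:
 *
 *	union qm_mr_entry *e = qm_cl(mr->ring, 3);	(== mr->ring + 0xc0)
 */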
/*
 * Portal modes:
 *   pmode == production mode
 *   cmode == consumption mode
 *   dmode == h/w dequeue mode
 * Enum values use 3-letter codes. The first letter matches the portal mode;
 * the remaining two letters indicate:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
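/*
 * Example of the naming scheme: qm_dqrr_cdc is the DQRR consumption mode in
 * which entries are acknowledged via DCA, while qm_eqcr_pvb is the EQCR
 * production mode driven by the in-band valid-bit.
 */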
133 /* --- Portal structures --- */
135 #define QM_EQCR_SIZE 8
136 #define QM_DQRR_SIZE 16
139 /* "Enqueue Command" */
140 struct qm_eqcr_entry {
141 u8 _ncw_verb; /* writes to this are non-coherent */
145 __be32 fqid; /* 24-bit */
149 } __packed __aligned(8);
150 #define QM_EQCR_VERB_VBIT 0x80
151 #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
152 #define QM_EQCR_VERB_CMD_ENQUEUE 0x01
153 #define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
154 #define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
155 #define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
185 /* MC (Management Command) command */
186 /* "FQ" command layout */
190 __be32 fqid; /* 24-bit */
194 /* "CGR" command layout */
202 #define QM_MCC_VERB_VBIT 0x80
203 #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
204 #define QM_MCC_VERB_INITFQ_PARKED 0x40
205 #define QM_MCC_VERB_INITFQ_SCHED 0x41
206 #define QM_MCC_VERB_QUERYFQ 0x44
207 #define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
208 #define QM_MCC_VERB_QUERYWQ 0x46
209 #define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
210 #define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
211 #define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
212 #define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
213 #define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
214 #define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
215 #define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
216 #define QM_MCC_VERB_INITCGR 0x50
217 #define QM_MCC_VERB_MODIFYCGR 0x51
218 #define QM_MCC_VERB_CGRTESTWRITE 0x52
219 #define QM_MCC_VERB_QUERYCGR 0x58
220 #define QM_MCC_VERB_QUERYCONGESTION 0x59
221 union qm_mc_command {
223 u8 _ncw_verb; /* writes to this are non-coherent */
226 struct qm_mcc_initfq initfq;
227 struct qm_mcc_initcgr initcgr;
229 struct qm_mcc_cgr cgr;
232 /* MC (Management Command) result */
234 struct qm_mcr_queryfq {
238 struct qm_fqd fqd; /* the FQD fields are here */
242 /* "Alter FQ State Commands" */
243 struct qm_mcr_alterfq {
246 u8 fqs; /* Frame Queue Status */
249 #define QM_MCR_VERB_RRID 0x80
250 #define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
251 #define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
252 #define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
253 #define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
254 #define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
255 #define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
256 #define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
257 #define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
258 #define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
259 #define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
260 #define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
261 #define QM_MCR_RESULT_NULL 0x00
262 #define QM_MCR_RESULT_OK 0xf0
263 #define QM_MCR_RESULT_ERR_FQID 0xf1
264 #define QM_MCR_RESULT_ERR_FQSTATE 0xf2
265 #define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
266 #define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
267 #define QM_MCR_RESULT_PENDING 0xf8
268 #define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
269 #define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
270 #define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
271 #define QM_MCR_TIMEOUT 10000 /* us */
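/*
 * NB: management commands are synchronous; after qm_mc_commit() the response
 * register is polled via qm_mc_result_timeout() below, which gives up after
 * roughly QM_MCR_TIMEOUT microseconds.
 */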
278 struct qm_mcr_queryfq queryfq;
279 struct qm_mcr_alterfq alterfq;
280 struct qm_mcr_querycgr querycgr;
281 struct qm_mcr_querycongestion querycongestion;
282 struct qm_mcr_querywq querywq;
283 struct qm_mcr_queryfq_np queryfq_np;
287 union qm_mc_command *cr;
288 union qm_mc_result *rr;
290 #ifdef CONFIG_FSL_DPAA_CHECKING
292 /* Can be _mc_start()ed */
294 /* Can be _mc_commit()ed or _mc_abort()ed */
296 /* Can only be _mc_retry()ed */
303 void __iomem *ce; /* cache-enabled */
304 void __iomem *ci; /* cache-inhibited */
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
319 } ____cacheline_aligned;
321 /* Cache-inhibited register access. */
322 static inline u32 qm_in(struct qm_portal *p, u32 offset)
324 return be32_to_cpu(__raw_readl(p->addr.ci + offset));
327 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
329 __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
332 /* Cache Enabled Portal Access */
333 static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
335 dpaa_invalidate(p->addr.ce + offset);
338 static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
340 dpaa_touch_ro(p->addr.ce + offset);
343 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
345 return be32_to_cpu(__raw_readl(p->addr.ce + offset));
348 /* --- EQCR API --- */
350 #define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
351 #define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
353 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
354 static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;
	return (struct qm_eqcr_entry *)addr;
363 /* Bit-wise logic to convert a ring pointer to a ring index */
364 static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
366 return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
369 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
370 static inline void eqcr_inc(struct qm_eqcr *eqcr)
372 /* increment to the next EQCR pointer and handle overflow and 'vbit' */
373 struct qm_eqcr_entry *partial = eqcr->cursor + 1;
375 eqcr->cursor = eqcr_carryclear(partial);
376 if (partial != eqcr->cursor)
377 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
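/*
 * Worked example (illustrative, assuming 64-byte EQCR entries): with
 * QM_EQCR_SIZE == 8 and EQCR_SHIFT == 6, EQCR_CARRY is 0x200. Incrementing a
 * cursor sitting on the last entry yields ring + 0x200; eqcr_carryclear()
 * masks that carry bit off, wrapping the cursor back to ring + 0x0, and the
 * resulting pointer change is what tells eqcr_inc() to toggle 'vbit'.
 */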
380 static inline int qm_eqcr_init(struct qm_portal *portal,
381 enum qm_eqcr_pmode pmode,
382 unsigned int eq_stash_thresh,
385 struct qm_eqcr *eqcr = &portal->eqcr;
389 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
390 eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
391 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
392 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
393 eqcr->cursor = eqcr->ring + pi;
394 eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
395 QM_EQCR_VERB_VBIT : 0;
396 eqcr->available = QM_EQCR_SIZE - 1 -
397 dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
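	/*
	 * Illustrative example, assuming dpaa_cyc_diff() returns the forward
	 * distance from its second argument to its third, modulo the ring
	 * size: with ci == 6 and pi == 1 there are (1 - 6) mod 8 == 3 entries
	 * still in flight, so 'available' starts at 8 - 1 - 3 == 4 (one slot
	 * is always kept free so that a full ring and an empty ring can be
	 * told apart).
	 */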
398 eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
399 #ifdef CONFIG_FSL_DPAA_CHECKING
403 cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
404 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
405 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
406 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
407 qm_out(portal, QM_REG_CFG, cfg);
411 static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
413 return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
416 static inline void qm_eqcr_finish(struct qm_portal *portal)
418 struct qm_eqcr *eqcr = &portal->eqcr;
419 u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
420 u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
422 DPAA_ASSERT(!eqcr->busy);
423 if (pi != eqcr_ptr2idx(eqcr->cursor))
424 pr_crit("losing uncommitted EQCR entries\n");
426 pr_crit("missing existing EQCR completions\n");
427 if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
428 pr_crit("EQCR destroyed unquiesced\n");
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
434 struct qm_eqcr *eqcr = &portal->eqcr;
436 DPAA_ASSERT(!eqcr->busy);
437 if (!eqcr->available)
440 #ifdef CONFIG_FSL_DPAA_CHECKING
443 dpaa_zero(eqcr->cursor);
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
450 struct qm_eqcr *eqcr = &portal->eqcr;
453 DPAA_ASSERT(!eqcr->busy);
454 if (!eqcr->available) {
456 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
458 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
459 eqcr->available += diff;
463 #ifdef CONFIG_FSL_DPAA_CHECKING
466 dpaa_zero(eqcr->cursor);
470 static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
472 DPAA_ASSERT(eqcr->busy);
473 DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
474 DPAA_ASSERT(eqcr->available >= 1);
477 static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
479 struct qm_eqcr *eqcr = &portal->eqcr;
480 struct qm_eqcr_entry *eqcursor;
482 eqcr_commit_checks(eqcr);
483 DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
485 eqcursor = eqcr->cursor;
486 eqcursor->_ncw_verb = myverb | eqcr->vbit;
487 dpaa_flush(eqcursor);
490 #ifdef CONFIG_FSL_DPAA_CHECKING
495 static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
497 qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
500 static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
502 struct qm_eqcr *eqcr = &portal->eqcr;
503 u8 diff, old_ci = eqcr->ci;
505 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
506 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
507 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
508 eqcr->available += diff;
512 static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
514 struct qm_eqcr *eqcr = &portal->eqcr;
516 eqcr->ithresh = ithresh;
517 qm_out(portal, QM_REG_EQCR_ITR, ithresh);
520 static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
522 struct qm_eqcr *eqcr = &portal->eqcr;
524 return eqcr->available;
527 static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
529 struct qm_eqcr *eqcr = &portal->eqcr;
531 return QM_EQCR_SIZE - 1 - eqcr->available;
534 /* --- DQRR API --- */
536 #define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
537 #define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
539 static const struct qm_dqrr_entry *dqrr_carryclear(
540 const struct qm_dqrr_entry *p)
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;
	return (const struct qm_dqrr_entry *)addr;
549 static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
551 return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
554 static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
556 return dqrr_carryclear(e + 1);
559 static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
561 qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
562 ((mf & (QM_DQRR_SIZE - 1)) << 20));
565 static inline int qm_dqrr_init(struct qm_portal *portal,
566 const struct qm_portal_config *config,
567 enum qm_dqrr_dmode dmode,
568 enum qm_dqrr_pmode pmode,
569 enum qm_dqrr_cmode cmode, u8 max_fill)
571 struct qm_dqrr *dqrr = &portal->dqrr;
574 /* Make sure the DQRR will be idle when we enable */
575 qm_out(portal, QM_REG_DQRR_SDQCR, 0);
576 qm_out(portal, QM_REG_DQRR_VDQCR, 0);
577 qm_out(portal, QM_REG_DQRR_PDQCR, 0);
578 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
579 dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
580 dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
581 dqrr->cursor = dqrr->ring + dqrr->ci;
582 dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
583 dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
584 QM_DQRR_VERB_VBIT : 0;
585 dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
586 #ifdef CONFIG_FSL_DPAA_CHECKING
591 /* Invalidate every ring entry before beginning */
592 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
593 dpaa_invalidate(qm_cl(dqrr->ring, cfg));
594 cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
595 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
596 ((dmode & 1) << 18) | /* DP */
597 ((cmode & 3) << 16) | /* DCM */
599 (0 ? 0x40 : 0) | /* Ignore RP */
600 (0 ? 0x10 : 0); /* Ignore SP */
601 qm_out(portal, QM_REG_CFG, cfg);
602 qm_dqrr_set_maxfill(portal, max_fill);
606 static inline void qm_dqrr_finish(struct qm_portal *portal)
608 #ifdef CONFIG_FSL_DPAA_CHECKING
609 struct qm_dqrr *dqrr = &portal->dqrr;
611 if (dqrr->cmode != qm_dqrr_cdc &&
612 dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
613 pr_crit("Ignoring completed DQRR entries\n");
617 static inline const struct qm_dqrr_entry *qm_dqrr_current(
618 struct qm_portal *portal)
620 struct qm_dqrr *dqrr = &portal->dqrr;
627 static inline u8 qm_dqrr_next(struct qm_portal *portal)
629 struct qm_dqrr *dqrr = &portal->dqrr;
631 DPAA_ASSERT(dqrr->fill);
632 dqrr->cursor = dqrr_inc(dqrr->cursor);
636 static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
638 struct qm_dqrr *dqrr = &portal->dqrr;
639 struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
641 DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
642 #ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stashing.
	 */
647 dpaa_invalidate_touch_ro(res);
	/*
	 * When accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
653 if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
654 dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
656 dqrr->vbit ^= QM_DQRR_VERB_VBIT;
661 static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
662 const struct qm_dqrr_entry *dq,
665 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
666 int idx = dqrr_ptr2idx(dq);
668 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
669 DPAA_ASSERT((dqrr->ring + idx) == dq);
670 DPAA_ASSERT(idx < QM_DQRR_SIZE);
671 qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
672 ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
673 idx); /* DQRR_DCAP::DCAP_CI */
676 static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
678 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
680 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
681 qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
682 (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
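/*
 * Illustrative example: to acknowledge the DQRR entries at ring indices 0 and
 * 3 in a single shot, pass a bitmask with bits 0 and 3 set, i.e.
 *
 *	qm_dqrr_cdc_consume_n(portal, BIT(0) | BIT(3));
 */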
685 static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
687 qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
690 static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
692 qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
695 static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
697 qm_out(portal, QM_REG_DQRR_ITR, ithresh);
702 #define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
703 #define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
705 static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;
	return (union qm_mr_entry *)addr;
714 static inline int mr_ptr2idx(const union qm_mr_entry *e)
716 return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
719 static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
721 return mr_carryclear(e + 1);
724 static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
725 enum qm_mr_cmode cmode)
727 struct qm_mr *mr = &portal->mr;
730 mr->ring = portal->addr.ce + QM_CL_MR;
731 mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
732 mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
733 mr->cursor = mr->ring + mr->ci;
734 mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
735 mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
736 ? QM_MR_VERB_VBIT : 0;
737 mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
738 #ifdef CONFIG_FSL_DPAA_CHECKING
742 cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
743 ((cmode & 1) << 8); /* QCSP_CFG:MM */
744 qm_out(portal, QM_REG_CFG, cfg);
748 static inline void qm_mr_finish(struct qm_portal *portal)
750 struct qm_mr *mr = &portal->mr;
752 if (mr->ci != mr_ptr2idx(mr->cursor))
753 pr_crit("Ignoring completed MR entries\n");
756 static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
758 struct qm_mr *mr = &portal->mr;
765 static inline int qm_mr_next(struct qm_portal *portal)
767 struct qm_mr *mr = &portal->mr;
769 DPAA_ASSERT(mr->fill);
770 mr->cursor = mr_inc(mr->cursor);
774 static inline void qm_mr_pvb_update(struct qm_portal *portal)
776 struct qm_mr *mr = &portal->mr;
777 union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
779 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * When accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
784 if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
785 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
787 mr->vbit ^= QM_MR_VERB_VBIT;
791 dpaa_invalidate_touch_ro(res);
794 static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
796 struct qm_mr *mr = &portal->mr;
798 DPAA_ASSERT(mr->cmode == qm_mr_cci);
799 mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
800 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
803 static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
805 struct qm_mr *mr = &portal->mr;
807 DPAA_ASSERT(mr->cmode == qm_mr_cci);
808 mr->ci = mr_ptr2idx(mr->cursor);
809 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
812 static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
814 qm_out(portal, QM_REG_MR_ITR, ithresh);
817 /* --- Management command API --- */
819 static inline int qm_mc_init(struct qm_portal *portal)
821 struct qm_mc *mc = &portal->mc;
823 mc->cr = portal->addr.ce + QM_CL_CR;
824 mc->rr = portal->addr.ce + QM_CL_RR0;
825 mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
827 mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
828 #ifdef CONFIG_FSL_DPAA_CHECKING
829 mc->state = qman_mc_idle;
834 static inline void qm_mc_finish(struct qm_portal *portal)
836 #ifdef CONFIG_FSL_DPAA_CHECKING
837 struct qm_mc *mc = &portal->mc;
839 DPAA_ASSERT(mc->state == qman_mc_idle);
840 if (mc->state != qman_mc_idle)
841 pr_crit("Losing incomplete MC command\n");
845 static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
847 struct qm_mc *mc = &portal->mc;
849 DPAA_ASSERT(mc->state == qman_mc_idle);
850 #ifdef CONFIG_FSL_DPAA_CHECKING
851 mc->state = qman_mc_user;
857 static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
859 struct qm_mc *mc = &portal->mc;
860 union qm_mc_result *rr = mc->rr + mc->rridx;
862 DPAA_ASSERT(mc->state == qman_mc_user);
864 mc->cr->_ncw_verb = myverb | mc->vbit;
866 dpaa_invalidate_touch_ro(rr);
867 #ifdef CONFIG_FSL_DPAA_CHECKING
868 mc->state = qman_mc_hw;
872 static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
874 struct qm_mc *mc = &portal->mc;
875 union qm_mc_result *rr = mc->rr + mc->rridx;
877 DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
883 if (!__raw_readb(&rr->verb)) {
884 dpaa_invalidate_touch_ro(rr);
888 mc->vbit ^= QM_MCC_VERB_VBIT;
889 #ifdef CONFIG_FSL_DPAA_CHECKING
890 mc->state = qman_mc_idle;
895 static inline int qm_mc_result_timeout(struct qm_portal *portal,
896 union qm_mc_result **mcr)
898 int timeout = QM_MCR_TIMEOUT;
901 *mcr = qm_mc_result(portal);
910 static inline void fq_set(struct qman_fq *fq, u32 mask)
912 set_bits(mask, &fq->flags);
915 static inline void fq_clear(struct qman_fq *fq, u32 mask)
917 clear_bits(mask, &fq->flags);
920 static inline int fq_isset(struct qman_fq *fq, u32 mask)
922 return fq->flags & mask;
925 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
927 return !(fq->flags & mask);
932 /* PORTAL_BITS_*** - dynamic, strictly internal */
934 /* interrupt sources processed by portal_isr(), configurable */
935 unsigned long irq_sources;
936 u32 use_eqcr_ci_stashing;
937 /* only 1 volatile dequeue at a time */
938 struct qman_fq *vdqcr_owned;
940 /* probing time config params for cpu-affine portals */
941 const struct qm_portal_config *config;
942 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
943 struct qman_cgrs *cgrs;
944 /* linked-list of CSCN handlers. */
945 struct list_head cgr_cbs;
948 struct work_struct congestion_work;
949 struct work_struct mr_work;
950 char irqname[MAX_IRQNAME];
953 static cpumask_t affine_mask;
954 static DEFINE_SPINLOCK(affine_mask_lock);
955 static u16 affine_channels[NR_CPUS];
956 static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
957 struct qman_portal *affine_portals[NR_CPUS];
959 static inline struct qman_portal *get_affine_portal(void)
961 return &get_cpu_var(qman_affine_portal);
964 static inline void put_affine_portal(void)
966 put_cpu_var(qman_affine_portal);
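/*
 * NB: get_cpu_var() disables preemption, so every get_affine_portal() must be
 * paired with a put_affine_portal() on the same path, and the portal pointer
 * must not be used after the put.
 */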
969 static struct workqueue_struct *qm_portal_wq;
971 int qman_wq_alloc(void)
973 qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
980 * This is what everything can wait on, even if it migrates to a different cpu
981 * to the one whose affine portal it is waiting on.
983 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
985 static struct qman_fq **fq_table;
986 static u32 num_fqids;
988 int qman_alloc_fq_table(u32 _num_fqids)
990 num_fqids = _num_fqids;
992 fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
996 pr_debug("Allocated fq lookup table at %p, entry count %u\n",
997 fq_table, num_fqids * 2);
1001 static struct qman_fq *idx_to_fq(u32 idx)
1005 #ifdef CONFIG_FSL_DPAA_CHECKING
1006 if (WARN_ON(idx >= num_fqids * 2))
1010 DPAA_ASSERT(!fq || idx == fq->idx);
1016 * Only returns full-service fq objects, not enqueue-only
1017 * references (QMAN_FQ_FLAG_NO_MODIFY).
1019 static struct qman_fq *fqid_to_fq(u32 fqid)
1021 return idx_to_fq(fqid * 2);
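/*
 * The lookup table holds two slots per FQID: the even slot (fqid * 2) is used
 * for full-service FQ objects, which is why fqid_to_fq() only ever returns
 * those; enqueue-only QMAN_FQ_FLAG_NO_MODIFY objects presumably occupy the
 * companion odd slot (see qman_create_fq()).
 */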
1024 static struct qman_fq *tag_to_fq(u32 tag)
1026 #if BITS_PER_LONG == 64
1027 return idx_to_fq(tag);
1029 return (struct qman_fq *)tag;
1033 static u32 fq_to_tag(struct qman_fq *fq)
1035 #if BITS_PER_LONG == 64
1042 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1043 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1044 unsigned int poll_limit);
1045 static void qm_congestion_task(struct work_struct *work);
1046 static void qm_mr_process_task(struct work_struct *work);
1048 static irqreturn_t portal_isr(int irq, void *ptr)
1050 struct qman_portal *p = ptr;
1051 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1057 /* DQRR-handling if it's interrupt-driven */
1058 if (is & QM_PIRQ_DQRI) {
1059 __poll_portal_fast(p, QMAN_POLL_LIMIT);
1060 clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
1062 /* Handling of anything else that's interrupt-driven */
1063 clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
1064 qm_out(&p->p, QM_REG_ISR, clear);
1068 static int drain_mr_fqrni(struct qm_portal *p)
1070 const union qm_mr_entry *msg;
1072 msg = qm_mr_current(p);
		/*
		 * If MR was full and h/w had other FQRNI entries to produce, we
1076 * need to allow it time to produce those entries once the
1077 * existing entries are consumed. A worst-case situation
1078 * (fully-loaded system) means h/w sequencers may have to do 3-4
1079 * other things before servicing the portal's MR pump, each of
1080 * which (if slow) may take ~50 qman cycles (which is ~200
1081 * processor cycles). So rounding up and then multiplying this
1082 * worst-case estimate by a factor of 10, just to be
1083 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
1084 * one entry at a time, so h/w has an opportunity to produce new
1085 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
1088 u64 now, then = jiffies;
1092 } while ((then + 10000) > now);
1093 msg = qm_mr_current(p);
1097 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1098 /* We aren't draining anything but FQRNIs */
1099 pr_err("Found verb 0x%x in MR\n", msg->verb);
1103 qm_mr_cci_consume(p, 1);
1107 static int qman_create_portal(struct qman_portal *portal,
1108 const struct qm_portal_config *c,
1109 const struct qman_cgrs *cgrs)
1111 struct qm_portal *p;
1117 #ifdef CONFIG_FSL_PAMU
1118 /* PAMU is required for stashing */
1119 portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
1121 portal->use_eqcr_ci_stashing = 0;
1124 * prep the low-level portal struct with the mapped addresses from the
1125 * config, everything that follows depends on it and "config" is more
1128 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
1129 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
1131 * If CI-stashing is used, the current defaults use a threshold of 3,
1132 * and stash with high-than-DQRR priority.
1134 if (qm_eqcr_init(p, qm_eqcr_pvb,
1135 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
1136 dev_err(c->dev, "EQCR initialisation failed\n");
1139 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
1140 qm_dqrr_cdc, DQRR_MAXFILL)) {
1141 dev_err(c->dev, "DQRR initialisation failed\n");
1144 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
1145 dev_err(c->dev, "MR initialisation failed\n");
1148 if (qm_mc_init(p)) {
1149 dev_err(c->dev, "MC initialisation failed\n");
1152 /* static interrupt-gating controls */
1153 qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
1154 qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
1155 qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
1156 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
1159 /* initial snapshot is no-depletion */
1160 qman_cgrs_init(&portal->cgrs[1]);
1162 portal->cgrs[0] = *cgrs;
1164 /* if the given mask is NULL, assume all CGRs can be seen */
1165 qman_cgrs_fill(&portal->cgrs[0]);
1166 INIT_LIST_HEAD(&portal->cgr_cbs);
1167 spin_lock_init(&portal->cgr_lock);
1168 INIT_WORK(&portal->congestion_work, qm_congestion_task);
1169 INIT_WORK(&portal->mr_work, qm_mr_process_task);
1171 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
1172 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
1173 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
1175 qm_out(p, QM_REG_ISDR, isdr);
1176 portal->irq_sources = 0;
1177 qm_out(p, QM_REG_IER, 0);
1178 qm_out(p, QM_REG_ISR, 0xffffffff);
1179 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
1180 if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
1181 dev_err(c->dev, "request_irq() failed\n");
1184 if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
1185 irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
1186 dev_err(c->dev, "irq_set_affinity() failed\n");
1190 /* Need EQCR to be empty before continuing */
1191 isdr &= ~QM_PIRQ_EQCI;
1192 qm_out(p, QM_REG_ISDR, isdr);
1193 ret = qm_eqcr_get_fill(p);
1195 dev_err(c->dev, "EQCR unclean\n");
1196 goto fail_eqcr_empty;
1198 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
1199 qm_out(p, QM_REG_ISDR, isdr);
1200 if (qm_dqrr_current(p)) {
1201 dev_err(c->dev, "DQRR unclean\n");
1202 qm_dqrr_cdc_consume_n(p, 0xffff);
1204 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1205 /* special handling, drain just in case it's a few FQRNIs */
1206 const union qm_mr_entry *e = qm_mr_current(p);
1208 dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
1209 e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
1210 goto fail_dqrr_mr_empty;
1214 qm_out(p, QM_REG_ISDR, 0);
1215 qm_out(p, QM_REG_IIR, 0);
1216 /* Write a sane SDQCR */
1217 qm_dqrr_sdqcr_set(p, portal->sdqcr);
1223 free_irq(c->irq, portal);
1225 kfree(portal->cgrs);
1238 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1239 const struct qman_cgrs *cgrs)
1241 struct qman_portal *portal;
1244 portal = &per_cpu(qman_affine_portal, c->cpu);
1245 err = qman_create_portal(portal, c, cgrs);
1249 spin_lock(&affine_mask_lock);
1250 cpumask_set_cpu(c->cpu, &affine_mask);
1251 affine_channels[c->cpu] = c->channel;
1252 affine_portals[c->cpu] = portal;
1253 spin_unlock(&affine_mask_lock);
1258 static void qman_destroy_portal(struct qman_portal *qm)
1260 const struct qm_portal_config *pcfg;
1262 /* Stop dequeues on the portal */
1263 qm_dqrr_sdqcr_set(&qm->p, 0);
1266 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1267 * something related to QM_PIRQ_EQCI, this may need fixing.
1268 * Also, due to the prefetching model used for CI updates in the enqueue
1269 * path, this update will only invalidate the CI cacheline *after*
1270 * working on it, so we need to call this twice to ensure a full update
1271 * irrespective of where the enqueue processing was at when the teardown
1274 qm_eqcr_cce_update(&qm->p);
1275 qm_eqcr_cce_update(&qm->p);
1278 free_irq(pcfg->irq, qm);
1281 qm_mc_finish(&qm->p);
1282 qm_mr_finish(&qm->p);
1283 qm_dqrr_finish(&qm->p);
1284 qm_eqcr_finish(&qm->p);
1289 const struct qm_portal_config *qman_destroy_affine_portal(void)
1291 struct qman_portal *qm = get_affine_portal();
1292 const struct qm_portal_config *pcfg;
1298 qman_destroy_portal(qm);
1300 spin_lock(&affine_mask_lock);
1301 cpumask_clear_cpu(cpu, &affine_mask);
1302 spin_unlock(&affine_mask_lock);
1303 put_affine_portal();
1307 /* Inline helper to reduce nesting in __poll_portal_slow() */
1308 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1309 const union qm_mr_entry *msg, u8 verb)
1312 case QM_MR_VERB_FQRL:
1313 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1314 fq_clear(fq, QMAN_FQ_STATE_ORL);
1316 case QM_MR_VERB_FQRN:
1317 DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1318 fq->state == qman_fq_state_sched);
1319 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1320 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1321 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1322 fq_set(fq, QMAN_FQ_STATE_NE);
1323 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1324 fq_set(fq, QMAN_FQ_STATE_ORL);
1325 fq->state = qman_fq_state_retired;
1327 case QM_MR_VERB_FQPN:
1328 DPAA_ASSERT(fq->state == qman_fq_state_sched);
1329 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1330 fq->state = qman_fq_state_parked;
1334 static void qm_congestion_task(struct work_struct *work)
1336 struct qman_portal *p = container_of(work, struct qman_portal,
1338 struct qman_cgrs rr, c;
1339 union qm_mc_result *mcr;
1340 struct qman_cgr *cgr;
1342 spin_lock(&p->cgr_lock);
1344 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1345 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1346 spin_unlock(&p->cgr_lock);
1347 dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1348 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1351 /* mask out the ones I'm not interested in */
1352 qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1354 /* check previous snapshot for delta, enter/exit congestion */
1355 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1356 /* update snapshot */
1357 qman_cgrs_cp(&p->cgrs[1], &rr);
1358 /* Invoke callback */
1359 list_for_each_entry(cgr, &p->cgr_cbs, node)
1360 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1361 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1362 spin_unlock(&p->cgr_lock);
1363 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1366 static void qm_mr_process_task(struct work_struct *work)
1368 struct qman_portal *p = container_of(work, struct qman_portal,
1370 const union qm_mr_entry *msg;
1377 qm_mr_pvb_update(&p->p);
1378 msg = qm_mr_current(&p->p);
1382 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
1383 /* The message is a software ERN iff the 0x20 bit is clear */
1386 case QM_MR_VERB_FQRNI:
1387 /* nada, we drop FQRNIs on the floor */
1389 case QM_MR_VERB_FQRN:
1390 case QM_MR_VERB_FQRL:
1391 /* Lookup in the retirement table */
1392 fq = fqid_to_fq(qm_fqid_get(&msg->fq));
1395 fq_state_change(p, fq, msg, verb);
1397 fq->cb.fqs(p, fq, msg);
1399 case QM_MR_VERB_FQPN:
1401 fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
1402 fq_state_change(p, fq, msg, verb);
1404 fq->cb.fqs(p, fq, msg);
1406 case QM_MR_VERB_DC_ERN:
1408 pr_crit_once("Leaking DCP ERNs!\n");
1411 pr_crit("Invalid MR verb 0x%02x\n", verb);
			/* It's a software ERN */
1415 fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
1416 fq->cb.ern(p, fq, msg);
1422 qm_mr_cci_consume(&p->p, num);
1423 qman_p_irqsource_add(p, QM_PIRQ_MRI);
1427 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
1429 if (is & QM_PIRQ_CSCI) {
1430 qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
1431 queue_work_on(smp_processor_id(), qm_portal_wq,
1432 &p->congestion_work);
1435 if (is & QM_PIRQ_EQRI) {
1436 qm_eqcr_cce_update(&p->p);
1437 qm_eqcr_set_ithresh(&p->p, 0);
1438 wake_up(&affine_queue);
1441 if (is & QM_PIRQ_MRI) {
1442 qman_p_irqsource_remove(p, QM_PIRQ_MRI);
1443 queue_work_on(smp_processor_id(), qm_portal_wq,
/*
 * Remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
1454 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
1456 p->vdqcr_owned = NULL;
1457 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
1458 wake_up(&affine_queue);
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe, because:
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere;
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against
 * the above potential conflicts, but that this function itself is not
 * re-entrant (this is because the function tracks one end of each FIFO in the
 * portal and we do *not* want to lock that). So the consequence is that it is
 * safe for user callbacks to call into any QMan API.
 */
1487 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1488 unsigned int poll_limit)
1490 const struct qm_dqrr_entry *dq;
1492 enum qman_cb_dqrr_result res;
1493 unsigned int limit = 0;
1496 qm_dqrr_pvb_update(&p->p);
1497 dq = qm_dqrr_current(&p->p);
1501 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1503 * VDQCR: don't trust context_b as the FQ may have
1504 * been configured for h/w consumption and we're
1505 * draining it post-retirement.
1507 fq = p->vdqcr_owned;
1509 * We only set QMAN_FQ_STATE_NE when retiring, so we
1510 * only need to check for clearing it when doing
1511 * volatile dequeues. It's one less thing to check
1512 * in the critical path (SDQCR).
1514 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1515 fq_clear(fq, QMAN_FQ_STATE_NE);
1517 * This is duplicated from the SDQCR code, but we
1518 * have stuff to do before *and* after this callback,
1519 * and we don't want multiple if()s in the critical
1522 res = fq->cb.dqrr(p, fq, dq);
1523 if (res == qman_cb_dqrr_stop)
1525 /* Check for VDQCR completion */
1526 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1529 /* SDQCR: context_b points to the FQ */
1530 fq = tag_to_fq(be32_to_cpu(dq->context_b));
1531 /* Now let the callback do its stuff */
1532 res = fq->cb.dqrr(p, fq, dq);
1534 * The callback can request that we exit without
1535 * consuming this entry nor advancing;
1537 if (res == qman_cb_dqrr_stop)
1540 /* Interpret 'dq' from a driver perspective. */
1542 * Parking isn't possible unless HELDACTIVE was set. NB,
1543 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1544 * check for HELDACTIVE to cover both.
1546 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1547 (res != qman_cb_dqrr_park));
1548 /* just means "skip it, I'll consume it myself later on" */
1549 if (res != qman_cb_dqrr_defer)
1550 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1551 res == qman_cb_dqrr_park);
1553 qm_dqrr_next(&p->p);
1555 * Entry processed and consumed, increment our counter. The
1556 * callback can request that we exit after consuming the
1557 * entry, and we also exit if we reach our processing limit,
1558 * so loop back only if neither of these conditions is met.
1560 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1565 void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1567 unsigned long irqflags;
1569 local_irq_save(irqflags);
1570 set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
1571 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1572 local_irq_restore(irqflags);
1574 EXPORT_SYMBOL(qman_p_irqsource_add);
1576 void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
1578 unsigned long irqflags;
	/*
	 * Our interrupt handler only processes+clears status register bits that
1583 * are in p->irq_sources. As we're trimming that mask, if one of them
1584 * were to assert in the status register just before we remove it from
1585 * the enable register, there would be an interrupt-storm when we
1586 * release the IRQ lock. So we wait for the enable register update to
1587 * take effect in h/w (by reading it back) and then clear all other bits
1588 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
1591 local_irq_save(irqflags);
1592 bits &= QM_PIRQ_VISIBLE;
1593 clear_bits(bits, &p->irq_sources);
1594 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1595 ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, i.e. to protect against re-ordering.
	 */
1600 qm_out(&p->p, QM_REG_ISR, ~ier);
1601 local_irq_restore(irqflags);
1603 EXPORT_SYMBOL(qman_p_irqsource_remove);
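/*
 * Typical usage of the two helpers above, as seen in this file's ISR and
 * workqueue paths: __poll_portal_slow() removes QM_PIRQ_CSCI/QM_PIRQ_MRI from
 * the source mask and queues work on qm_portal_wq, and the work functions
 * re-add the source via qman_p_irqsource_add() once the backlog is drained.
 */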
1605 const cpumask_t *qman_affine_cpus(void)
1607 return &affine_mask;
1609 EXPORT_SYMBOL(qman_affine_cpus);
1611 u16 qman_affine_channel(int cpu)
1614 struct qman_portal *portal = get_affine_portal();
1616 cpu = portal->config->cpu;
1617 put_affine_portal();
1619 WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1620 return affine_channels[cpu];
1622 EXPORT_SYMBOL(qman_affine_channel);
1624 struct qman_portal *qman_get_affine_portal(int cpu)
1626 return affine_portals[cpu];
1628 EXPORT_SYMBOL(qman_get_affine_portal);
1630 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
1632 return __poll_portal_fast(p, limit);
1634 EXPORT_SYMBOL(qman_p_poll_dqrr);
1636 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1638 unsigned long irqflags;
1640 local_irq_save(irqflags);
1641 pools &= p->config->pools;
1643 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1644 local_irq_restore(irqflags);
1646 EXPORT_SYMBOL(qman_p_static_dequeue_add);
1648 /* Frame queue API */
1650 static const char *mcr_result_str(u8 result)
1653 case QM_MCR_RESULT_NULL:
1654 return "QM_MCR_RESULT_NULL";
1655 case QM_MCR_RESULT_OK:
1656 return "QM_MCR_RESULT_OK";
1657 case QM_MCR_RESULT_ERR_FQID:
1658 return "QM_MCR_RESULT_ERR_FQID";
1659 case QM_MCR_RESULT_ERR_FQSTATE:
1660 return "QM_MCR_RESULT_ERR_FQSTATE";
1661 case QM_MCR_RESULT_ERR_NOTEMPTY:
1662 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1663 case QM_MCR_RESULT_PENDING:
1664 return "QM_MCR_RESULT_PENDING";
1665 case QM_MCR_RESULT_ERR_BADCOMMAND:
1666 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1668 return "<unknown MCR result>";
1671 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1673 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1674 int ret = qman_alloc_fqid(&fqid);
1681 fq->state = qman_fq_state_oos;
1682 fq->cgr_groupid = 0;
1684 /* A context_b of 0 is allegedly special, so don't use that fqid */
1685 if (fqid == 0 || fqid >= num_fqids) {
1686 WARN(1, "bad fqid %d\n", fqid);
1691 if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1694 WARN_ON(fq_table[fq->idx]);
1695 fq_table[fq->idx] = fq;
1699 EXPORT_SYMBOL(qman_create_fq);
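/*
 * Sketch of the FQ lifecycle as exposed by this file (illustrative only,
 * error handling omitted): a consumer-managed queue is typically driven as
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb, .cb.fqs = my_fqs_cb };
 *
 *	qman_create_fq(fqid, 0, &fq);
 *	qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 *	...frames arrive via the DQRR callback...
 *	qman_retire_fq(&fq, &flags);
 *	qman_oos_fq(&fq);
 *	qman_destroy_fq(&fq);
 *
 * where 'my_dqrr_cb', 'my_fqs_cb' and 'opts' are caller-supplied.
 */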
1701 void qman_destroy_fq(struct qman_fq *fq)
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
1707 switch (fq->state) {
1708 case qman_fq_state_parked:
1709 case qman_fq_state_oos:
1710 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1711 qman_release_fqid(fq->fqid);
1713 DPAA_ASSERT(fq_table[fq->idx]);
1714 fq_table[fq->idx] = NULL;
1719 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1721 EXPORT_SYMBOL(qman_destroy_fq);
1723 u32 qman_fq_fqid(struct qman_fq *fq)
1727 EXPORT_SYMBOL(qman_fq_fqid);
1729 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1731 union qm_mc_command *mcc;
1732 union qm_mc_result *mcr;
1733 struct qman_portal *p;
1737 myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
1738 ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1740 if (fq->state != qman_fq_state_oos &&
1741 fq->state != qman_fq_state_parked)
1743 #ifdef CONFIG_FSL_DPAA_CHECKING
1744 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1747 if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
1748 /* And can't be set at the same time as TDTHRESH */
1749 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
1752 /* Issue an INITFQ_[PARKED|SCHED] management command */
1753 p = get_affine_portal();
1754 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1755 (fq->state != qman_fq_state_oos &&
1756 fq->state != qman_fq_state_parked)) {
1760 mcc = qm_mc_start(&p->p);
1762 mcc->initfq = *opts;
1763 qm_fqid_set(&mcc->fq, fq->fqid);
1764 mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
1770 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1773 mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
1774 mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * ... and the physical address - NB, if the user wasn't trying
		 * to set CONTEXTA, clear the stashing settings.
		 */
1779 if (!(be16_to_cpu(mcc->initfq.we_mask) &
1780 QM_INITFQ_WE_CONTEXTA)) {
1781 mcc->initfq.we_mask |=
1782 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1783 memset(&mcc->initfq.fqd.context_a, 0,
1784 sizeof(mcc->initfq.fqd.context_a));
1786 struct qman_portal *p = qman_dma_portal;
1788 phys_fq = dma_map_single(p->config->dev, fq,
1789 sizeof(*fq), DMA_TO_DEVICE);
1790 if (dma_mapping_error(p->config->dev, phys_fq)) {
1791 dev_err(p->config->dev, "dma_mapping failed\n");
1796 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1799 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1802 if (!(be16_to_cpu(mcc->initfq.we_mask) &
1803 QM_INITFQ_WE_DESTWQ)) {
1804 mcc->initfq.we_mask |=
1805 cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1808 qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
1810 qm_mc_commit(&p->p, myverb);
1811 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1812 dev_err(p->config->dev, "MCR timeout\n");
1817 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1819 if (res != QM_MCR_RESULT_OK) {
1824 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
1825 if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
1826 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1828 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1830 if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
1831 fq->cgr_groupid = opts->fqd.cgid;
1833 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1834 qman_fq_state_sched : qman_fq_state_parked;
1837 put_affine_portal();
1840 EXPORT_SYMBOL(qman_init_fq);
1842 int qman_schedule_fq(struct qman_fq *fq)
1844 union qm_mc_command *mcc;
1845 union qm_mc_result *mcr;
1846 struct qman_portal *p;
1849 if (fq->state != qman_fq_state_parked)
1851 #ifdef CONFIG_FSL_DPAA_CHECKING
1852 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1855 /* Issue a ALTERFQ_SCHED management command */
1856 p = get_affine_portal();
1857 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1858 fq->state != qman_fq_state_parked) {
1862 mcc = qm_mc_start(&p->p);
1863 qm_fqid_set(&mcc->fq, fq->fqid);
1864 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1865 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1866 dev_err(p->config->dev, "ALTER_SCHED timeout\n");
1871 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1872 if (mcr->result != QM_MCR_RESULT_OK) {
1876 fq->state = qman_fq_state_sched;
1878 put_affine_portal();
1881 EXPORT_SYMBOL(qman_schedule_fq);
1883 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1885 union qm_mc_command *mcc;
1886 union qm_mc_result *mcr;
1887 struct qman_portal *p;
1891 if (fq->state != qman_fq_state_parked &&
1892 fq->state != qman_fq_state_sched)
1894 #ifdef CONFIG_FSL_DPAA_CHECKING
1895 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1898 p = get_affine_portal();
1899 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1900 fq->state == qman_fq_state_retired ||
1901 fq->state == qman_fq_state_oos) {
1905 mcc = qm_mc_start(&p->p);
1906 qm_fqid_set(&mcc->fq, fq->fqid);
1907 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1908 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1909 dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
1914 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1917 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1918 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1919 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1920 * friendly, otherwise the caller doesn't necessarily have a fully
1921 * "retired" FQ on return even if the retirement was immediate. However
1922 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
1925 if (res == QM_MCR_RESULT_OK) {
1927 /* Process 'fq' right away, we'll ignore FQRNI */
1928 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1929 fq_set(fq, QMAN_FQ_STATE_NE);
1930 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1931 fq_set(fq, QMAN_FQ_STATE_ORL);
1934 fq->state = qman_fq_state_retired;
			/*
			 * Another issue with supporting "immediate" retirement
1938 * is that we're forced to drop FQRNIs, because by the
1939 * time they're seen it may already be "too late" (the
1940 * fq may have been OOS'd and free()'d already). But if
1941 * the upper layer wants a callback whether it's
1942 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
1945 union qm_mr_entry msg;
1947 msg.verb = QM_MR_VERB_FQRNI;
1948 msg.fq.fqs = mcr->alterfq.fqs;
1949 qm_fqid_set(&msg.fq, fq->fqid);
1950 msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
1951 fq->cb.fqs(p, fq, &msg);
1953 } else if (res == QM_MCR_RESULT_PENDING) {
1955 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1960 put_affine_portal();
1963 EXPORT_SYMBOL(qman_retire_fq);
1965 int qman_oos_fq(struct qman_fq *fq)
1967 union qm_mc_command *mcc;
1968 union qm_mc_result *mcr;
1969 struct qman_portal *p;
1972 if (fq->state != qman_fq_state_retired)
1974 #ifdef CONFIG_FSL_DPAA_CHECKING
1975 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1978 p = get_affine_portal();
1979 if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
1980 fq->state != qman_fq_state_retired) {
1984 mcc = qm_mc_start(&p->p);
1985 qm_fqid_set(&mcc->fq, fq->fqid);
1986 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1987 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1991 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1992 if (mcr->result != QM_MCR_RESULT_OK) {
1996 fq->state = qman_fq_state_oos;
1998 put_affine_portal();
2001 EXPORT_SYMBOL(qman_oos_fq);
2003 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2005 union qm_mc_command *mcc;
2006 union qm_mc_result *mcr;
2007 struct qman_portal *p = get_affine_portal();
2010 mcc = qm_mc_start(&p->p);
2011 qm_fqid_set(&mcc->fq, fq->fqid);
2012 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2013 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2018 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2019 if (mcr->result == QM_MCR_RESULT_OK)
2020 *fqd = mcr->queryfq.fqd;
2024 put_affine_portal();
2028 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
2030 union qm_mc_command *mcc;
2031 union qm_mc_result *mcr;
2032 struct qman_portal *p = get_affine_portal();
2035 mcc = qm_mc_start(&p->p);
2036 qm_fqid_set(&mcc->fq, fq->fqid);
2037 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2038 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2043 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2044 if (mcr->result == QM_MCR_RESULT_OK)
2045 *np = mcr->queryfq_np;
2046 else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2051 put_affine_portal();
2054 EXPORT_SYMBOL(qman_query_fq_np);
2056 static int qman_query_cgr(struct qman_cgr *cgr,
2057 struct qm_mcr_querycgr *cgrd)
2059 union qm_mc_command *mcc;
2060 union qm_mc_result *mcr;
2061 struct qman_portal *p = get_affine_portal();
2064 mcc = qm_mc_start(&p->p);
2065 mcc->cgr.cgid = cgr->cgrid;
2066 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2067 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2071 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2072 if (mcr->result == QM_MCR_RESULT_OK)
2073 *cgrd = mcr->querycgr;
2075 dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
2076 mcr_result_str(mcr->result));
2080 put_affine_portal();
2084 int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2086 struct qm_mcr_querycgr query_cgr;
2089 err = qman_query_cgr(cgr, &query_cgr);
2093 *result = !!query_cgr.cgr.cs;
2096 EXPORT_SYMBOL(qman_query_cgr_congested);
2098 /* internal function used as a wait_event() expression */
2099 static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2101 unsigned long irqflags;
2104 local_irq_save(irqflags);
2107 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2110 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2111 p->vdqcr_owned = fq;
2112 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2115 local_irq_restore(irqflags);
2119 static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2123 *p = get_affine_portal();
2124 ret = set_p_vdqcr(*p, fq, vdqcr);
2125 put_affine_portal();
2129 static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2130 u32 vdqcr, u32 flags)
2134 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2135 ret = wait_event_interruptible(affine_queue,
2136 !set_vdqcr(p, fq, vdqcr));
2138 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2142 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
2144 struct qman_portal *p;
2147 if (fq->state != qman_fq_state_parked &&
2148 fq->state != qman_fq_state_retired)
2150 if (vdqcr & QM_VDQCR_FQID_MASK)
2152 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2154 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2155 if (flags & QMAN_VOLATILE_FLAG_WAIT)
2156 ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
2158 ret = set_vdqcr(&p, fq, vdqcr);
2162 if (flags & QMAN_VOLATILE_FLAG_FINISH) {
2163 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
2166 * know whether the VDQCR was issued or not. A signal
2167 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
2170 wait_event_interruptible(affine_queue,
2171 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2173 wait_event(affine_queue,
2174 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2178 EXPORT_SYMBOL(qman_volatile_dequeue);
2180 static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2183 qm_eqcr_cce_prefetch(&p->p);
2185 qm_eqcr_cce_update(&p->p);
2188 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
2190 struct qman_portal *p;
2191 struct qm_eqcr_entry *eq;
2192 unsigned long irqflags;
2195 p = get_affine_portal();
2196 local_irq_save(irqflags);
2198 if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
2203 eq = qm_eqcr_start_stash(&p->p);
2206 * The non-stashing case is harder, need to prefetch ahead of
2209 avail = qm_eqcr_get_avail(&p->p);
2211 update_eqcr_ci(p, avail);
2212 eq = qm_eqcr_start_no_stash(&p->p);
2218 qm_fqid_set(eq, fq->fqid);
2219 eq->tag = cpu_to_be32(fq_to_tag(fq));
2222 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
2224 local_irq_restore(irqflags);
2225 put_affine_portal();
2228 EXPORT_SYMBOL(qman_enqueue);
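
/*
 * Illustrative sketch (not part of the driver): build a single-buffer frame
 * descriptor and enqueue it, retrying briefly if the EQCR is full. "my_fq",
 * "buf_dma" and "len" are hypothetical caller-owned values.
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, buf_dma);
 *	qm_fd_set_contig(&fd, 0, len);
 *
 *	while (qman_enqueue(&my_fq, &fd) == -EBUSY)
 *		cpu_relax();
 */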

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();

	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
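
/*
 * Illustrative sketch (not part of the driver): allocate a CGR ID, register a
 * congestion-state callback on the current CPU's portal and enable CSCN
 * generation. "my_cgr", "my_cscn_cb" and the threshold value are hypothetical
 * and serve only this example.
 *
 *	struct qm_mcc_initcgr opts = { };
 *	u32 cgrid;
 *
 *	if (qman_alloc_cgrid(&cgrid))
 *		return;
 *	my_cgr.cgrid = cgrid;
 *	my_cgr.cb = my_cscn_cb;
 *	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x1000, 0);
 *	if (qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
 *		qman_release_cgrid(cgrid);
 */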

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)
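
/*
 * For clarity, the token-pasting in the drain helpers means that, for
 * example:
 *
 *	qm_mr_drain(&p->p, FQRN);
 *	qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY);
 *
 * expand to:
 *
 *	_qm_mr_consume_and_match_verb(&p->p, QM_MR_VERB_FQRN);
 *	_qm_dqrr_consume_and_match(&p->p, fqid, QM_DQRR_STAT_FQ_EMPTY, true);
 */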

static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do anymore checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1)<<4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR*/
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);
		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_OOS timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_OOS timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
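
/*
 * Illustrative sketch (not part of the driver): a DPAA driver might reserve a
 * single dynamic FQID from the allocator and hand it back once the FQ has
 * been torn down. "fqid" here is just a local in the example.
 *
 *	u32 fqid;
 *
 *	if (qman_alloc_fqid_range(&fqid, 1))
 *		return -ENOMEM;
 *	...
 *	qman_release_fqid(fqid);
 */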

int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an
	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
	 * channel is the pool-channel being released.
	 * When a non-OOS FQD is found we attempt to clean it up
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);