/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
        /* Each page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page; a next pointer is not required */
        QED_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list */
        QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
        QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
        QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
        QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
};

enum qed_chain_cnt_type {
        /* The chain's size/prod/cons are kept in 16-bit variables */
        QED_CHAIN_CNT_TYPE_U16,

        /* The chain's size/prod/cons are kept in 32-bit variables */
        QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
        struct regpair  next_phys;
        void            *next_virt;
};

struct qed_chain_pbl_u16 {
        u16 prod_page_idx;
        u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
        u32 prod_page_idx;
        u32 cons_page_idx;
};

struct qed_chain_ext_pbl {
        dma_addr_t p_pbl_phys;
        void *p_pbl_virt;
};

struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
        u16 cons_idx;
};

struct qed_chain_u32 {
        /* Cyclic index of next element to produce/consume */
        u32 prod_idx;
        u32 cons_idx;
};

struct addr_tbl_entry {
        void *virt_addr;
        dma_addr_t dma_map;
};

struct qed_chain {
        /* Fastpath portion of the chain - required for commands such
         * as produce / consume.
         */
        /* Point to next element to produce/consume */
        void *p_prod_elem;
        void *p_cons_elem;

        /* Fastpath portions of the PBL [if exists] */
        struct {
                /* Table keeping the virtual and physical addresses of the
                 * chain pages, corresponding to the physical addresses
                 * in the pbl table.
                 */
                struct addr_tbl_entry *pp_addr_tbl;

                union {
                        struct qed_chain_pbl_u16 u16;
                        struct qed_chain_pbl_u32 u32;
                } c;
        } pbl;

        union {
                struct qed_chain_u16 chain16;
                struct qed_chain_u32 chain32;
        } u;

        /* Capacity counts only usable elements */
        u32 capacity;
        u32 page_cnt;

        enum qed_chain_mode mode;

        /* Elements information for fast calculations */
        u16 elem_per_page;
        u16 elem_per_page_mask;
        u16 elem_size;
        u16 next_page_mask;
        u16 usable_per_page;
        u8 elem_unusable;

        u8 cnt_type;

        /* Slowpath of the chain - required for initialization and destruction,
         * but isn't involved in regular functionality.
         */

        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t p_phys_table;
                void *p_virt_table;
        } pbl_sp;

        /* Address of first page of the chain - the address is required
         * for fastpath operation [consume/produce] but only for the SINGLE
         * flavour which isn't considered fastpath [== SPQ].
         */
        void *p_virt_addr;
        dma_addr_t p_phys_addr;

        /* Total number of elements [for entire chain] */
        u32 size;

        u8 intended_use;

        bool b_external_pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)         \
        (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
         (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
                   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
               UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
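
/* Worked example (illustrative only, not part of the driver API): with
 * 16-byte elements in QED_CHAIN_MODE_NEXT_PTR mode on a 64-bit build,
 * where sizeof(struct qed_chain_next) is 16 (8-byte regpair plus pointer):
 *
 *   ELEMS_PER_PAGE(16)                     = 0x1000 / 16             = 256
 *   UNUSABLE_ELEMS_PER_PAGE(16, NEXT_PTR)  = 1 + (16 - 1) / 16       = 1
 *   USABLE_ELEMS_PER_PAGE(16, NEXT_PTR)    = 256 - 1                 = 255
 *   QED_CHAIN_PAGE_CNT(1020, 16, NEXT_PTR) = DIV_ROUND_UP(1020, 255) = 4
 *
 * i.e. the last element of every page is sacrificed for the embedded
 * next-page pointer, so 1020 usable elements require four pages.
 */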

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
        return p_chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
        u16 elem_per_page = p_chain->elem_per_page;
        u32 prod = p_chain->u.chain16.prod_idx;
        u32 cons = p_chain->u.chain16.cons_idx;
        u16 used;

        if (prod < cons)
                prod += (u32)U16_MAX + 1;

        used = (u16)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= prod / elem_per_page - cons / elem_per_page;

        return (u16)(p_chain->capacity - used);
}
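
/* Wraparound sketch (illustrative numbers, not taken from the driver): the
 * prod/cons indices are free-running cyclic counters, so after a 16-bit wrap
 * prod may be numerically smaller than cons. E.g. with prod_idx = 2 and
 * cons_idx = 0xfffe, prod is first widened to 0x10002 and
 * used = 0x10002 - 0xfffe = 4; in NEXT_PTR mode the per-page next-pointer
 * elements crossed in that span are then subtracted before the remaining
 * room is reported.
 */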

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
        u16 elem_per_page = p_chain->elem_per_page;
        u64 prod = p_chain->u.chain32.prod_idx;
        u64 cons = p_chain->u.chain32.cons_idx;
        u32 used;

        if (prod < cons)
                prod += (u64)U32_MAX + 1;

        used = (u32)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= (u32)(prod / elem_per_page - cons / elem_per_page);

        return p_chain->capacity - used;
}

static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
        return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
        return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
        return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
        return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
                       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
        struct qed_chain_next *p_next = NULL;
        u32 page_index = 0;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                p_next = *p_next_elem;
                *p_next_elem = p_next->next_virt;
                if (is_chain_u16(p_chain))
                        *(u16 *)idx_to_inc += p_chain->elem_unusable;
                else
                        *(u32 *)idx_to_inc += p_chain->elem_unusable;
                break;
        case QED_CHAIN_MODE_SINGLE:
                *p_next_elem = p_chain->p_virt_addr;
                break;

        case QED_CHAIN_MODE_PBL:
                if (is_chain_u16(p_chain)) {
                        if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
                                *(u16 *)page_to_inc = 0;
                        page_index = *(u16 *)page_to_inc;
                } else {
                        if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
                *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
        }
}

#define is_unusable_idx(p, idx) \
        (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)                             \
        ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)                         \
        ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define test_and_skip(p, idx)                                              \
        do {                                                               \
                if (is_chain_u16(p)) {                                     \
                        if (is_unusable_idx(p, idx))                       \
                                (p)->u.chain16.idx += (p)->elem_unusable;  \
                } else {                                                   \
                        if (is_unusable_idx_u32(p, idx))                   \
                                (p)->u.chain32.idx += (p)->elem_unusable;  \
                }                                                          \
        } while (0)
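
/* Example of the skip logic (illustrative numbers): continuing the 16-byte
 * element / NEXT_PTR example above, elem_per_page_mask is 0xff,
 * usable_per_page is 255 and elem_unusable is 1. When a cyclic index reaches
 * 255 within a page it points at the embedded next-pointer element, so
 * test_and_skip() bumps it by elem_unusable to 256, the first usable
 * element of the following page.
 */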

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx++;
        else
                p_chain->u.chain32.cons_idx++;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain32.prod_idx++;
        }

        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
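
/* Typical producer-side usage (illustrative sketch only; "my_bd", "fill_bd"
 * and the doorbell handling are hypothetical, not part of this API):
 *
 *        if (qed_chain_get_elem_left(p_chain)) {
 *                struct my_bd *bd = qed_chain_produce(p_chain);
 *
 *                fill_bd(bd, skb);                // driver-specific
 *                prod = qed_chain_get_prod_idx(p_chain);
 *                // ... write 'prod' to the device doorbell ...
 *        }
 *
 * The caller must check for available room first; qed_chain_produce() itself
 * performs no bounds checking.
 */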

/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of usable BDs in the chain
 *
 * @param p_chain
 *
 * @return u32, the chain capacity (number of usable BDs)
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
        return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * increments the producer index so it can be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
        test_and_skip(p_chain, prod_idx);
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx++;
        else
                p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access the passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
        }

        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
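
/* Typical consumer-side usage (illustrative sketch only; "hw_cons", "my_cqe"
 * and the completion handling are hypothetical, not part of this API):
 *
 *        while (qed_chain_get_cons_idx(p_chain) != hw_cons) {
 *                struct my_cqe *cqe = qed_chain_consume(p_chain);
 *
 *                handle_completion(cqe);              // driver-specific
 *                qed_chain_recycle_consumed(p_chain); // hand element back
 *        }
 *
 * Here hw_cons would be the consumer index reported by the firmware, read
 * before entering the loop.
 */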

/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
        u32 i;

        if (is_chain_u16(p_chain)) {
                p_chain->u.chain16.prod_idx = 0;
                p_chain->u.chain16.cons_idx = 0;
        } else {
                p_chain->u.chain32.prod_idx = 0;
                p_chain->u.chain32.cons_idx = 0;
        }
        p_chain->p_cons_elem = p_chain->p_virt_addr;
        p_chain->p_prod_elem = p_chain->p_virt_addr;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                /* Use (page_cnt - 1) as a reset value for the prod/cons page's
                 * indices, to avoid unnecessary page advancing on the first
                 * call to qed_chain_produce/consume. Instead, the indices
                 * will be advanced to page_cnt and then will be wrapped to 0.
                 */
                u32 reset_val = p_chain->page_cnt - 1;

                if (is_chain_u16(p_chain)) {
                        p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
                        p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
                        p_chain->pbl.c.u32.prod_page_idx = reset_val;
                        p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }

        switch (p_chain->intended_use) {
        case QED_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        qed_chain_recycle_consumed(p_chain);
                break;

        case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
        case QED_CHAIN_USE_TO_PRODUCE:
        default:
                /* Do nothing */
                break;
        }
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
                                         u32 page_cnt,
                                         u8 elem_size,
                                         enum qed_chain_use_mode intended_use,
                                         enum qed_chain_mode mode,
                                         enum qed_chain_cnt_type cnt_type)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr = NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size = elem_size;
        p_chain->intended_use = (u8)intended_use;
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;

        p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;

        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = NULL;
        p_chain->pbl.pp_addr_tbl = NULL;
}

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of the allocated buffer's beginning
 * @param p_phys_addr   physical address of the allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
                                      void *p_virt_addr, dma_addr_t p_phys_addr)
{
        p_chain->p_virt_addr = p_virt_addr;
        p_chain->p_phys_addr = p_phys_addr;
}
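
/* Illustrative initialization sketch for a single-page chain (the "my_elem"
 * type, "edev" device pointer and error handling are hypothetical; real
 * callers normally go through the qed chain allocation code rather than
 * open-coding this):
 *
 *        struct qed_chain chain;
 *        dma_addr_t phys;
 *        void *virt;
 *
 *        qed_chain_init_params(&chain, 1, sizeof(struct my_elem),
 *                              QED_CHAIN_USE_TO_PRODUCE,
 *                              QED_CHAIN_MODE_SINGLE, QED_CHAIN_CNT_TYPE_U16);
 *        virt = dma_alloc_coherent(&edev->pdev->dev, QED_CHAIN_PAGE_SIZE,
 *                                  &phys, GFP_KERNEL);
 *        qed_chain_init_mem(&chain, virt, phys);
 *        qed_chain_reset(&chain);
 */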

/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl    virtual address of the pre-allocated pbl table, which
 *                      holds the physical addresses of the chain pages.
 * @param p_phys_pbl    physical address of that pbl table.
 * @param pp_addr_tbl   pointer to a pre-allocated side table which will hold
 *                      the virtual and physical addresses of the chain pages.
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
                                          void *p_virt_pbl,
                                          dma_addr_t p_phys_pbl,
                                          struct addr_tbl_entry *pp_addr_tbl)
{
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}

/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of a chain page of which the next
 *                      pointer element is initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
                             void *p_virt_curr,
                             void *p_virt_next, dma_addr_t p_phys_next)
{
        struct qed_chain_next *p_next;
        u32 size;

        size = p_chain->elem_size * p_chain->usable_per_page;
        p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

        DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

        p_next->next_virt = p_virt_next;
}
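
/* Illustrative linking sketch for a NEXT_PTR chain (the "virt"/"phys" arrays
 * are hypothetical; the real loop lives in the qed chain allocation code):
 * after allocating page_cnt DMA pages, each page's trailing next-pointer
 * element is pointed at the following page, with the last page wrapping back
 * to the first, which is what qed_chain_get_last_elem() below relies on:
 *
 *        for (i = 0; i < page_cnt; i++)
 *                qed_chain_init_next_ptr_elem(&chain, virt[i],
 *                                             virt[(i + 1) % page_cnt],
 *                                             phys[(i + 1) % page_cnt]);
 */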

/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
        struct qed_chain_next *p_next = NULL;
        void *p_virt_addr = NULL;
        u32 size, last_page_idx;

        if (!p_chain->p_virt_addr)
                goto out;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                size = p_chain->elem_size * p_chain->usable_per_page;
                p_virt_addr = p_chain->p_virt_addr;
                p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
                        p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                           size);
                }
                break;
        case QED_CHAIN_MODE_SINGLE:
                p_virt_addr = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
                p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
                break;
        }
        /* p_virt_addr points at this stage to the last page of the chain */
        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
        p_virt_addr = (u8 *)p_virt_addr + size;
out:
        return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
                                      u32 prod_idx, void *p_prod_elem)
{
        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                u32 cur_prod, page_mask, page_cnt, page_diff;

                cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
                           p_chain->u.chain32.prod_idx;

                /* Assume that number of elements in a page is power of 2 */
                page_mask = ~p_chain->elem_per_page_mask;

                /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
                 * reaches the first element of next page before the page index
                 * is incremented. See qed_chain_produce().
                 * Index wrap around is not a problem because the difference
                 * between current and given producer indices is always
                 * positive and lower than the chain's capacity.
                 */
                page_diff = (((cur_prod - 1) & page_mask) -
                             ((prod_idx - 1) & page_mask)) /
                            p_chain->elem_per_page;

                page_cnt = qed_chain_get_page_cnt(p_chain);
                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.prod_page_idx =
                                (p_chain->pbl.c.u16.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
                else
                        p_chain->pbl.c.u32.prod_page_idx =
                                (p_chain->pbl.c.u32.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx = (u16)prod_idx;
        else
                p_chain->u.chain32.prod_idx = prod_idx;
        p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
        u32 i, page_cnt;

        if (p_chain->mode != QED_CHAIN_MODE_PBL)
                return;

        page_cnt = qed_chain_get_page_cnt(p_chain);

        for (i = 0; i < page_cnt; i++)
                memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
                       QED_CHAIN_PAGE_SIZE);
}

#endif