/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
        /* Each Page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page, so a next pointer is not required */
        QED_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list */
        QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
        QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
        QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
        QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
};

enum qed_chain_cnt_type {
        /* The chain's size/prod/cons are kept in 16-bit variables */
        QED_CHAIN_CNT_TYPE_U16,

        /* The chain's size/prod/cons are kept in 32-bit variables */
        QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
        struct regpair next_phys;
        void *next_virt;
};

struct qed_chain_pbl_u16 {
        u16 prod_page_idx;
        u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
        u32 prod_page_idx;
        u32 cons_page_idx;
};

struct qed_chain_ext_pbl {
        dma_addr_t p_pbl_phys;
        void *p_pbl_virt;
};

struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
        u16 cons_idx;
};

struct qed_chain_u32 {
        /* Cyclic index of next element to produce/consume */
        u32 prod_idx;
        u32 cons_idx;
};

struct addr_tbl_entry {
        void *virt_addr;
        dma_addr_t dma_map;
};

struct qed_chain {
        /* Fastpath portion of the chain - required for commands such
         * as produce / consume.
         */

        /* Point to next element to produce/consume */
        void *p_prod_elem;
        void *p_cons_elem;

        /* Fastpath portions of the PBL [if exists] */
        struct {
                /* Table for keeping the virtual and physical addresses of the
                 * chain pages, respectively to the physical addresses
                 * in the pbl table.
                 */
                struct addr_tbl_entry *pp_addr_tbl;

                union {
                        struct qed_chain_pbl_u16 u16;
                        struct qed_chain_pbl_u32 u32;
                } c;
        } pbl;

        union {
                struct qed_chain_u16 chain16;
                struct qed_chain_u32 chain32;
        } u;

        /* Capacity counts only usable elements */
        u32 capacity;
        u32 page_cnt;

        enum qed_chain_mode mode;

        /* Elements information for fast calculations */
        u16 elem_per_page;
        u16 elem_per_page_mask;
        u16 elem_size;
        u16 next_page_mask;
        u16 usable_per_page;
        u8 elem_unusable;

        u8 cnt_type;

        /* Slowpath of the chain - required for initialization and destruction,
         * but isn't involved in regular functionality.
         */

        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t p_phys_table;
                void *p_virt_table;
        } pbl_sp;

        /* Address of first page of the chain - the address is required
         * for fastpath operation [consume/produce] but only for the SINGLE
         * flavour which isn't considered fastpath [== SPQ].
         */
        void *p_virt_addr;
        dma_addr_t p_phys_addr;

        /* Total number of elements [for entire chain] */
        u32 size;

        u8 intended_use;

        bool b_external_pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)         \
        (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
         (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
                   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
               UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
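
/* Sizing example (illustrative only; the element size and count below are
 * made up): with 4 KB pages and 8-byte elements, ELEMS_PER_PAGE(8) = 512.
 * In QED_CHAIN_MODE_NEXT_PTR mode the trailing struct qed_chain_next
 * (a regpair plus a pointer, 16 bytes on 64-bit) costs
 * UNUSABLE_ELEMS_PER_PAGE(8, mode) = 1 + (16 - 1) / 8 = 2 elements, so
 * USABLE_ELEMS_PER_PAGE(8, mode) = 510 and
 * QED_CHAIN_PAGE_CNT(1024, 8, mode) = DIV_ROUND_UP(1024, 510) = 3 pages.
 */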

static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
        return p_chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
        u16 elem_per_page = p_chain->elem_per_page;
        u32 prod = p_chain->u.chain16.prod_idx;
        u32 cons = p_chain->u.chain16.cons_idx;
        u16 used;

        if (prod < cons)
                prod += (u32)U16_MAX + 1;

        used = (u16)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= prod / elem_per_page - cons / elem_per_page;

        return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
        u16 elem_per_page = p_chain->elem_per_page;
        u64 prod = p_chain->u.chain32.prod_idx;
        u64 cons = p_chain->u.chain32.cons_idx;
        u32 used;

        if (prod < cons)
                prod += (u64)U32_MAX + 1;

        used = (u32)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= (u32)(prod / elem_per_page - cons / elem_per_page);

        return p_chain->capacity - used;
}

static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
        return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
        return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
        return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
        return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
                       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
        struct qed_chain_next *p_next = NULL;
        u32 page_index = 0;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                p_next = *p_next_elem;
                *p_next_elem = p_next->next_virt;
                if (is_chain_u16(p_chain))
                        *(u16 *)idx_to_inc += p_chain->elem_unusable;
                else
                        *(u32 *)idx_to_inc += p_chain->elem_unusable;
                break;
        case QED_CHAIN_MODE_SINGLE:
                *p_next_elem = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                if (is_chain_u16(p_chain)) {
                        if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
                                *(u16 *)page_to_inc = 0;
                        page_index = *(u16 *)page_to_inc;
                } else {
                        if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
                *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
        }
}

#define is_unusable_idx(p, idx) \
        (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
        ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx) \
        ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define test_and_skip(p, idx)                                              \
        do {                                                               \
                if (is_chain_u16(p)) {                                     \
                        if (is_unusable_idx(p, idx))                       \
                                (p)->u.chain16.idx += (p)->elem_unusable;  \
                } else {                                                   \
                        if (is_unusable_idx_u32(p, idx))                   \
                                (p)->u.chain32.idx += (p)->elem_unusable;  \
                }                                                          \
        } while (0)

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx++;
        else
                p_chain->u.chain32.cons_idx++;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain32.prod_idx++;
        }

        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
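
/* Usage sketch (illustrative, not part of the driver API): reserving and
 * filling one element on a producer chain. The ring wrapper and element type
 * (my_ring, struct my_tx_bd) are hypothetical.
 *
 *        struct my_tx_bd *bd;
 *
 *        if (!qed_chain_get_elem_left(&my_ring->chain))
 *                return -EBUSY;
 *        bd = qed_chain_produce(&my_ring->chain);
 *        memset(bd, 0, sizeof(*bd));
 */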

/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in the chain
 *
 * @param p_chain
 *
 * @return u32, the chain's capacity in usable BDs
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
        return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments the producer so it can be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
        test_and_skip(p_chain, prod_idx);
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx++;
        else
                p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
        }

        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
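
/* Usage sketch (illustrative, not part of the driver API): draining elements
 * that FW has produced on a consumer chain. The hardware consumer snapshot
 * (hw_cons) and the handler (handle_cqe) are hypothetical; the recycle call
 * assumes a QED_CHAIN_USE_TO_CONSUME_PRODUCE style ring.
 *
 *        while (qed_chain_get_cons_idx(&my_ring->chain) != hw_cons) {
 *                struct my_cqe *cqe = qed_chain_consume(&my_ring->chain);
 *
 *                handle_cqe(cqe);
 *                qed_chain_recycle_consumed(&my_ring->chain);
 *        }
 */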

/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
        u32 i;

        if (is_chain_u16(p_chain)) {
                p_chain->u.chain16.prod_idx = 0;
                p_chain->u.chain16.cons_idx = 0;
        } else {
                p_chain->u.chain32.prod_idx = 0;
                p_chain->u.chain32.cons_idx = 0;
        }
        p_chain->p_cons_elem = p_chain->p_virt_addr;
        p_chain->p_prod_elem = p_chain->p_virt_addr;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                /* Use (page_cnt - 1) as a reset value for the prod/cons page's
                 * indices, to avoid unnecessary page advancing on the first
                 * call to qed_chain_produce/consume. Instead, the indices
                 * will be advanced to page_cnt and then will be wrapped to 0.
                 */
                u32 reset_val = p_chain->page_cnt - 1;

                if (is_chain_u16(p_chain)) {
                        p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
                        p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
                        p_chain->pbl.c.u32.prod_page_idx = reset_val;
                        p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }

        switch (p_chain->intended_use) {
        case QED_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        qed_chain_recycle_consumed(p_chain);
                break;

        case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
        case QED_CHAIN_USE_TO_PRODUCE:
        default:
                /* Do nothing */
                break;
        }
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
                                         u32 page_cnt,
                                         u8 elem_size,
                                         enum qed_chain_use_mode intended_use,
                                         enum qed_chain_mode mode,
                                         enum qed_chain_cnt_type cnt_type)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr = NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size = elem_size;
        p_chain->intended_use = (u8)intended_use;
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;

        p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;

        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = NULL;
        p_chain->pbl.pp_addr_tbl = NULL;
}
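
/* Initialization sketch (illustrative only): a single-page chain built from a
 * caller-provided DMA-coherent buffer. The device (pdev) and element type
 * (struct my_elem) are hypothetical; real chains are normally set up by the
 * qed core's chain allocation code rather than open-coded like this.
 *
 *        struct qed_chain chain;
 *        dma_addr_t phys;
 *        void *virt;
 *
 *        virt = dma_alloc_coherent(&pdev->dev, QED_CHAIN_PAGE_SIZE, &phys,
 *                                  GFP_KERNEL);
 *        qed_chain_init_params(&chain, 1, sizeof(struct my_elem),
 *                              QED_CHAIN_USE_TO_PRODUCE,
 *                              QED_CHAIN_MODE_SINGLE, QED_CHAIN_CNT_TYPE_U16);
 *        qed_chain_init_mem(&chain, virt, phys);
 *        qed_chain_reset(&chain);
 */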

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of allocated buffer's beginning
 * @param p_phys_addr   physical address of allocated buffer's beginning
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
                                      void *p_virt_addr, dma_addr_t p_phys_addr)
{
        p_chain->p_virt_addr = p_virt_addr;
        p_chain->p_phys_addr = p_phys_addr;
}

/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl    pointer to a pre-allocated side table which will hold
 *                      virtual page addresses.
 * @param p_phys_pbl    pointer to a pre-allocated side table which will hold
 *                      physical page addresses.
 * @param pp_addr_tbl   pointer to a pre-allocated side table which will hold
 *                      the virtual addresses of the chain pages.
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
                                          void *p_virt_pbl,
                                          dma_addr_t p_phys_pbl,
                                          struct addr_tbl_entry *pp_addr_tbl)
{
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}

/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of a chain page of which the next
 *                      pointer element is initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
                             void *p_virt_curr,
                             void *p_virt_next, dma_addr_t p_phys_next)
{
        struct qed_chain_next *p_next;
        u32 size;

        size = p_chain->elem_size * p_chain->usable_per_page;
        p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

        DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

        p_next->next_virt = p_virt_next;
}
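
/* Linking sketch (illustrative only): turning two pre-allocated pages of a
 * QED_CHAIN_MODE_NEXT_PTR chain into a ring by pointing the last page back at
 * the first. The page addresses (virt0/phys0, virt1/phys1) are hypothetical.
 *
 *        qed_chain_init_next_ptr_elem(&chain, virt0, virt1, phys1);
 *        qed_chain_init_next_ptr_elem(&chain, virt1, virt0, phys0);
 */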

/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
        struct qed_chain_next *p_next = NULL;
        void *p_virt_addr = NULL;
        u32 size, last_page_idx;

        if (!p_chain->p_virt_addr)
                goto out;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                size = p_chain->elem_size * p_chain->usable_per_page;
                p_virt_addr = p_chain->p_virt_addr;
                p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
                        p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                           size);
                }
                break;
        case QED_CHAIN_MODE_SINGLE:
                p_virt_addr = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
                p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
                break;
        }
        /* p_virt_addr points at this stage to the last page of the chain */
        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
        p_virt_addr = (u8 *)p_virt_addr + size;
out:
        return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
                                      u32 prod_idx, void *p_prod_elem)
{
        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                u32 cur_prod, page_mask, page_cnt, page_diff;

                cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
                           p_chain->u.chain32.prod_idx;

                /* Assume that number of elements in a page is power of 2 */
                page_mask = ~p_chain->elem_per_page_mask;

                /* Use "cur_prod - 1" and "prod_idx - 1" since the producer
                 * index reaches the first element of the next page before the
                 * page index is incremented. See qed_chain_produce().
                 * Index wrap around is not a problem because the difference
                 * between current and given producer indices is always
                 * positive and lower than the chain's capacity.
                 */
                page_diff = (((cur_prod - 1) & page_mask) -
                             ((prod_idx - 1) & page_mask)) /
                            p_chain->elem_per_page;

                page_cnt = qed_chain_get_page_cnt(p_chain);
                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.prod_page_idx =
                                (p_chain->pbl.c.u16.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
                else
                        p_chain->pbl.c.u32.prod_page_idx =
                                (p_chain->pbl.c.u32.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx = (u16)prod_idx;
        else
                p_chain->u.chain32.prod_idx = prod_idx;
        p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
        u32 i, page_cnt;

        if (p_chain->mode != QED_CHAIN_MODE_PBL)
                return;

        page_cnt = qed_chain_get_page_cnt(p_chain);

        for (i = 0; i < page_cnt; i++)
                memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
                       QED_CHAIN_PAGE_SIZE);
}

#endif /* _QED_CHAIN_H */