/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};
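
/* One __dispatch entry is queued on droq->dispatch_list for each slow-path
 * packet; the list is drained after the fast-path loop in
 * octeon_droq_process_packets()/octeon_droq_process_poll_pkts().
 */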
struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode - the opcode for which the dispatch argument
 *                 is to be checked.
 * @param subcode - the subcode for which the dispatch argument
 *                  is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
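
/* Illustrative use (names from liquidio_common.h, assumed here): a module
 * that earlier registered a handler via octeon_register_dispatch_fn() for,
 * say, (OPCODE_NIC, OPCODE_NIC_CORE_DRV_ACTIVE) gets its argument back with:
 *
 *	void *arg = octeon_get_dispatch_arg(oct, OPCODE_NIC,
 *					    OPCODE_NIC_CORE_DRV_ACTIVE);
 */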
void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
			      u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;

	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

/** Check for packets on Droq. This function should be called with lock held.
 * @param droq - Droq on which count is checked.
 * @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);
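
	/* pkts_sent_reg is a cumulative hardware counter; the delta since
	 * the previous read is the number of new packets that arrived.
	 */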
	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
EXPORT_SYMBOL_GPL(octeon_droq_check_hw_for_pkts);

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));
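
	/* Worked example: with buffer_size = 2048 the loop above runs 32
	 * times (32 * 2KB = 64KB), so a ring with max_count = 1024 yields
	 * max_empty_descs = 1024 - 32 = 992.
	 */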
	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;
		if (!pg_info)
			continue;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
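		/* Buffers are page-backed: pg_info records the page and its
		 * DMA mapping so the refill path can later recycle or reuse
		 * the page instead of allocating a fresh one.
		 */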
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);
	oct->io_qmask.oq &= ~(1ULL << q_no);
	vfree(oct->droq[q_no]);
	oct->droq[q_no] = NULL;
	oct->num_oqs--;

	return 0;
}
EXPORT_SYMBOL_GPL(octeon_delete_droq);

int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;

	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}			/* while */
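
	/* Example: with refill_idx = 4, read_idx = 8 and undispatched buffers
	 * still sitting at slots 6 and 7, the scan above slides them down to
	 * slots 4 and 5 so the region awaiting refill stays contiguous.
	 */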
	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[
				     droq->refill_idx].buffer);

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero: the refill count does not
	 * change in the pull-up pass. We only moved buffers to close the gap
	 * in the ring and still have the same no. of buffers to refill.
	 */
	return desc_refilled;
}

/** Check if we can allocate packets to get out of oom.
 * @param droq - Droq being checked.
 * @return 1 if fails to refill minimum
 */
int octeon_retry_droq_refill(struct octeon_droq *droq)
{
	struct octeon_device *oct = droq->oct_dev;
	int desc_refilled, reschedule = 1;
	u32 pkts_credit;

	pkts_credit = readl(droq->pkts_credit_reg);
	desc_refilled = octeon_droq_refill(oct, droq);
	if (desc_refilled) {
		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory
		 * is accurate.
		 */
		wmb();
		writel(desc_refilled, droq->pkts_credit_reg);
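
		/* CN23XX_SLI_DEF_BP is the backpressure watermark: once the
		 * hardware holds at least this many credits again, the OOM
		 * retry work no longer needs to reschedule itself.
		 */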
		if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
			reschedule = 0;
	}

	return reschedule;
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
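	/* e.g. a 3000-byte packet with 2048-byte buffers spans
	 * DIV_ROUND_UP(3000, 2048) = 2 descriptors.
	 */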
	return DIV_ROUND_UP(total_len, buf_size);
}

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length += OCTNET_FRM_LENGTH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
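		/* Note: nothing is freed here. read_idx just skips past the
		 * dropped buffers and refill_count is bumped, so the refill
		 * path finds the buffers still in place and reuses them.
		 */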
		droq->refill_count += buf_cnt;
	}
}

static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	u32 pkt, total_len = 0, pkt_count, retval;
	struct octeon_droq_info *info;
	union octeon_rh *rh;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		rh = &info->rh;

		info->length += OCTNET_FRM_LENGTH_SIZE;
		rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[
							idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			if (desc_refilled) {
				/* Flush the droq descriptor data to memory to
				 * be sure that when we update the credits the
				 * data in memory is accurate.
				 */
				wmb();
				writel(desc_refilled, droq->pkts_credit_reg);
			}
		}
	}			/* for (each packet)... */

	/* Update the stats with the number of packets and bytes processed. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	retval = pkt;
	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		retval = pkts_to_process;
	}

	atomic_sub(retval, &droq->pkts_pending);

	if (droq->refill_count >= droq->refill_threshold &&
	    readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
		octeon_droq_check_hw_for_pkts(droq);

		/* Make sure there are no pkts_pending */
		if (!atomic_read(&droq->pkts_pending))
			octeon_schedule_rxq_oom_work(oct, droq);
	}

	return retval;
}

int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0;
	struct list_head *tmp, *tmp2;

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count)
		return 0;

	if (pkt_count > budget)
		pkt_count = budget;

	octeon_droq_fast_process_packets(oct, droq, pkt_count);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets pending, schedule tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(octeon_droq_process_packets);

/*
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */

static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		total_pkts_processed += pkts_processed;
	}

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}

/* Enable Pkt Interrupt */
int
octeon_enable_irq(struct octeon_device *oct, u32 q_no)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		unsigned long flags;
		u32 value;

		spin_lock_irqsave
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value);

		/* don't bother flushing the enables */

		spin_unlock_irqrestore
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
	}
		break;
	case OCTEON_CN23XX_PF_VID:
		lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		break;

	case OCTEON_CN23XX_VF_VID:
		lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__);
		return 1;
	}

	return 0;
}
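
/* Illustrative caller (modeled loosely on the NIC module's queue setup; the
 * exact handler and field values below are this comment's assumption, not a
 * contract enforced here):
 *
 *	struct octeon_droq_ops droq_ops;
 *
 *	memset(&droq_ops, 0, sizeof(droq_ops));
 *	droq_ops.fptr = rx_pkt_handler;
 *	droq_ops.farg = netdev;
 *	droq_ops.drop_on_max = 1;
 *	octeon_register_droq_ops(oct, q_no, &droq_ops);
 */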
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];
	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	return 0;
}

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(octeon_unregister_droq_ops);

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		goto create_droq_fail;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		goto create_droq_fail;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as settings are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;

create_droq_fail:
	return -ENOMEM;
}