1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/prefetch.h>
17 #include "vxge-traffic.h"
18 #include "vxge-config.h"
19 #include "vxge-main.h"
22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23 * @vp: Virtual Path handle.
25 * Enable vpath interrupts. The function is to be executed as the last step in
26 * the vpath initialization sequence.
28 * See also: vxge_hw_vpath_intr_disable()
30 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
32 struct __vxge_hw_virtualpath *vpath;
33 struct vxge_hw_vpath_reg __iomem *vp_reg;
34 enum vxge_hw_status status = VXGE_HW_OK;
36 status = VXGE_HW_ERR_INVALID_HANDLE;
42 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
43 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
47 vp_reg = vpath->vp_reg;
49 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
52 &vp_reg->general_errors_reg);
54 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
55 &vp_reg->pci_config_errors_reg);
57 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
58 &vp_reg->mrpcim_to_vpath_alarm_reg);
60 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
61 &vp_reg->srpcim_to_vpath_alarm_reg);
63 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
64 &vp_reg->vpath_ppif_int_status);
66 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
67 &vp_reg->srpcim_msg_to_vpath_reg);
69 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
70 &vp_reg->vpath_pcipif_int_status);
72 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
73 &vp_reg->prc_alarm_reg);
75 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
76 &vp_reg->wrdma_alarm_status);
78 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
79 &vp_reg->asic_ntwk_vp_err_reg);
81 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
82 &vp_reg->xgmac_vp_int_status);
84 readq(&vp_reg->vpath_general_int_status);
86 /* Mask unwanted interrupts */
88 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
89 &vp_reg->vpath_pcipif_int_mask);
91 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
92 &vp_reg->srpcim_msg_to_vpath_mask);
94 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
95 &vp_reg->srpcim_to_vpath_alarm_mask);
97 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
98 &vp_reg->mrpcim_to_vpath_alarm_mask);
100 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
101 &vp_reg->pci_config_errors_mask);
103 /* Unmask the individual interrupts */
105 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
106 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
109 &vp_reg->general_errors_mask);
111 __vxge_hw_pio_mem_write32_upper(
112 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
113 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
118 &vp_reg->kdfcctl_errors_mask);
120 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122 __vxge_hw_pio_mem_write32_upper(
123 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
124 &vp_reg->prc_alarm_mask);
126 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129 if (vpath->hldev->first_vp_id != vpath->vp_id)
130 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
131 &vp_reg->asic_ntwk_vp_err_mask);
133 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
134 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
136 &vp_reg->asic_ntwk_vp_err_mask);
138 __vxge_hw_pio_mem_write32_upper(0,
139 &vp_reg->vpath_general_int_mask);
146 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
147 * @vp: Virtual Path handle.
149 * Disable vpath interrupts. The function is to be executed before the vpath
150 * is closed or reset.
152 * See also: vxge_hw_vpath_intr_enable()
154 enum vxge_hw_status vxge_hw_vpath_intr_disable(
155 struct __vxge_hw_vpath_handle *vp)
157 struct __vxge_hw_virtualpath *vpath;
158 enum vxge_hw_status status = VXGE_HW_OK;
159 struct vxge_hw_vpath_reg __iomem *vp_reg;
161 status = VXGE_HW_ERR_INVALID_HANDLE;
167 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
168 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
171 vp_reg = vpath->vp_reg;
173 __vxge_hw_pio_mem_write32_upper(
174 (u32)VXGE_HW_INTR_MASK_ALL,
175 &vp_reg->vpath_general_int_mask);
177 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
179 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
180 &vp_reg->general_errors_mask);
182 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
183 &vp_reg->pci_config_errors_mask);
185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 &vp_reg->mrpcim_to_vpath_alarm_mask);
188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 &vp_reg->srpcim_to_vpath_alarm_mask);
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->vpath_ppif_int_mask);
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->srpcim_msg_to_vpath_mask);
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->vpath_pcipif_int_mask);
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->wrdma_alarm_mask);
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->prc_alarm_mask);
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->xgmac_vp_int_mask);
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->asic_ntwk_vp_err_mask);
216 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
218 struct vxge_hw_vpath_reg __iomem *vp_reg;
219 struct vxge_hw_vp_config *config;
222 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
225 vp_reg = fifo->vp_reg;
226 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
228 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
229 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
230 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
231 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
232 fifo->tim_tti_cfg1_saved = val64;
233 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
239 u64 val64 = ring->tim_rti_cfg1_saved;
241 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
242 ring->tim_rti_cfg1_saved = val64;
243 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
246 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
248 u64 val64 = fifo->tim_tti_cfg3_saved;
249 u64 timer = (fifo->rtimer * 1000) / 272;
251 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
253 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
254 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
256 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
257 /* tti_cfg3_saved is not updated again because it is
258 * initialized at one place only - init time.
262 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
264 u64 val64 = ring->tim_rti_cfg3_saved;
265 u64 timer = (ring->rtimer * 1000) / 272;
267 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
269 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
270 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
272 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
273 /* rti_cfg3_saved is not updated again because it is
274 * initialized at one place only - init time.
279 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
280 * @channel: Channel for rx or tx handle
283 * The function masks the msix interrupt for the given msix_id
287 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
290 __vxge_hw_pio_mem_write32_upper(
291 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
292 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
296 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
297 * @channel: Channel for rx or tx handle
300 * The function unmasks the msix interrupt for the given msix_id
305 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
308 __vxge_hw_pio_mem_write32_upper(
309 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
310 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
314 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
315 * @channel: Channel for rx or tx handle
318 * The function clears the msix one-shot interrupt for the given msix_id
319 * when the device is configured in MSIX one-shot mode
323 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
325 __vxge_hw_pio_mem_write32_upper(
326 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
327 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
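/*
 * Usage sketch (illustrative only, not part of the upstream driver): a
 * hypothetical helper showing how the three channel MSI-X calls above fit
 * together.  The helper name and the @one_shot flag are assumptions made
 * for this example.
 */
static void __maybe_unused
vxge_example_channel_msix_service(struct __vxge_hw_channel *channel,
                                  int msix_id, bool one_shot)
{
        /* keep the vector from firing again while completions are drained */
        vxge_hw_channel_msix_mask(channel, msix_id);

        /* ... process the completed descriptors for this channel here ... */

        if (one_shot)
                /* one-shot mode: the vector must be cleared to re-arm it */
                vxge_hw_channel_msix_clear(channel, msix_id);
        else
                vxge_hw_channel_msix_unmask(channel, msix_id);
}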
331 * vxge_hw_device_set_intr_type - Updates the configuration
332 * with new interrupt type.
333 * @hldev: HW device handle.
334 * @intr_mode: New interrupt type
336 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
339 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
340 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
341 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
342 (intr_mode != VXGE_HW_INTR_MODE_DEF))
343 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
345 hldev->config.intr_mode = intr_mode;
350 * vxge_hw_device_intr_enable - Enable interrupts.
351 * @hldev: HW device handle.
355 * Enable Titan interrupts. The function is to be executed as the last step in
356 * the Titan initialization sequence.
358 * See also: vxge_hw_device_intr_disable()
360 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
366 vxge_hw_device_mask_all(hldev);
368 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
370 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
373 vxge_hw_vpath_intr_enable(
374 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
377 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
378 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
379 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
382 writeq(val64, &hldev->common_reg->tim_int_status0);
384 writeq(~val64, &hldev->common_reg->tim_int_mask0);
387 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
388 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
391 __vxge_hw_pio_mem_write32_upper(val32,
392 &hldev->common_reg->tim_int_status1);
394 __vxge_hw_pio_mem_write32_upper(~val32,
395 &hldev->common_reg->tim_int_mask1);
399 val64 = readq(&hldev->common_reg->titan_general_int_status);
401 vxge_hw_device_unmask_all(hldev);
405 * vxge_hw_device_intr_disable - Disable Titan interrupts.
406 * @hldev: HW device handle.
410 * Disable Titan interrupts.
412 * See also: vxge_hw_device_intr_enable()
414 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
418 vxge_hw_device_mask_all(hldev);
420 /* mask all the tim interrupts */
421 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
422 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
423 &hldev->common_reg->tim_int_mask1);
425 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
427 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
430 vxge_hw_vpath_intr_disable(
431 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
436 * vxge_hw_device_mask_all - Mask all device interrupts.
437 * @hldev: HW device handle.
439 * Mask all device interrupts.
441 * See also: vxge_hw_device_unmask_all()
443 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
447 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
448 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
450 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
451 &hldev->common_reg->titan_mask_all_int);
455 * vxge_hw_device_unmask_all - Unmask all device interrupts.
456 * @hldev: HW device handle.
458 * Unmask all device interrupts.
460 * See also: vxge_hw_device_mask_all()
462 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
466 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
467 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
469 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
470 &hldev->common_reg->titan_mask_all_int);
474 * vxge_hw_device_flush_io - Flush io writes.
475 * @hldev: HW device handle.
477 * The function performs a read operation to flush io writes.
481 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
483 readl(&hldev->common_reg->titan_general_int_status);
487 * __vxge_hw_device_handle_error - Handle error
490 * @type: Error type. Please see enum vxge_hw_event{}
494 static enum vxge_hw_status
495 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
496 enum vxge_hw_event type)
499 case VXGE_HW_EVENT_UNKNOWN:
501 case VXGE_HW_EVENT_RESET_START:
502 case VXGE_HW_EVENT_RESET_COMPLETE:
503 case VXGE_HW_EVENT_LINK_DOWN:
504 case VXGE_HW_EVENT_LINK_UP:
506 case VXGE_HW_EVENT_ALARM_CLEARED:
508 case VXGE_HW_EVENT_ECCERR:
509 case VXGE_HW_EVENT_MRPCIM_ECCERR:
511 case VXGE_HW_EVENT_FIFO_ERR:
512 case VXGE_HW_EVENT_VPATH_ERR:
513 case VXGE_HW_EVENT_CRITICAL_ERR:
514 case VXGE_HW_EVENT_SERR:
516 case VXGE_HW_EVENT_SRPCIM_SERR:
517 case VXGE_HW_EVENT_MRPCIM_SERR:
519 case VXGE_HW_EVENT_SLOT_FREEZE:
527 if (hldev->uld_callbacks->crit_err)
528 hldev->uld_callbacks->crit_err(hldev,
536 * __vxge_hw_device_handle_link_down_ind
537 * @hldev: HW device handle.
539 * Link down indication handler. The function is invoked by HW when
540 * Titan indicates that the link is down.
542 static enum vxge_hw_status
543 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
546 * If the link is already marked down, there is nothing to do; return.
548 if (hldev->link_state == VXGE_HW_LINK_DOWN)
551 hldev->link_state = VXGE_HW_LINK_DOWN;
554 if (hldev->uld_callbacks->link_down)
555 hldev->uld_callbacks->link_down(hldev);
561 * __vxge_hw_device_handle_link_up_ind
562 * @hldev: HW device handle.
564 * Link up indication handler. The function is invoked by HW when
565 * Titan indicates that the link is up for a programmable amount of time.
567 static enum vxge_hw_status
568 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
571 * If the link is already marked up, there is nothing to do; return.
573 if (hldev->link_state == VXGE_HW_LINK_UP)
576 hldev->link_state = VXGE_HW_LINK_UP;
579 if (hldev->uld_callbacks->link_up)
580 hldev->uld_callbacks->link_up(hldev);
586 * __vxge_hw_vpath_alarm_process - Process Alarms.
587 * @vpath: Virtual Path.
588 * @skip_alarms: Do not clear the alarms
590 * Process vpath alarms.
593 static enum vxge_hw_status
594 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
600 struct __vxge_hw_device *hldev = NULL;
601 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
603 struct vxge_hw_vpath_stats_sw_info *sw_stats;
604 struct vxge_hw_vpath_reg __iomem *vp_reg;
607 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
612 hldev = vpath->hldev;
613 vp_reg = vpath->vp_reg;
614 alarm_status = readq(&vp_reg->vpath_general_int_status);
616 if (alarm_status == VXGE_HW_ALL_FOXES) {
617 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
622 sw_stats = vpath->sw_stats;
624 if (alarm_status & ~(
625 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
626 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
627 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
628 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
629 sw_stats->error_stats.unknown_alarms++;
631 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
636 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
638 val64 = readq(&vp_reg->xgmac_vp_int_status);
641 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
643 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
646 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
648 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
650 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
652 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
654 sw_stats->error_stats.network_sustained_fault++;
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
658 &vp_reg->asic_ntwk_vp_err_mask);
660 __vxge_hw_device_handle_link_down_ind(hldev);
661 alarm_event = VXGE_HW_SET_LEVEL(
662 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
668 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
670 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
672 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
675 sw_stats->error_stats.network_sustained_ok++;
678 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
679 &vp_reg->asic_ntwk_vp_err_mask);
681 __vxge_hw_device_handle_link_up_ind(hldev);
682 alarm_event = VXGE_HW_SET_LEVEL(
683 VXGE_HW_EVENT_LINK_UP, alarm_event);
686 writeq(VXGE_HW_INTR_MASK_ALL,
687 &vp_reg->asic_ntwk_vp_err_reg);
689 alarm_event = VXGE_HW_SET_LEVEL(
690 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
697 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
699 pic_status = readq(&vp_reg->vpath_ppif_int_status);
702 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
704 val64 = readq(&vp_reg->general_errors_reg);
705 mask64 = readq(&vp_reg->general_errors_mask);
708 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
710 sw_stats->error_stats.ini_serr_det++;
712 alarm_event = VXGE_HW_SET_LEVEL(
713 VXGE_HW_EVENT_SERR, alarm_event);
717 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
719 sw_stats->error_stats.dblgen_fifo0_overflow++;
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
726 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
728 sw_stats->error_stats.statsb_pif_chain_error++;
731 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
733 sw_stats->error_stats.statsb_drop_timeout++;
736 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
738 sw_stats->error_stats.target_illegal_access++;
741 writeq(VXGE_HW_INTR_MASK_ALL,
742 &vp_reg->general_errors_reg);
743 alarm_event = VXGE_HW_SET_LEVEL(
744 VXGE_HW_EVENT_ALARM_CLEARED,
750 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
752 val64 = readq(&vp_reg->kdfcctl_errors_reg);
753 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
756 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
758 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
760 alarm_event = VXGE_HW_SET_LEVEL(
761 VXGE_HW_EVENT_FIFO_ERR,
766 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
768 sw_stats->error_stats.kdfcctl_fifo0_poison++;
770 alarm_event = VXGE_HW_SET_LEVEL(
771 VXGE_HW_EVENT_FIFO_ERR,
776 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
778 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
780 alarm_event = VXGE_HW_SET_LEVEL(
781 VXGE_HW_EVENT_FIFO_ERR,
786 writeq(VXGE_HW_INTR_MASK_ALL,
787 &vp_reg->kdfcctl_errors_reg);
788 alarm_event = VXGE_HW_SET_LEVEL(
789 VXGE_HW_EVENT_ALARM_CLEARED,
796 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
798 val64 = readq(&vp_reg->wrdma_alarm_status);
800 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
802 val64 = readq(&vp_reg->prc_alarm_reg);
803 mask64 = readq(&vp_reg->prc_alarm_mask);
805 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
807 sw_stats->error_stats.prc_ring_bumps++;
809 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
811 sw_stats->error_stats.prc_rxdcm_sc_err++;
813 alarm_event = VXGE_HW_SET_LEVEL(
814 VXGE_HW_EVENT_VPATH_ERR,
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
820 sw_stats->error_stats.prc_rxdcm_sc_abort++;
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
829 sw_stats->error_stats.prc_quanta_size_err++;
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
837 writeq(VXGE_HW_INTR_MASK_ALL,
838 &vp_reg->prc_alarm_reg);
839 alarm_event = VXGE_HW_SET_LEVEL(
840 VXGE_HW_EVENT_ALARM_CLEARED,
846 hldev->stats.sw_dev_err_stats.vpath_alarms++;
848 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
849 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
852 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
854 if (alarm_event == VXGE_HW_EVENT_SERR)
855 return VXGE_HW_ERR_CRITICAL;
857 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
858 VXGE_HW_ERR_SLOT_FREEZE :
859 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
864 * vxge_hw_device_begin_irq - Begin IRQ processing.
865 * @hldev: HW device handle.
866 * @skip_alarms: Do not clear the alarms
867 * @reason: "Reason" for the interrupt, the value of Titan's
868 * general_int_status register.
870 * The function performs two actions: it first checks whether (in the shared
871 * IRQ case) the interrupt was raised by the device, and then it masks the device interrupts.
874 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
875 * bridge. Therefore, two back-to-back interrupts are potentially possible.
877 * Returns: 0, if the interrupt is not "ours" (note that in this case the
878 * device remains enabled).
879 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter status.
882 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
883 u32 skip_alarms, u64 *reason)
889 enum vxge_hw_status ret = VXGE_HW_OK;
891 val64 = readq(&hldev->common_reg->titan_general_int_status);
893 if (unlikely(!val64)) {
894 /* not Titan interrupt */
896 ret = VXGE_HW_ERR_WRONG_IRQ;
900 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
902 adapter_status = readq(&hldev->common_reg->adapter_status);
904 if (adapter_status == VXGE_HW_ALL_FOXES) {
906 __vxge_hw_device_handle_error(hldev,
907 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
909 ret = VXGE_HW_ERR_SLOT_FREEZE;
914 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
918 vpath_mask = hldev->vpaths_deployed >>
919 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
922 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
923 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
928 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
933 enum vxge_hw_status error_level = VXGE_HW_OK;
935 hldev->stats.sw_dev_err_stats.vpath_alarms++;
937 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
939 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
942 ret = __vxge_hw_vpath_alarm_process(
943 &hldev->virtual_paths[i], skip_alarms);
945 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
947 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
948 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
959 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
960 * condition that has caused the Tx and RX interrupt.
963 * Acknowledge (that is, clear) the condition that has caused
964 * the Tx and Rx interrupt.
965 * See also: vxge_hw_device_begin_irq(),
966 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
968 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
971 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
972 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
973 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
974 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
975 &hldev->common_reg->tim_int_status0);
978 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
979 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
980 __vxge_hw_pio_mem_write32_upper(
981 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
982 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
983 &hldev->common_reg->tim_int_status1);
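/*
 * Usage sketch (illustrative only, not the upstream ISR): a minimal INTA-mode
 * top half built from vxge_hw_device_begin_irq() and
 * vxge_hw_device_clear_tx_rx().  The helper name and its return convention
 * (true when the interrupt was ours) are assumptions; descriptor processing
 * is left to a polling context such as vxge_hw_vpath_poll_rx()/poll_tx().
 */
static bool __maybe_unused
vxge_example_inta_top_half(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status;
        u64 reason;

        status = vxge_hw_device_begin_irq(hldev, 0, &reason);
        if (status == VXGE_HW_ERR_WRONG_IRQ)
                return false;   /* shared line, not our interrupt */
        if (status != VXGE_HW_OK)
                return true;    /* slot freeze or other critical condition */

        /* @reason now holds titan_general_int_status; a real handler would
         * inspect it to decide which vpaths to service. */
        vxge_hw_device_clear_tx_rx(hldev);
        return true;
}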
988 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
990 * @dtrh: Buffer to return the DTR pointer
992 * Allocates a dtr from the reserve array. If the reserve array is empty,
993 * it swaps the reserve and free arrays.
996 static enum vxge_hw_status
997 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
999 if (channel->reserve_ptr - channel->reserve_top > 0) {
1001 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1006 /* switch between empty and full arrays */
1008 /* the idea behind this design is that by keeping the free and reserve
1009 * arrays separate we decouple the irq and non-irq parts,
1010 * i.e. no additional locking is needed when we free a resource */
1012 if (channel->length - channel->free_ptr > 0) {
1013 swap(channel->reserve_arr, channel->free_arr);
1014 channel->reserve_ptr = channel->length;
1015 channel->reserve_top = channel->free_ptr;
1016 channel->free_ptr = channel->length;
1018 channel->stats->reserve_free_swaps_cnt++;
1020 goto _alloc_after_swap;
1023 channel->stats->full_cnt++;
1026 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1030 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1031 * @channelh: Channel
1032 * @dtrh: DTR pointer
1034 * Posts a dtr to the work array.
1038 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1040 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1042 channel->work_arr[channel->post_index++] = dtrh;
1045 if (channel->post_index == channel->length)
1046 channel->post_index = 0;
1050 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1052 * @dtr: Buffer to return the next completed DTR pointer
1054 * Returns the next completed dtr without removing it from the work array
1058 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1060 vxge_assert(channel->compl_index < channel->length);
1062 *dtrh = channel->work_arr[channel->compl_index];
1067 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1068 * @channel: Channel handle
1070 * Removes the next completed dtr from the work array
1073 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1075 channel->work_arr[channel->compl_index] = NULL;
1078 if (++channel->compl_index == channel->length)
1079 channel->compl_index = 0;
1081 channel->stats->total_compl_cnt++;
1085 * vxge_hw_channel_dtr_free - Frees a dtr
1086 * @channel: Channel handle
1089 * Returns the dtr to the free array
1092 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1094 channel->free_arr[--channel->free_ptr] = dtrh;
1098 * vxge_hw_channel_dtr_count
1099 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1101 * Retrieve the number of DTRs available. This function cannot be called
1102 * from the data path; ring_initial_replenish() is the only user.
1104 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1106 return (channel->reserve_ptr - channel->reserve_top) +
1107 (channel->length - channel->free_ptr);
1111 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1112 * @ring: Handle to the ring object used for receive
1113 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1114 * with a valid handle.
1116 * Reserve an Rx descriptor for subsequent filling-in by the driver
1117 * and posting on the corresponding channel (@channelh)
1118 * via vxge_hw_ring_rxd_post().
1120 * Returns: VXGE_HW_OK - success.
1121 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1124 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1127 enum vxge_hw_status status;
1128 struct __vxge_hw_channel *channel;
1130 channel = &ring->channel;
1132 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1134 if (status == VXGE_HW_OK) {
1135 struct vxge_hw_ring_rxd_1 *rxdp =
1136 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1138 rxdp->control_0 = rxdp->control_1 = 0;
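/*
 * Usage sketch (illustrative only): a hypothetical replenish loop built on
 * vxge_hw_ring_rxd_reserve()/vxge_hw_ring_rxd_post().  Mapping the receive
 * buffer and writing its DMA address/size into the reserved RxD depends on
 * the caller's buffer scheme and is only hinted at in a comment.
 */
static void __maybe_unused
vxge_example_ring_replenish(struct __vxge_hw_ring *ring, int budget)
{
        void *rxdh;

        while (budget-- > 0) {
                if (vxge_hw_ring_rxd_reserve(ring, &rxdh) != VXGE_HW_OK)
                        break;  /* no free descriptors left */

                /* ... map an skb and set its DMA address and size in the
                 * reserved RxD here ... */

                vxge_hw_ring_rxd_post(ring, rxdh);      /* hand it to the HW */
        }
}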
1145 * vxge_hw_ring_rxd_free - Free descriptor.
1146 * @ring: Handle to the ring object used for receive
1147 * @rxdh: Descriptor handle.
1149 * Free the reserved descriptor. This operation is "symmetrical" to
1150 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's lifecycle.
1153 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can be:
1156 * - reserved (vxge_hw_ring_rxd_reserve);
1158 * - posted (vxge_hw_ring_rxd_post);
1160 * - completed (vxge_hw_ring_rxd_next_completed);
1162 * - and recycled again (vxge_hw_ring_rxd_free).
1164 * For alternative state transitions and more details please refer to
1168 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1170 struct __vxge_hw_channel *channel;
1172 channel = &ring->channel;
1174 vxge_hw_channel_dtr_free(channel, rxdh);
1179 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1180 * @ring: Handle to the ring object used for receive
1181 * @rxdh: Descriptor handle.
1183 * This routine prepares an rxd and posts it to the ring's work array.
1185 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1187 struct __vxge_hw_channel *channel;
1189 channel = &ring->channel;
1191 vxge_hw_channel_dtr_post(channel, rxdh);
1195 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1196 * @ring: Handle to the ring object used for receive
1197 * @rxdh: Descriptor handle.
1199 * Processes rxd after post
1201 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1203 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1204 struct __vxge_hw_channel *channel;
1206 channel = &ring->channel;
1208 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1210 if (ring->stats->common_stats.usage_cnt > 0)
1211 ring->stats->common_stats.usage_cnt--;
1215 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1216 * @ring: Handle to the ring object used for receive
1217 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1219 * Post descriptor on the ring.
1220 * Prior to posting, the descriptor should be filled in accordance with the
1221 * Host/Titan interface specification for a given service (LL, etc.).
1224 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1226 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1227 struct __vxge_hw_channel *channel;
1229 channel = &ring->channel;
1232 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1234 vxge_hw_channel_dtr_post(channel, rxdh);
1236 if (ring->stats->common_stats.usage_cnt > 0)
1237 ring->stats->common_stats.usage_cnt--;
1241 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1242 * @ring: Handle to the ring object used for receive
1243 * @rxdh: Descriptor handle.
1245 * Processes rxd after post with memory barrier.
1247 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1250 vxge_hw_ring_rxd_post_post(ring, rxdh);
1254 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1255 * @ring: Handle to the ring object used for receive
1256 * @rxdh: Descriptor handle. Returned by HW.
1257 * @t_code: Transfer code, as per Titan User Guide,
1258 * Receive Descriptor Format. Returned by HW.
1260 * Retrieve the _next_ completed descriptor.
1261 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1262 * driver of new completed descriptors. After that
1263 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1264 * completions (the very first completion is passed by HW via
1265 * vxge_hw_ring_callback_f).
1267 * Implementation-wise, the driver is free to call
1268 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1269 * ring callback, or in a deferred fashion and separate (from HW) context.
1272 * Non-zero @t_code means failure to fill-in receive buffer(s)
1273 * of the descriptor.
1274 * For instance, parity error detected during the data transfer.
1275 * In this case Titan will complete the descriptor and indicate
1276 * to the host that the received data is not to be used.
1277 * For details please refer to Titan User Guide.
1279 * Returns: VXGE_HW_OK - success.
1280 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1281 * are currently available for processing.
1283 * See also: vxge_hw_ring_callback_f{},
1284 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1286 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1287 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1289 struct __vxge_hw_channel *channel;
1290 struct vxge_hw_ring_rxd_1 *rxdp;
1291 enum vxge_hw_status status = VXGE_HW_OK;
1294 channel = &ring->channel;
1296 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1300 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1304 control_0 = rxdp->control_0;
1305 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1306 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1308 /* check whether it is not the end */
1309 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1311 vxge_assert((rxdp)->host_control !=
1315 vxge_hw_channel_dtr_complete(channel);
1317 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1319 ring->stats->common_stats.usage_cnt++;
1320 if (ring->stats->common_stats.usage_max <
1321 ring->stats->common_stats.usage_cnt)
1322 ring->stats->common_stats.usage_max =
1323 ring->stats->common_stats.usage_cnt;
1325 status = VXGE_HW_OK;
1329 /* reset it. since we don't want to return
1330 * garbage to the driver */
1332 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1338 * vxge_hw_ring_handle_tcode - Handle transfer code.
1339 * @ring: Handle to the ring object used for receive
1340 * @rxdh: Descriptor handle.
1341 * @t_code: One of the enumerated (and documented in the Titan user guide)
1344 * Handle descriptor's transfer code. The latter comes with each completed
1347 * Returns: one of the enum vxge_hw_status{} enumerated types.
1348 * VXGE_HW_OK - for success.
1349 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1351 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1352 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1354 struct __vxge_hw_channel *channel;
1355 enum vxge_hw_status status = VXGE_HW_OK;
1357 channel = &ring->channel;
1359 /* Drop the packet if the t_code is not supported,
1360 * unless it is 0x5 (an unparseable packet, such as one
1361 * with an unknown IPv6 header).
1364 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1365 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1366 status = VXGE_HW_OK;
1370 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1371 status = VXGE_HW_ERR_INVALID_TCODE;
1375 ring->stats->rxd_t_code_err_cnt[t_code]++;
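/*
 * Usage sketch (illustrative only): a hypothetical loop draining completed
 * RxDs with vxge_hw_ring_rxd_next_completed() and handing the transfer code
 * to vxge_hw_ring_handle_tcode().  What is done with the received buffer
 * (pass it up the stack, recycle it) is the caller's business.
 */
static void __maybe_unused
vxge_example_ring_drain(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        u8 t_code;

        while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
                                                        VXGE_HW_OK) {
                if (t_code == VXGE_HW_RING_T_CODE_OK) {
                        /* ... hand the buffer to the stack ... */
                } else {
                        /* let the HW layer account for the error code */
                        vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
                        /* ... typically drop/recycle the buffer ... */
                }
                vxge_hw_ring_rxd_free(ring, rxdh);
        }
}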
1381 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1384 * @txdl_ptr: The starting location of the TxDL in host memory
1385 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1386 * @no_snoop: No snoop flags
1388 * This function posts a non-offload doorbell to the doorbell FIFO
1391 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1392 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1394 struct __vxge_hw_channel *channel;
1396 channel = &fifo->channel;
1398 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1399 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1400 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1401 &fifo->nofl_db->control_0);
1405 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1411 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in the fifo
1413 * @fifoh: Handle to the fifo object used for non offload send
1415 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1417 return vxge_hw_channel_dtr_count(&fifoh->channel);
1421 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1422 * @fifoh: Handle to the fifo object used for non offload send
1423 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1424 * with a valid handle.
1425 * @txdl_priv: Buffer to return the pointer to per txdl space
1427 * Reserve a single TxDL (that is, fifo descriptor)
1428 * for the subsequent filling-in by the driver
1429 * and posting on the corresponding channel (@channelh)
1430 * via vxge_hw_fifo_txdl_post().
1432 * Note: it is the responsibility of the driver to reserve multiple descriptors
1433 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1434 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1436 * Returns: VXGE_HW_OK - success;
1437 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1440 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1441 struct __vxge_hw_fifo *fifo,
1442 void **txdlh, void **txdl_priv)
1444 struct __vxge_hw_channel *channel;
1445 enum vxge_hw_status status;
1448 channel = &fifo->channel;
1450 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1452 if (status == VXGE_HW_OK) {
1453 struct vxge_hw_fifo_txd *txdp =
1454 (struct vxge_hw_fifo_txd *)*txdlh;
1455 struct __vxge_hw_fifo_txdl_priv *priv;
1457 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1459 /* reset the TxDL's private */
1460 priv->align_dma_offset = 0;
1461 priv->align_vaddr_start = priv->align_vaddr;
1462 priv->align_used_frags = 0;
1464 priv->alloc_frags = fifo->config->max_frags;
1465 priv->next_txdl_priv = NULL;
1467 *txdl_priv = (void *)(size_t)txdp->host_control;
1469 for (i = 0; i < fifo->config->max_frags; i++) {
1470 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1471 txdp->control_0 = txdp->control_1 = 0;
1479 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1481 * @fifo: Handle to the fifo object used for non offload send
1482 * @txdlh: Descriptor handle.
1483 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1485 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1486 * @size: Size of the data buffer (in bytes).
1488 * This API is part of the preparation of the transmit descriptor for posting
1489 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1490 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1491 * All three APIs fill in the fields of the fifo descriptor,
1492 * in accordance with the Titan specification.
1495 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1496 void *txdlh, u32 frag_idx,
1497 dma_addr_t dma_pointer, u32 size)
1499 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1500 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1501 struct __vxge_hw_channel *channel;
1503 channel = &fifo->channel;
1505 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1506 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1509 txdp->control_0 = txdp->control_1 = 0;
1511 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1512 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1513 txdp->control_1 |= fifo->interrupt_type;
1514 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1516 if (txdl_priv->frags) {
1517 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1518 (txdl_priv->frags - 1);
1519 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1520 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1524 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1526 txdp->buffer_pointer = (u64)dma_pointer;
1527 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1528 fifo->stats->total_buffers++;
1533 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1534 * @fifo: Handle to the fifo object used for non offload send
1535 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1539 * Post descriptor on the 'fifo' type channel for transmission.
1540 * Prior to posting, the descriptor should be filled in accordance with the
1541 * Host/Titan interface specification for a given service (LL, etc.).
1544 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1546 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1547 struct vxge_hw_fifo_txd *txdp_last;
1548 struct vxge_hw_fifo_txd *txdp_first;
1549 struct __vxge_hw_channel *channel;
1551 channel = &fifo->channel;
1553 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1556 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1557 txdp_last->control_0 |=
1558 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1559 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1561 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1563 __vxge_hw_non_offload_db_post(fifo,
1564 (u64)txdl_priv->dma_addr,
1565 txdl_priv->frags - 1,
1566 fifo->no_snoop_bits);
1568 fifo->stats->total_posts++;
1569 fifo->stats->common_stats.usage_cnt++;
1570 if (fifo->stats->common_stats.usage_max <
1571 fifo->stats->common_stats.usage_cnt)
1572 fifo->stats->common_stats.usage_max =
1573 fifo->stats->common_stats.usage_cnt;
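/*
 * Usage sketch (illustrative only): a hypothetical single-fragment transmit
 * using the reserve/buffer_set/post sequence described above.  Real callers
 * also program checksum/TSO bits and remember the skb in the per-TxDL
 * private area; that is only hinted at here.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_fifo_xmit_one(struct __vxge_hw_fifo *fifo,
                           dma_addr_t dma, u32 len)
{
        enum vxge_hw_status status;
        void *txdlh, *txdl_priv;

        status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
        if (status != VXGE_HW_OK)
                return status;  /* fifo full - caller should stop the queue */

        /* one fragment at index 0; multi-fragment sends repeat this call */
        vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);

        /* ... set checksum/MSS bits and stash the skb via txdl_priv ... */

        vxge_hw_fifo_txdl_post(fifo, txdlh);
        return VXGE_HW_OK;
}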
1577 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1578 * @fifo: Handle to the fifo object used for non offload send
1579 * @txdlh: Descriptor handle. Returned by HW.
1580 * @t_code: Transfer code, as per Titan User Guide,
1581 * Transmit Descriptor Format.
1584 * Retrieve the _next_ completed descriptor.
1585 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1586 * driver of new completed descriptors. After that
1587 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1588 * completions (the very first completion is passed by HW via
1589 * vxge_hw_channel_callback_f).
1591 * Implementation-wise, the driver is free to call
1592 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1593 * channel callback, or in a deferred fashion and separate (from HW) context.
1596 * Non-zero @t_code means failure to process the descriptor.
1597 * The failure could happen, for instance, when the link is
1598 * down, in which case Titan completes the descriptor because it
1599 * is not able to send the data out.
1601 * For details please refer to Titan User Guide.
1603 * Returns: VXGE_HW_OK - success.
1604 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1605 * are currently available for processing.
1608 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1609 struct __vxge_hw_fifo *fifo, void **txdlh,
1610 enum vxge_hw_fifo_tcode *t_code)
1612 struct __vxge_hw_channel *channel;
1613 struct vxge_hw_fifo_txd *txdp;
1614 enum vxge_hw_status status = VXGE_HW_OK;
1616 channel = &fifo->channel;
1618 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1622 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1626 /* check whether host owns it */
1627 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1629 vxge_assert(txdp->host_control != 0);
1631 vxge_hw_channel_dtr_complete(channel);
1633 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1635 if (fifo->stats->common_stats.usage_cnt > 0)
1636 fifo->stats->common_stats.usage_cnt--;
1638 status = VXGE_HW_OK;
1642 /* no more completions */
1644 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1650 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1651 * @fifo: Handle to the fifo object used for non offload send
1652 * @txdlh: Descriptor handle.
1653 * @t_code: One of the enumerated (and documented in the Titan user guide)
1656 * Handle descriptor's transfer code. The latter comes with each completed
1659 * Returns: one of the enum vxge_hw_status{} enumerated types.
1660 * VXGE_HW_OK - for success.
1661 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1663 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1665 enum vxge_hw_fifo_tcode t_code)
1667 struct __vxge_hw_channel *channel;
1669 enum vxge_hw_status status = VXGE_HW_OK;
1670 channel = &fifo->channel;
1672 if ((t_code & 0x7) > 0x4) {
1673 status = VXGE_HW_ERR_INVALID_TCODE;
1677 fifo->stats->txd_t_code_err_cnt[t_code]++;
1683 * vxge_hw_fifo_txdl_free - Free descriptor.
1684 * @fifo: Handle to the fifo object used for non offload send
1685 * @txdlh: Descriptor handle.
1687 * Free the reserved descriptor. This operation is "symmetrical" to
1688 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's lifecycle.
1691 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can be:
1694 * - reserved (vxge_hw_fifo_txdl_reserve);
1696 * - posted (vxge_hw_fifo_txdl_post);
1698 * - completed (vxge_hw_fifo_txdl_next_completed);
1700 * - and recycled again (vxge_hw_fifo_txdl_free).
1702 * For alternative state transitions and more details please refer to
1706 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1708 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1710 struct __vxge_hw_channel *channel;
1712 channel = &fifo->channel;
1714 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1715 (struct vxge_hw_fifo_txd *)txdlh);
1717 max_frags = fifo->config->max_frags;
1719 vxge_hw_channel_dtr_free(channel, txdlh);
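/*
 * Usage sketch (illustrative only): a hypothetical reclaim loop pairing
 * vxge_hw_fifo_txdl_next_completed() with vxge_hw_fifo_handle_tcode() and
 * vxge_hw_fifo_txdl_free().  Unmapping the buffers and freeing the skb that
 * was remembered for each TxDL is left as a comment.
 */
static void __maybe_unused
vxge_example_fifo_reclaim(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_fifo_tcode t_code;
        void *txdlh;

        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
                                                        VXGE_HW_OK) {
                if (t_code)
                        /* non-zero transfer code: account for the error */
                        vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

                /* ... unmap buffers and free the skb for this TxDL ... */

                vxge_hw_fifo_txdl_free(fifo, txdlh);
        }
}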
1723 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1724 * to the MAC address table.
1725 * @vp: Vpath handle.
1726 * @macaddr: MAC address to be added for this vpath into the list
1727 * @macaddr_mask: MAC address mask for macaddr
1728 * @duplicate_mode: Duplicate MAC address add mode. Please see
1729 * enum vxge_hw_vpath_mac_addr_add_mode{}
1731 * Adds the given mac address and mac address mask into the list for this vpath.
1733 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1734 * vxge_hw_vpath_mac_addr_get_next
1738 vxge_hw_vpath_mac_addr_add(
1739 struct __vxge_hw_vpath_handle *vp,
1742 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1747 enum vxge_hw_status status = VXGE_HW_OK;
1750 status = VXGE_HW_ERR_INVALID_HANDLE;
1754 for (i = 0; i < ETH_ALEN; i++) {
1756 data1 |= (u8)macaddr[i];
1759 data2 |= (u8)macaddr_mask[i];
1762 switch (duplicate_mode) {
1763 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1766 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1769 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1777 status = __vxge_hw_vpath_rts_table_set(vp,
1778 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1779 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1781 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1782 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1783 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
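/*
 * Usage sketch (illustrative only): a hypothetical wrapper that copies a
 * caller-supplied address/mask pair into writable buffers and adds it to the
 * vpath's DA table.  VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE is used purely as
 * an example duplicate-handling mode.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_add_mac(struct __vxge_hw_vpath_handle *vp,
                     const u8 *addr, const u8 *mask)
{
        u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];

        memcpy(macaddr, addr, ETH_ALEN);
        memcpy(macaddr_mask, mask, ETH_ALEN);

        return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
                                VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}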
1789 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1790 * from MAC address table.
1791 * @vp: Vpath handle.
1792 * @macaddr: First MAC address entry for this vpath in the list
1793 * @macaddr_mask: MAC address mask for macaddr
1795 * Returns the first mac address and mac address mask in the list for this vpath.
1797 * see also: vxge_hw_vpath_mac_addr_get_next
1801 vxge_hw_vpath_mac_addr_get(
1802 struct __vxge_hw_vpath_handle *vp,
1809 enum vxge_hw_status status = VXGE_HW_OK;
1812 status = VXGE_HW_ERR_INVALID_HANDLE;
1816 status = __vxge_hw_vpath_rts_table_get(vp,
1817 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1818 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1821 if (status != VXGE_HW_OK)
1824 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1826 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1828 for (i = ETH_ALEN; i > 0; i--) {
1829 macaddr[i-1] = (u8)(data1 & 0xFF);
1832 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1840 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1842 * from MAC address table.
1843 * @vp: Vpath handle.
1844 * @macaddr: Next MAC address entry for this vpath in the list
1845 * @macaddr_mask: MAC address mask for macaddr
1847 * Returns the next mac address and mac address mask in the list for this vpath.
1849 * see also: vxge_hw_vpath_mac_addr_get
1853 vxge_hw_vpath_mac_addr_get_next(
1854 struct __vxge_hw_vpath_handle *vp,
1861 enum vxge_hw_status status = VXGE_HW_OK;
1864 status = VXGE_HW_ERR_INVALID_HANDLE;
1868 status = __vxge_hw_vpath_rts_table_get(vp,
1869 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1870 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1873 if (status != VXGE_HW_OK)
1876 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1878 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1880 for (i = ETH_ALEN; i > 0; i--) {
1881 macaddr[i-1] = (u8)(data1 & 0xFF);
1884 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1893 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1894 * from the MAC address table.
1895 * @vp: Vpath handle.
1896 * @macaddr: MAC address to be deleted from the list for this vpath
1897 * @macaddr_mask: MAC address mask for macaddr
1899 * Deletes the given mac address and mac address mask from the list for this vpath.
1901 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1902 * vxge_hw_vpath_mac_addr_get_next
1906 vxge_hw_vpath_mac_addr_delete(
1907 struct __vxge_hw_vpath_handle *vp,
1914 enum vxge_hw_status status = VXGE_HW_OK;
1917 status = VXGE_HW_ERR_INVALID_HANDLE;
1921 for (i = 0; i < ETH_ALEN; i++) {
1923 data1 |= (u8)macaddr[i];
1926 data2 |= (u8)macaddr_mask[i];
1929 status = __vxge_hw_vpath_rts_table_set(vp,
1930 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1931 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1933 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1934 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1940 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1942 * @vp: Vpath handle.
1943 * @vid: vlan id to be added to the list for this vpath
1945 * Adds the given vlan id into the list for this vpath.
1946 * see also: vxge_hw_vpath_vid_delete
1950 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1952 enum vxge_hw_status status = VXGE_HW_OK;
1955 status = VXGE_HW_ERR_INVALID_HANDLE;
1959 status = __vxge_hw_vpath_rts_table_set(vp,
1960 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1961 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1962 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1968 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1970 * @vp: Vpath handle.
1971 * @vid: vlan id to be deleted from the list for this vpath
1973 * Deletes the given vlan id from the list for this vpath.
1974 * see also: vxge_hw_vpath_vid_add
1978 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1980 enum vxge_hw_status status = VXGE_HW_OK;
1983 status = VXGE_HW_ERR_INVALID_HANDLE;
1987 status = __vxge_hw_vpath_rts_table_set(vp,
1988 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1989 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1990 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
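/*
 * Usage sketch (illustrative only): a hypothetical helper mirroring a VLAN
 * filter update into the vpath's VID table with the two calls above.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_update_vid(struct __vxge_hw_vpath_handle *vp, u64 vid, bool add)
{
        return add ? vxge_hw_vpath_vid_add(vp, vid) :
                     vxge_hw_vpath_vid_delete(vp, vid);
}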
1996 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1997 * @vp: Vpath handle.
1999 * Enable promiscuous mode of Titan-e operation.
2001 * See also: vxge_hw_vpath_promisc_disable().
2003 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2004 struct __vxge_hw_vpath_handle *vp)
2007 struct __vxge_hw_virtualpath *vpath;
2008 enum vxge_hw_status status = VXGE_HW_OK;
2010 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2011 status = VXGE_HW_ERR_INVALID_HANDLE;
2017 /* Enable promiscuous mode for function 0 only */
2018 if (!(vpath->hldev->access_rights &
2019 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2022 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2024 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2026 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2027 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2028 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2029 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2031 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2038 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2039 * @vp: Vpath handle.
2041 * Disable promiscuous mode of Titan-e operation.
2043 * See also: vxge_hw_vpath_promisc_enable().
2045 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2046 struct __vxge_hw_vpath_handle *vp)
2049 struct __vxge_hw_virtualpath *vpath;
2050 enum vxge_hw_status status = VXGE_HW_OK;
2052 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2053 status = VXGE_HW_ERR_INVALID_HANDLE;
2059 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2061 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2063 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2064 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2065 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2067 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2074 * vxge_hw_vpath_bcast_enable - Enable broadcast
2075 * @vp: Vpath handle.
2077 * Enable receiving broadcasts.
2079 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2080 struct __vxge_hw_vpath_handle *vp)
2083 struct __vxge_hw_virtualpath *vpath;
2084 enum vxge_hw_status status = VXGE_HW_OK;
2086 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2087 status = VXGE_HW_ERR_INVALID_HANDLE;
2093 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2095 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2096 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2097 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2104 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2105 * @vp: Vpath handle.
2107 * Enable Titan-e multicast addresses.
2108 * Returns: VXGE_HW_OK on success.
2111 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2112 struct __vxge_hw_vpath_handle *vp)
2115 struct __vxge_hw_virtualpath *vpath;
2116 enum vxge_hw_status status = VXGE_HW_OK;
2118 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2119 status = VXGE_HW_ERR_INVALID_HANDLE;
2125 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2127 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2128 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2129 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2136 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2137 * @vp: Vpath handle.
2139 * Disable Titan-e multicast addresses.
2140 * Returns: VXGE_HW_OK - success.
2141 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2145 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2148 struct __vxge_hw_virtualpath *vpath;
2149 enum vxge_hw_status status = VXGE_HW_OK;
2151 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2152 status = VXGE_HW_ERR_INVALID_HANDLE;
2158 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2160 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2161 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2162 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2169 * vxge_hw_vpath_alarm_process - Process Alarms.
2170 * @vpath: Virtual Path.
2171 * @skip_alarms: Do not clear the alarms
2173 * Process vpath alarms.
2176 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2177 struct __vxge_hw_vpath_handle *vp,
2180 enum vxge_hw_status status = VXGE_HW_OK;
2183 status = VXGE_HW_ERR_INVALID_HANDLE;
2187 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2193 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2195 * @vp: Virtual Path handle.
2196 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2197 * interrupts (can be repeated). If the fifo or ring is not enabled,
2198 * the corresponding MSIX vector should be set to 0
2199 * @alarm_msix_id: MSIX vector for alarm.
2201 * This API associates the given MSIX vector numbers with the four TIM
2202 * interrupts and the alarm interrupt.
2205 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2209 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2210 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2211 u32 vp_id = vp->vpath->vp_id;
2213 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2214 (vp_id * 4) + tim_msix_id[0]) |
2215 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2216 (vp_id * 4) + tim_msix_id[1]);
2218 writeq(val64, &vp_reg->interrupt_cfg0);
2220 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2221 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2222 &vp_reg->interrupt_cfg2);
2224 if (vpath->hldev->config.intr_mode ==
2225 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2226 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2227 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2228 0, 32), &vp_reg->one_shot_vect0_en);
2229 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2230 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2231 0, 32), &vp_reg->one_shot_vect1_en);
2232 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2233 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2234 0, 32), &vp_reg->one_shot_vect2_en);
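/*
 * Usage sketch (illustrative only): wiring a vpath's TIM and alarm interrupts
 * to MSI-X vectors.  The vector layout shown here (Tx on 0, Rx on 1, alarm on
 * 3, unused slots left at 0) is purely hypothetical; the real assignment is
 * whatever the driver allocated for this vpath.
 */
static void __maybe_unused
vxge_example_msix_wire_vpath(struct __vxge_hw_vpath_handle *vp)
{
        int tim_msix_id[4] = {0, 1, 0, 0};
        int alarm_msix_id = 3;

        vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);

        /* unmask the vectors that are actually in use */
        vxge_hw_vpath_msix_unmask(vp, tim_msix_id[0]);
        vxge_hw_vpath_msix_unmask(vp, tim_msix_id[1]);
        vxge_hw_vpath_msix_unmask(vp, alarm_msix_id);
}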
2239 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2240 * @vp: Virtual Path handle.
2243 * The function masks the msix interrupt for the given msix_id
2251 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2253 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2254 __vxge_hw_pio_mem_write32_upper(
2255 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2256 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2260 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2261 * @vp: Virtual Path handle.
2264 * The function clears the msix interrupt for the given msix_id
2271 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2273 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2275 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2276 __vxge_hw_pio_mem_write32_upper(
2277 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2278 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2280 __vxge_hw_pio_mem_write32_upper(
2281 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2282 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2286 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2287 * @vp: Virtual Path handle.
2290 * The function unmasks the msix interrupt for the given msix_id
2298 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2300 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301 __vxge_hw_pio_mem_write32_upper(
2302 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2303 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2307 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2308 * @vp: Virtual Path handle.
2310 * Mask Tx and Rx vpath interrupts.
2312 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2314 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2316 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2317 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2321 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2322 tim_int_mask1, vp->vpath->vp_id);
2324 val64 = readq(&hldev->common_reg->tim_int_mask0);
2326 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2327 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2328 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2329 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2330 &hldev->common_reg->tim_int_mask0);
2333 val64 = readl(&hldev->common_reg->tim_int_mask1);
2335 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2336 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2337 __vxge_hw_pio_mem_write32_upper(
2338 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2339 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2340 &hldev->common_reg->tim_int_mask1);
2345 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2346 * @vp: Virtual Path handle.
2348 * Unmask Tx and Rx vpath interrupts.
2350 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2352 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2354 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2355 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2357 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2359 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2360 tim_int_mask1, vp->vpath->vp_id);
2362 val64 = readq(&hldev->common_reg->tim_int_mask0);
2364 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2365 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2366 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2367 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2368 &hldev->common_reg->tim_int_mask0);
2371 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2372 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2373 __vxge_hw_pio_mem_write32_upper(
2374 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2375 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2376 &hldev->common_reg->tim_int_mask1);
2381 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2382 * descriptors and process the same.
2383 * @ring: Handle to the ring object used for receive
2385 * The function polls the Rx for completed descriptors and calls
2386 * the driver via the supplied completion callback.
2388 * Returns: VXGE_HW_OK, if the polling completed successfully.
2389 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2390 * descriptors available which are yet to be processed.
2392 * See also: vxge_hw_vpath_poll_tx()
2394 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2397 enum vxge_hw_status status = VXGE_HW_OK;
2403 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2404 if (status == VXGE_HW_OK)
2405 ring->callback(ring, first_rxdh,
2406 t_code, ring->channel.userdata);
2408 if (ring->cmpl_cnt != 0) {
2409 ring->doorbell_cnt += ring->cmpl_cnt;
2410 if (ring->doorbell_cnt >= ring->rxds_limit) {
2412 * Each RxD is 4 qwords; update the number of
2413 * qwords replenished
2415 new_count = (ring->doorbell_cnt * 4);
2417 /* For each block add 4 more qwords */
2418 ring->total_db_cnt += ring->doorbell_cnt;
2419 if (ring->total_db_cnt >= ring->rxds_per_block) {
2421 /* Reset total count */
2422 ring->total_db_cnt %= ring->rxds_per_block;
2424 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2425 &ring->vp_reg->prc_rxd_doorbell);
2426 readl(&ring->common_reg->titan_general_int_status);
2427 ring->doorbell_cnt = 0;
2435 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2437 * @fifo: Handle to the fifo object used for non offload send
2439 * The function polls the Tx for completed descriptors and calls
2440 * the driver via the supplied completion callback.
2442 * Returns: VXGE_HW_OK, if the polling completed successfully.
2443 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2444 * descriptors available which are yet to be processed.
2446 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2447 struct sk_buff ***skb_ptr, int nr_skb,
2450 enum vxge_hw_fifo_tcode t_code;
2452 enum vxge_hw_status status = VXGE_HW_OK;
2453 struct __vxge_hw_channel *channel;
2455 channel = &fifo->channel;
2457 status = vxge_hw_fifo_txdl_next_completed(fifo,
2458 &first_txdlh, &t_code);
2459 if (status == VXGE_HW_OK)
2460 if (fifo->callback(fifo, first_txdlh, t_code,
2461 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2462 status = VXGE_HW_COMPLETIONS_REMAIN;
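/*
 * Usage sketch (illustrative only): a hypothetical per-vpath service routine
 * in the spirit of a NAPI poll, draining Rx completions with
 * vxge_hw_vpath_poll_rx() and Tx completions with vxge_hw_vpath_poll_tx().
 * It assumes the fifo completion callback stores finished skbs through
 * @skb_ptr (as the skb_ptr/nr_skb arguments suggest) and that freeing them
 * with dev_kfree_skb_irq() is appropriate in the calling context.
 */
static void __maybe_unused
vxge_example_service_vpath(struct __vxge_hw_ring *ring,
                           struct __vxge_hw_fifo *fifo)
{
        struct sk_buff *completed[16];
        struct sk_buff **skb_ptr, **temp;
        int more;

        /* Rx: the ring callback installed at open time consumes the buffers */
        vxge_hw_vpath_poll_rx(ring);

        /* Tx: collect completed skbs in @completed and free them */
        do {
                more = 0;
                skb_ptr = completed;
                vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed),
                                      &more);
                for (temp = completed; temp != skb_ptr; temp++)
                        dev_kfree_skb_irq(*temp);
        } while (more);
}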