/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * This file contains HW queue descriptor formats, config register
 * structures.
 *
 * Copyright (C) 2015 Cavium, Inc.
 */
/* Load transaction types for reading segment bytes specified by
 * NIC_SEND_GATHER_S[LD_TYPE].
 */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD = 0x0,
	NIC_SEND_LD_TYPE_E_LDT = 0x1,
	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,	/* sentinel: one past last valid type */
};
22 enum ether_type_algorithm {
25 ETYPE_ALG_ENDPARSE = 0x2,
27 ETYPE_ALG_VLAN_STRIP = 0x4,
34 L3TYPE_IPV4_OPTIONS = 0x05,
36 L3TYPE_IPV6_OPTIONS = 0x07,
37 L3TYPE_ET_STOP = 0x0D,
43 L4TYPE_IPSEC_ESP = 0x01,
50 L4TYPE_ROCE_BTH = 0x08,
54 /* CPI and RSSI configuration */
55 enum cpi_algorithm_type {
62 enum rss_algorithm_type {
66 RSS_ALG_TCP_IP = 0x03,
67 RSS_ALG_UDP_IP = 0x04,
68 RSS_ALG_SCTP_IP = 0x05,
69 RSS_ALG_GRE_IP = 0x06,
74 RSS_HASH_L2ETC = 0x00,
77 RSS_HASH_TCP_SYN_DIS = 0x03,
79 RSS_HASH_L4ETC = 0x05,
85 /* Completion queue entry types */
87 CQE_TYPE_INVALID = 0x0,
89 CQE_TYPE_RX_SPLIT = 0x3,
90 CQE_TYPE_RX_TCP = 0x4,
92 CQE_TYPE_SEND_PTP = 0x9,
/* RX TCP completion entry context-validity status */
enum cqe_rx_tcp_status {
	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
/* Send completion entry status codes; values above 0x00 are errors
 * (note the encoding is sparse: 0x11/0x12 header errors, 0x80+ data-path
 * errors).
 */
enum cqe_send_status {
	CQE_SEND_STATUS_GOOD = 0x00,
	CQE_SEND_STATUS_DESC_FAULT = 0x01,
	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
	CQE_SEND_STATUS_DATA_FAULT = 0x86,
	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
	CQE_SEND_STATUS_MEM_FAULT = 0x89,
	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
/* Reason the HW terminated a TCP reassembly context */
enum cqe_rx_tcp_end_reason {
	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
	CQE_RX_TCP_END_INVALID_FLAG = 1,
	CQE_RX_TCP_END_TIMEOUT = 2,
	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
	CQE_RX_TCP_END_PKT_ERR = 4,
	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
	CQE_RX_ERRLVL_RE = 0x0,
	CQE_RX_ERRLVL_L2 = 0x1,
	CQE_RX_ERRLVL_L3 = 0x2,
	CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration; opcode ranges group
 * by layer (0x0x RE, 0x2x L2, 0x4x L3, 0x6x L4, 0x70 RBDR).
 */
enum cqe_rx_err_opcode {
	CQE_RX_ERR_RE_NONE = 0x0,
	CQE_RX_ERR_RE_PARTIAL = 0x1,
	CQE_RX_ERR_RE_JABBER = 0x2,
	CQE_RX_ERR_RE_FCS = 0x7,
	CQE_RX_ERR_RE_TERMINATE = 0x9,
	CQE_RX_ERR_RE_RX_CTL = 0xb,
	CQE_RX_ERR_PREL2_ERR = 0x1f,
	CQE_RX_ERR_L2_FRAGMENT = 0x20,
	CQE_RX_ERR_L2_OVERRUN = 0x21,
	CQE_RX_ERR_L2_PFCS = 0x22,
	CQE_RX_ERR_L2_PUNY = 0x23,
	CQE_RX_ERR_L2_MAL = 0x24,
	CQE_RX_ERR_L2_OVERSIZE = 0x25,
	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
	CQE_RX_ERR_L2_LENMISM = 0x27,
	CQE_RX_ERR_L2_PCLP = 0x28,
	CQE_RX_ERR_IP_NOT = 0x41,
	CQE_RX_ERR_IP_CHK = 0x42,
	CQE_RX_ERR_IP_MAL = 0x43,
	CQE_RX_ERR_IP_MALD = 0x44,
	CQE_RX_ERR_IP_HOP = 0x45,
	CQE_RX_ERR_L3_ICRC = 0x46,
	CQE_RX_ERR_L3_PCLP = 0x47,
	CQE_RX_ERR_L4_MAL = 0x61,
	CQE_RX_ERR_L4_CHK = 0x62,
	CQE_RX_ERR_UDP_LEN = 0x63,
	CQE_RX_ERR_L4_PORT = 0x64,
	CQE_RX_ERR_TCP_FLAG = 0x65,
	CQE_RX_ERR_TCP_OFFSET = 0x66,
	CQE_RX_ERR_L4_PCLP = 0x67,
	CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
172 #if defined(__BIG_ENDIAN_BITFIELD)
173 u64 cqe_type:4; /* W0 */
185 u64 vlan2_stripped:1;
192 u64 pkt_len:16; /* W1 */
201 u64 rss_tag:32; /* W2 */
206 u64 rb3_sz:16; /* W3 */
211 u64 rb7_sz:16; /* W4 */
216 u64 rb11_sz:16; /* W5 */
220 #elif defined(__LITTLE_ENDIAN_BITFIELD)
226 u64 vlan2_stripped:1;
238 u64 cqe_type:4; /* W0 */
246 u64 pkt_len:16; /* W1 */
250 u64 rss_tag:32; /* W2 */
254 u64 rb3_sz:16; /* W3 */
258 u64 rb7_sz:16; /* W4 */
262 u64 rb11_sz:16; /* W5 */
278 struct cqe_rx_tcp_err_t {
279 #if defined(__BIG_ENDIAN_BITFIELD)
280 u64 cqe_type:4; /* W0 */
283 u64 rsvd1:4; /* W1 */
288 #elif defined(__LITTLE_ENDIAN_BITFIELD)
300 struct cqe_rx_tcp_t {
301 #if defined(__BIG_ENDIAN_BITFIELD)
302 u64 cqe_type:4; /* W0 */
306 u64 rsvd1:32; /* W1 */
307 u64 tcp_cntx_bytes:8;
309 u64 tcp_err_bytes:16;
310 #elif defined(__LITTLE_ENDIAN_BITFIELD)
313 u64 cqe_type:4; /* W0 */
315 u64 tcp_err_bytes:16;
317 u64 tcp_cntx_bytes:8;
318 u64 rsvd1:32; /* W1 */
323 #if defined(__BIG_ENDIAN_BITFIELD)
324 u64 cqe_type:4; /* W0 */
334 u64 ptp_timestamp:64; /* W1 */
335 #elif defined(__LITTLE_ENDIAN_BITFIELD)
344 u64 cqe_type:4; /* W0 */
346 u64 ptp_timestamp:64; /* W1 */
352 struct cqe_send_t snd_hdr;
353 struct cqe_rx_t rx_hdr;
354 struct cqe_rx_tcp_t rx_tcp_hdr;
355 struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
358 struct rbdr_entry_t {
362 /* TCP reassembly context */
363 struct rbe_tcp_cnxt_t {
364 #if defined(__BIG_ENDIAN_BITFIELD)
367 u64 align_hdr_bytes:4;
368 u64 align_ptr_bytes:4;
373 u64 tcp_end_reason:2;
375 #elif defined(__LITTLE_ENDIAN_BITFIELD)
377 u64 tcp_end_reason:2;
382 u64 align_ptr_bytes:4;
383 u64 align_hdr_bytes:4;
389 /* Always Big endian */
395 u64 disable_tcp_reassembly:1;
/* L4 checksum-insertion mode for the send header subdescriptor */
enum send_l4_csum_type {
	SEND_L4_CSUM_DISABLE = 0x00,
	SEND_L4_CSUM_UDP = 0x01,
	SEND_L4_CSUM_TCP = 0x02,
	SEND_L4_CSUM_SCTP = 0x03,
};
410 SEND_CRCALG_CRC32 = 0x00,
411 SEND_CRCALG_CRC32C = 0x01,
412 SEND_CRCALG_ICRC = 0x02,
/* Load transaction type used when fetching send-segment bytes */
enum send_load_type {
	SEND_LD_TYPE_LDD = 0x00,
	SEND_LD_TYPE_LDT = 0x01,
	SEND_LD_TYPE_LDWB = 0x02,
};
/* Arithmetic applied by the memory subdescriptor atomic operation */
enum send_mem_alg_type {
	SEND_MEMALG_SET = 0x00,
	SEND_MEMALG_ADD = 0x08,
	SEND_MEMALG_SUB = 0x09,
	SEND_MEMALG_ADDLEN = 0x0A,
	SEND_MEMALG_SUBLEN = 0x0B,
};
/* Data size operated on by the memory subdescriptor */
enum send_mem_dsz_type {
	SEND_MEMDSZ_B64 = 0x00,
	SEND_MEMDSZ_B32 = 0x01,
	SEND_MEMDSZ_B8 = 0x03,	/* note: 0x02 is not a defined size */
};
/* Send queue subdescriptor types */
enum sq_subdesc_type {
	SQ_DESC_TYPE_INVALID = 0x00,
	SQ_DESC_TYPE_HEADER = 0x01,
	SQ_DESC_TYPE_CRC = 0x02,
	SQ_DESC_TYPE_IMMEDIATE = 0x03,
	SQ_DESC_TYPE_GATHER = 0x04,
	SQ_DESC_TYPE_MEMORY = 0x05,
};
444 struct sq_crc_subdesc {
445 #if defined(__BIG_ENDIAN_BITFIELD)
451 u64 crc_insert_pos:16;
454 #elif defined(__LITTLE_ENDIAN_BITFIELD)
457 u64 crc_insert_pos:16;
466 struct sq_gather_subdesc {
467 #if defined(__BIG_ENDIAN_BITFIELD)
468 u64 subdesc_type:4; /* W0 */
473 u64 rsvd1:15; /* W1 */
475 #elif defined(__LITTLE_ENDIAN_BITFIELD)
479 u64 subdesc_type:4; /* W0 */
482 u64 rsvd1:15; /* W1 */
486 /* SQ immediate subdescriptor */
487 struct sq_imm_subdesc {
488 #if defined(__BIG_ENDIAN_BITFIELD)
489 u64 subdesc_type:4; /* W0 */
493 u64 data:64; /* W1 */
494 #elif defined(__LITTLE_ENDIAN_BITFIELD)
497 u64 subdesc_type:4; /* W0 */
499 u64 data:64; /* W1 */
503 struct sq_mem_subdesc {
504 #if defined(__BIG_ENDIAN_BITFIELD)
505 u64 subdesc_type:4; /* W0 */
512 u64 rsvd1:15; /* W1 */
514 #elif defined(__LITTLE_ENDIAN_BITFIELD)
520 u64 subdesc_type:4; /* W0 */
523 u64 rsvd1:15; /* W1 */
527 struct sq_hdr_subdesc {
528 #if defined(__BIG_ENDIAN_BITFIELD)
531 u64 post_cqe:1; /* Post CQE on no error also */
543 u64 tot_len:20; /* W0 */
546 u64 inner_l4_offset:8;
547 u64 inner_l3_offset:8;
550 u64 tso_max_paysize:14; /* W1 */
551 #elif defined(__LITTLE_ENDIAN_BITFIELD)
564 u64 post_cqe:1; /* Post CQE on no error also */
566 u64 subdesc_type:4; /* W0 */
568 u64 tso_max_paysize:14;
571 u64 inner_l3_offset:8;
572 u64 inner_l4_offset:8;
573 u64 rsvd2:24; /* W1 */
577 /* Queue config register formats */
579 #if defined(__BIG_ENDIAN_BITFIELD)
580 u64 reserved_2_63:62;
583 #elif defined(__LITTLE_ENDIAN_BITFIELD)
586 u64 reserved_2_63:62;
591 #if defined(__BIG_ENDIAN_BITFIELD)
592 u64 reserved_43_63:21;
596 u64 reserved_35_39:5;
598 u64 reserved_25_31:7;
600 u64 reserved_0_15:16;
601 #elif defined(__LITTLE_ENDIAN_BITFIELD)
602 u64 reserved_0_15:16;
604 u64 reserved_25_31:7;
606 u64 reserved_35_39:5;
610 u64 reserved_43_63:21;
615 #if defined(__BIG_ENDIAN_BITFIELD)
616 u64 reserved_32_63:32;
618 u64 reserved_20_23:4;
620 u64 reserved_18_18:1;
623 u64 reserved_11_15:5;
626 u64 tstmp_bgx_intf:3;
627 #elif defined(__LITTLE_ENDIAN_BITFIELD)
628 u64 tstmp_bgx_intf:3;
631 u64 reserved_11_15:5;
634 u64 reserved_18_18:1;
636 u64 reserved_20_23:4;
638 u64 reserved_32_63:32;
643 #if defined(__BIG_ENDIAN_BITFIELD)
644 u64 reserved_45_63:19;
648 u64 reserved_36_41:6;
650 u64 reserved_25_31:7;
652 u64 reserved_12_15:4;
654 #elif defined(__LITTLE_ENDIAN_BITFIELD)
656 u64 reserved_12_15:4;
658 u64 reserved_25_31:7;
660 u64 reserved_36_41:6;
664 u64 reserved_45_63:19;
669 #if defined(__BIG_ENDIAN_BITFIELD)
670 u64 reserved_32_63:32;
672 u64 reserved_27_30:4;
676 u64 lock_viol_cqe_ena:1;
677 u64 send_tstmp_ena:1;
681 #elif defined(__LITTLE_ENDIAN_BITFIELD)
685 u64 send_tstmp_ena:1;
686 u64 lock_viol_cqe_ena:1;
690 u64 reserved_27_30:4;
692 u64 reserved_32_63:32;
696 #endif /* Q_STRUCT_H */