/* This file contains HW queue descriptor formats, config register
 * structures.
 *
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
/* Load transaction types for reading segment bytes specified by
 * NIC_SEND_GATHER_S[LD_TYPE].
 */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD = 0x0,
	NIC_SEND_LD_TYPE_E_LDT = 0x1,
	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,	/* number of defined load types */
};
/* How the parser treats the Ethertype field of an incoming frame.
 * NOTE(review): ETYPE_ALG_NONE/SKIP/VLAN restored from the upstream
 * definition — the extraction dropped those lines; confirm against HW docs.
 */
enum ether_type_algorithm {
	ETYPE_ALG_NONE = 0x0,
	ETYPE_ALG_SKIP = 0x1,
	ETYPE_ALG_ENDPARSE = 0x2,
	ETYPE_ALG_VLAN = 0x3,
	ETYPE_ALG_VLAN_STRIP = 0x4,
};
/* L3 protocol type reported by the RX parser (CQE_RX_S[L3TYPE]).
 * NOTE(review): members other than IPV4_OPTIONS/IPV6_OPTIONS/ET_STOP were
 * dropped by the extraction and restored from the upstream definition.
 */
enum layer3_type {
	L3TYPE_NONE = 0x00,
	L3TYPE_GRH = 0x01,
	L3TYPE_IPV4 = 0x04,
	L3TYPE_IPV4_OPTIONS = 0x05,
	L3TYPE_IPV6 = 0x06,
	L3TYPE_IPV6_OPTIONS = 0x07,
	L3TYPE_ET_STOP = 0x0D,
	L3TYPE_OTHER = 0x0E,
};
/* L4 protocol type reported by the RX parser (CQE_RX_S[L4TYPE]).
 * NOTE(review): only IPSEC_ESP and ROCE_BTH survived the extraction;
 * the remaining members are restored from the upstream definition —
 * verify against the hardware reference manual.
 */
enum layer4_type {
	L4TYPE_NONE = 0x00,
	L4TYPE_IPSEC_ESP = 0x01,
	L4TYPE_IPFRAG = 0x02,
	L4TYPE_IPCOMP = 0x03,
	L4TYPE_TCP = 0x04,
	L4TYPE_UDP = 0x05,
	L4TYPE_SCTP = 0x06,
	L4TYPE_GRE = 0x07,
	L4TYPE_ROCE_BTH = 0x08,
	L4TYPE_OTHER = 0x0E,
};
/* CPI and RSSI configuration */

/* Channel parse index (CPI) computation algorithm.
 * NOTE(review): all members were dropped by the extraction and are
 * restored from the upstream definition — verify before relying on them.
 */
enum cpi_algorithm_type {
	CPI_ALG_NONE = 0x0,
	CPI_ALG_VLAN = 0x1,
	CPI_ALG_VLAN16 = 0x2,
	CPI_ALG_DIFF = 0x3,
};
/* RSS hash algorithm selection.
 * NOTE(review): RSS_ALG_NONE/PORT/IP and ROUNDROBIN were dropped by the
 * extraction and restored from the upstream definition — confirm values.
 */
enum rss_algorithm_type {
	RSS_ALG_NONE = 0x00,
	RSS_ALG_PORT = 0x01,
	RSS_ALG_IP = 0x02,
	RSS_ALG_TCP_IP = 0x03,
	RSS_ALG_UDP_IP = 0x04,
	RSS_ALG_SCTP_IP = 0x05,
	RSS_ALG_GRE_IP = 0x06,
	RSS_ALG_ROUNDROBIN = 0x07,
};
77 RSS_HASH_L2ETC = 0x00,
80 RSS_HASH_TCP_SYN_DIS = 0x03,
82 RSS_HASH_L4ETC = 0x05,
/* Completion queue entry types */
enum cqe_type {
	CQE_TYPE_INVALID = 0x0,
	CQE_TYPE_RX = 0x2,	/* NOTE(review): RX and SEND entries restored */
	CQE_TYPE_RX_SPLIT = 0x3,
	CQE_TYPE_RX_TCP = 0x4,
	CQE_TYPE_SEND = 0x8,	/* from the upstream definition — confirm */
	CQE_TYPE_SEND_PTP = 0x9,
};
/* Status of the TCP reassembly context reported in an RX-TCP CQE */
enum cqe_rx_tcp_status {
	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
/* Completion status codes for SEND CQEs.
 * 0x00 is success; 0x1x are descriptor/header errors, 0x8x are
 * data-path errors (per the value grouping visible below).
 */
enum cqe_send_status {
	CQE_SEND_STATUS_GOOD = 0x00,
	CQE_SEND_STATUS_DESC_FAULT = 0x01,
	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
	CQE_SEND_STATUS_DATA_FAULT = 0x86,
	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
	CQE_SEND_STATUS_MEM_FAULT = 0x89,
	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
/* Reason the HW terminated a TCP reassembly context (RX-TCP CQE) */
enum cqe_rx_tcp_end_reason {
	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
	CQE_RX_TCP_END_INVALID_FLAG = 1,
	CQE_RX_TCP_END_TIMEOUT = 2,
	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
	CQE_RX_TCP_END_PKT_ERR = 4,
	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
	CQE_RX_ERRLVL_RE = 0x0,	/* receive engine (pre-L2) */
	CQE_RX_ERRLVL_L2 = 0x1,
	CQE_RX_ERRLVL_L3 = 0x2,
	CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration.
 * Value ranges group by layer: <0x20 receive-engine, 0x2x L2,
 * 0x4x L3/IP, 0x6x L4, 0x70 RBDR truncation.
 */
enum cqe_rx_err_opcode {
	CQE_RX_ERR_RE_NONE = 0x0,
	CQE_RX_ERR_RE_PARTIAL = 0x1,
	CQE_RX_ERR_RE_JABBER = 0x2,
	CQE_RX_ERR_RE_FCS = 0x7,
	CQE_RX_ERR_RE_TERMINATE = 0x9,
	CQE_RX_ERR_RE_RX_CTL = 0xb,
	CQE_RX_ERR_PREL2_ERR = 0x1f,
	CQE_RX_ERR_L2_FRAGMENT = 0x20,
	CQE_RX_ERR_L2_OVERRUN = 0x21,
	CQE_RX_ERR_L2_PFCS = 0x22,
	CQE_RX_ERR_L2_PUNY = 0x23,
	CQE_RX_ERR_L2_MAL = 0x24,
	CQE_RX_ERR_L2_OVERSIZE = 0x25,
	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
	CQE_RX_ERR_L2_LENMISM = 0x27,
	CQE_RX_ERR_L2_PCLP = 0x28,
	CQE_RX_ERR_IP_NOT = 0x41,
	CQE_RX_ERR_IP_CHK = 0x42,
	CQE_RX_ERR_IP_MAL = 0x43,
	CQE_RX_ERR_IP_MALD = 0x44,
	CQE_RX_ERR_IP_HOP = 0x45,
	CQE_RX_ERR_L3_ICRC = 0x46,
	CQE_RX_ERR_L3_PCLP = 0x47,
	CQE_RX_ERR_L4_MAL = 0x61,
	CQE_RX_ERR_L4_CHK = 0x62,
	CQE_RX_ERR_UDP_LEN = 0x63,
	CQE_RX_ERR_L4_PORT = 0x64,
	CQE_RX_ERR_TCP_FLAG = 0x65,
	CQE_RX_ERR_TCP_OFFSET = 0x66,
	CQE_RX_ERR_L4_PCLP = 0x67,
	CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
175 #if defined(__BIG_ENDIAN_BITFIELD)
176 u64 cqe_type:4; /* W0 */
188 u64 vlan2_stripped:1;
195 u64 pkt_len:16; /* W1 */
204 u64 rss_tag:32; /* W2 */
209 u64 rb3_sz:16; /* W3 */
214 u64 rb7_sz:16; /* W4 */
219 u64 rb11_sz:16; /* W5 */
223 #elif defined(__LITTLE_ENDIAN_BITFIELD)
229 u64 vlan2_stripped:1;
241 u64 cqe_type:4; /* W0 */
249 u64 pkt_len:16; /* W1 */
253 u64 rss_tag:32; /* W2 */
257 u64 rb3_sz:16; /* W3 */
261 u64 rb7_sz:16; /* W4 */
265 u64 rb11_sz:16; /* W5 */
281 struct cqe_rx_tcp_err_t {
282 #if defined(__BIG_ENDIAN_BITFIELD)
283 u64 cqe_type:4; /* W0 */
286 u64 rsvd1:4; /* W1 */
291 #elif defined(__LITTLE_ENDIAN_BITFIELD)
303 struct cqe_rx_tcp_t {
304 #if defined(__BIG_ENDIAN_BITFIELD)
305 u64 cqe_type:4; /* W0 */
309 u64 rsvd1:32; /* W1 */
310 u64 tcp_cntx_bytes:8;
312 u64 tcp_err_bytes:16;
313 #elif defined(__LITTLE_ENDIAN_BITFIELD)
316 u64 cqe_type:4; /* W0 */
318 u64 tcp_err_bytes:16;
320 u64 tcp_cntx_bytes:8;
321 u64 rsvd1:32; /* W1 */
326 #if defined(__BIG_ENDIAN_BITFIELD)
327 u64 cqe_type:4; /* W0 */
337 u64 ptp_timestamp:64; /* W1 */
338 #elif defined(__LITTLE_ENDIAN_BITFIELD)
347 u64 cqe_type:4; /* W0 */
349 u64 ptp_timestamp:64; /* W1 */
355 struct cqe_send_t snd_hdr;
356 struct cqe_rx_t rx_hdr;
357 struct cqe_rx_tcp_t rx_tcp_hdr;
358 struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
361 struct rbdr_entry_t {
362 #if defined(__BIG_ENDIAN_BITFIELD)
366 #elif defined(__LITTLE_ENDIAN_BITFIELD)
373 /* TCP reassembly context */
374 struct rbe_tcp_cnxt_t {
375 #if defined(__BIG_ENDIAN_BITFIELD)
378 u64 align_hdr_bytes:4;
379 u64 align_ptr_bytes:4;
384 u64 tcp_end_reason:2;
386 #elif defined(__LITTLE_ENDIAN_BITFIELD)
388 u64 tcp_end_reason:2;
393 u64 align_ptr_bytes:4;
394 u64 align_hdr_bytes:4;
400 /* Always Big endian */
406 u64 disable_tcp_reassembly:1;
/* L4 checksum-insertion mode for a SEND header subdescriptor */
enum send_l4_csum_type {
	SEND_L4_CSUM_DISABLE = 0x00,
	SEND_L4_CSUM_UDP = 0x01,
	SEND_L4_CSUM_TCP = 0x02,
	SEND_L4_CSUM_SCTP = 0x03,
};
/* CRC algorithm selector for a SEND CRC subdescriptor.
 * NOTE(review): the enum's opening line was dropped by the extraction;
 * the tag name is restored from the upstream definition.
 */
enum send_crc_alg {
	SEND_CRCALG_CRC32 = 0x00,
	SEND_CRCALG_CRC32C = 0x01,
	SEND_CRCALG_ICRC = 0x02,
};
/* Load transaction type used when fetching SEND gather data */
enum send_load_type {
	SEND_LD_TYPE_LDD = 0x00,
	SEND_LD_TYPE_LDT = 0x01,
	SEND_LD_TYPE_LDWB = 0x02,
};
/* Arithmetic applied by a SEND memory subdescriptor to the target word */
enum send_mem_alg_type {
	SEND_MEMALG_SET = 0x00,
	SEND_MEMALG_ADD = 0x08,
	SEND_MEMALG_SUB = 0x09,
	SEND_MEMALG_ADDLEN = 0x0A,
	SEND_MEMALG_SUBLEN = 0x0B,
};
/* Data size operated on by a SEND memory subdescriptor */
enum send_mem_dsz_type {
	SEND_MEMDSZ_B64 = 0x00,
	SEND_MEMDSZ_B32 = 0x01,
	SEND_MEMDSZ_B8 = 0x03,
};
/* Send-queue subdescriptor types */
enum sq_subdesc_type {
	SQ_DESC_TYPE_INVALID = 0x00,
	SQ_DESC_TYPE_HEADER = 0x01,
	SQ_DESC_TYPE_CRC = 0x02,
	SQ_DESC_TYPE_IMMEDIATE = 0x03,
	SQ_DESC_TYPE_GATHER = 0x04,
	SQ_DESC_TYPE_MEMORY = 0x05,
};
455 struct sq_crc_subdesc {
456 #if defined(__BIG_ENDIAN_BITFIELD)
462 u64 crc_insert_pos:16;
465 #elif defined(__LITTLE_ENDIAN_BITFIELD)
468 u64 crc_insert_pos:16;
477 struct sq_gather_subdesc {
478 #if defined(__BIG_ENDIAN_BITFIELD)
479 u64 subdesc_type:4; /* W0 */
484 u64 rsvd1:15; /* W1 */
486 #elif defined(__LITTLE_ENDIAN_BITFIELD)
490 u64 subdesc_type:4; /* W0 */
493 u64 rsvd1:15; /* W1 */
497 /* SQ immediate subdescriptor */
498 struct sq_imm_subdesc {
499 #if defined(__BIG_ENDIAN_BITFIELD)
500 u64 subdesc_type:4; /* W0 */
504 u64 data:64; /* W1 */
505 #elif defined(__LITTLE_ENDIAN_BITFIELD)
508 u64 subdesc_type:4; /* W0 */
510 u64 data:64; /* W1 */
514 struct sq_mem_subdesc {
515 #if defined(__BIG_ENDIAN_BITFIELD)
516 u64 subdesc_type:4; /* W0 */
523 u64 rsvd1:15; /* W1 */
525 #elif defined(__LITTLE_ENDIAN_BITFIELD)
531 u64 subdesc_type:4; /* W0 */
534 u64 rsvd1:15; /* W1 */
538 struct sq_hdr_subdesc {
539 #if defined(__BIG_ENDIAN_BITFIELD)
542 u64 post_cqe:1; /* Post CQE on no error also */
552 u64 tot_len:20; /* W0 */
557 u64 tso_flags_last:12;
558 u64 tso_flags_first:12;
560 u64 tso_max_paysize:14; /* W1 */
561 #elif defined(__LITTLE_ENDIAN_BITFIELD)
572 u64 post_cqe:1; /* Post CQE on no error also */
574 u64 subdesc_type:4; /* W0 */
576 u64 tso_max_paysize:14;
578 u64 tso_flags_first:12;
579 u64 tso_flags_last:12;
582 u64 tso_sdc_cont:8; /* W1 */
586 /* Queue config register formats */
588 #if defined(__BIG_ENDIAN_BITFIELD)
589 u64 reserved_2_63:62;
592 #elif defined(__LITTLE_ENDIAN_BITFIELD)
595 u64 reserved_2_63:62;
600 #if defined(__BIG_ENDIAN_BITFIELD)
601 u64 reserved_43_63:21;
605 u64 reserved_35_39:5;
607 u64 reserved_25_31:7;
609 u64 reserved_0_15:16;
610 #elif defined(__LITTLE_ENDIAN_BITFIELD)
611 u64 reserved_0_15:16;
613 u64 reserved_25_31:7;
615 u64 reserved_35_39:5;
619 u64 reserved_43_63:21;
624 #if defined(__BIG_ENDIAN_BITFIELD)
625 u64 reserved_20_63:44;
627 u64 reserved_18_18:1;
630 u64 reserved_11_15:5;
633 u64 tstmp_bgx_intf:3;
634 #elif defined(__LITTLE_ENDIAN_BITFIELD)
635 u64 tstmp_bgx_intf:3;
638 u64 reserved_11_15:5;
641 u64 reserved_18_18:1;
643 u64 reserved_20_63:44;
648 #if defined(__BIG_ENDIAN_BITFIELD)
649 u64 reserved_45_63:19;
653 u64 reserved_36_41:6;
655 u64 reserved_25_31:7;
657 u64 reserved_12_15:4;
659 #elif defined(__LITTLE_ENDIAN_BITFIELD)
661 u64 reserved_12_15:4;
663 u64 reserved_25_31:7;
665 u64 reserved_36_41:6;
669 u64 reserved_45_63:19;
674 #if defined(__BIG_ENDIAN_BITFIELD)
675 u64 reserved_32_63:32;
677 u64 reserved_27_30:4;
681 u64 lock_viol_cqe_ena:1;
682 u64 send_tstmp_ena:1;
686 #elif defined(__LITTLE_ENDIAN_BITFIELD)
690 u64 send_tstmp_ena:1;
691 u64 lock_viol_cqe_ena:1;
695 u64 reserved_27_30:4;
697 u64 reserved_32_63:32;
701 #endif /* Q_STRUCT_H */