/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
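
/* Acquire a slow-path queue (SPQ) entry and initialize its ramrod header
 * with the requested command/protocol IDs and completion mode. On success
 * the entry is returned through @pp_ent with its ramrod data zeroed, ready
 * to be filled in by the caller and posted via qed_spq_post().
 */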
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);
	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;

	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			goto err;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;

err:
	qed_sp_destroy_request(p_hwfn, p_ent);

	return -EINVAL;
}
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent)
{
	/* qed_spq_get_entry() can either get an entry from the free_pool,
	 * or, if no entries are left, allocate a new entry and add it to
	 * the unlimited_pending list.
	 */
	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
		kfree(p_ent);
	else
		qed_spq_return_entry(p_hwfn, p_ent);
}
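
/* Translate the driver's tunnel classification type into the matching
 * firmware value; unknown types fall back to MAC/VLAN classification.
 */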
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}
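
/* Latch each tunnel type's enable state from @p_src. On PF start all modes
 * are taken; on PF update only those explicitly marked for update.
 */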
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
			    struct qed_tunnel_info *p_src, bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
		    p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
		    p_src->ip_geneve.b_mode_enabled;
}
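
/* Record the Rx/Tx classification update requests and convert each tunnel's
 * classification type to its firmware representation.
 */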
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
{
	int type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = type;
}
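
/* Cache the VXLAN/GENEVE UDP destination ports when a port update is
 * requested.
 */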
static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
			       struct qed_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}
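
/* Helpers that fill a ramrod's tunnel classification field, optionally
 * together with a UDP destination port update.
 */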
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct qed_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}
static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			    struct qed_tunn_update_type *tun_type,
			    u8 *p_update_port,
			    __le16 *p_port,
			    struct qed_tunn_update_udp_port *p_udp_port)
{
	__qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = cpu_to_le16(p_udp_port->port);
	}
}
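
/* Fold the requested tunnel changes into the device-wide tunnel state and
 * mirror the result into the PF-update ramrod's tunnel configuration.
 */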
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunnel_info *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
	p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
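
/* Apply the cached GRE/VXLAN/GENEVE enable state to the hardware. */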
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_tunnel_info *p_tun)
{
	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			   p_tun->ip_gre.b_mode_enabled);
	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
			      p_tun->ip_geneve.b_mode_enabled);
}
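
/* Program any updated UDP destination ports, then the tunnel enable state. */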
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_tunnel_info *p_tunn)
{
	if (p_tunn->vxlan_port.b_update_port)
		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
					p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		qed_set_geneve_dest_port(p_hwfn, p_ptt,
					 p_tunn->geneve_port.port);

	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
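
/* PF-start counterpart of qed_tunn_set_pf_update_params(); with no tunnel
 * config provided, the ramrod keeps the zeroed defaults set by
 * qed_sp_init_request().
 */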
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunnel_info *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	if (!p_src)
		return;

	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);
}
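
/* Build and post the PF_START ramrod: event ring and consolidation queue
 * PBL addresses, multi-function mode, outer tag, tunnel configuration,
 * personality and the PF's VF range. EBLOCK mode makes the call wait for
 * the firmware completion before returning.
 */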
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}

	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}
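
/* Post a PF_UPDATE ramrod carrying the current DCBx negotiation results. */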
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (!p_tunn)
		return -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

	return rc;
}
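
/* Post a PF_STOP ramrod and wait for the firmware to acknowledge it. */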
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
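
/* Post an empty ramrod; assumed here to serve as a firmware liveness check,
 * since its completion proves the slow-path queue is still being processed.
 */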
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
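
/* Propagate the current S-tag (outer VLAN) from hw_info to the firmware. */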
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}